patch
stringlengths 17
31.2k
| y
int64 1
1
| oldf
stringlengths 0
2.21M
| idx
int64 1
1
| id
int64 4.29k
68.4k
| msg
stringlengths 8
843
| proj
stringclasses 212
values | lang
stringclasses 9
values |
---|---|---|---|---|---|---|---|
@@ -616,11 +616,12 @@ def build_sdist(sdist_directory, config_settings=None):
files += glob.glob("src/core/**/*.cc", recursive=True)
files += glob.glob("src/core/**/*.h", recursive=True)
files += glob.glob("ci/xbuild/*.py")
+ files += glob.glob("docs/**/*.rst", recursive=True)
files += [f for f in glob.glob("tests/**/*.py", recursive=True)]
files += [f for f in glob.glob("tests_random/*.py")]
files += ["src/datatable/include/datatable.h"]
files.sort()
- files += ["ci/ext.py", "ci/__init__.py"]
+ files += ["ci/ext.py", "ci/__init__.py", "ci/gendoc.py"]
files += ["pyproject.toml"]
files += ["LICENSE"]
# See `is_source_distribution()` | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Copyright 2019-2020 H2O.ai
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#-------------------------------------------------------------------------------
#
# [PEP-517](https://www.python.org/dev/peps/pep-0517/)
# A build-system independent format for source trees
# Specification for a build backend system.
#
# [PEP-440](https://www.python.org/dev/peps/pep-0440/)
# Description of standard version formats.
#
#-------------------------------------------------------------------------------
import glob
import os
import platform
import re
import subprocess
import sys
import textwrap
import time
import xbuild
#-------------------------------------------------------------------------------
# Version handling
#-------------------------------------------------------------------------------
def is_source_distribution():
    """
    Return True if we are building from a source distribution, i.e. the
    auto-generated file src/datatable/_build_info.py is present while
    VERSION.txt (which only exists in a repository checkout) is absent.
    """
    have_version_txt = os.path.exists("VERSION.txt")
    have_build_info = os.path.exists("src/datatable/_build_info.py")
    return have_build_info and not have_version_txt
# The primary source of datatable's release version is the file
# VERSION.txt in the root of the repository.
#
# When building the release version of datatable, this file is
# expected to contain the "release version" of the distribution,
# i.e. have form
#
# XX.YY.ZZ
#
# In all other cases, the file is expected to contain the main
# version + optional suffix "a", "b" or "rc":
#
# XX.YY.ZZ[a|b|rc]
#
# If the suffix is absent, then this is presumed to be the
# "post-release" version, and suffix `.post` is automatically added.
#
# This procedure verifies that the content of VERSION.txt is in
# the appropriate format, and returns the augmented version of the
# datatable distribution:
#
# - In release mode (env.variable DT_RELEASE is set), the final
# release is the same as the content of VERSION.txt;
#
# - In PR mode (env.variable DT_BUILD_SUFFIX is present), the final
# version is `VERSION.txt` "0+" `DT_BUILD_SUFFIX`
#
# - In dev-main mode (env.variable DT_BUILD_NUMBER is present),
# the version is equal to `VERSION.txt` BUILD_NUMBER;
#
#   - When building from source distribution (file VERSION.txt is
#     absent), the version is taken from the datatable/_build_info.py file;
#
# - In all other cases (local build), the final version consists of
# of `VERSION.txt` "0+" [buildmode "."] timestamp ["." username].
#
def get_datatable_version(flavor=None):
    """
    Verify the content of VERSION.txt and return the full (augmented)
    version string of the datatable distribution. The exact rules for
    each build mode are described in the comment block above.

    Parameters
    ----------
    flavor: str | None
        Optional build flavor. In release mode only None, "sdist",
        "build" or "debug" are accepted; in local mode any string is
        allowed and becomes part of the local version tag.

    Raises
    ------
    SystemExit
        If VERSION.txt is missing, or its content / the relevant
        environment variable does not have the expected format.
    """
    # The build mode is decided by which environment variable is set;
    # the order of these checks defines their priority.
    build_mode = "release" if os.environ.get("DT_RELEASE") else \
                 "PR" if os.environ.get("DT_BUILD_SUFFIX") else \
                 "dev" if os.environ.get("DT_BUILD_NUMBER") else \
                 "sdist" if is_source_distribution() else \
                 "local"
    if build_mode != "sdist":
        if not os.path.exists("VERSION.txt"):
            raise SystemExit("File VERSION.txt is missing when building "
                             "datatable in %s mode" % build_mode)
        with open("VERSION.txt", "r") as inp:
            version = inp.read().strip()

    # In release mode, the version is just the content of VERSION.txt
    if build_mode == "release":
        if not re.fullmatch(r"\d+(\.\d+)+", version):
            raise SystemExit("Invalid version `%s` in VERSION.txt when building"
                             " datatable in release mode (DT_RELEASE is on)"
                             % version)
        if flavor == "debug":
            version += "+debug"
        elif flavor not in [None, "sdist", "build"]:
            raise SystemExit("Invalid build flavor %s when building datatable "
                             "in release mode" % flavor)
        return version

    # In PR mode, the version is appended with DT_BUILD_SUFFIX
    if build_mode == "PR":
        suffix = os.environ.get("DT_BUILD_SUFFIX")
        if not re.fullmatch(r"\w([\w\.]*\w)?", suffix):
            raise SystemExit("Invalid build suffix `%s` from environment "
                             "variable DT_BUILD_SUFFIX" % suffix)
        mm = re.fullmatch(r"\d+(\.\d+)+(a|b|rc)?", version)
        if not mm:
            raise SystemExit("Invalid version `%s` in VERSION.txt when building"
                             " datatable in PR mode" % version)
        if not mm.group(2):
            # VERSION.txt has no a/b/rc marker: treat the PR build as alpha
            version += "a"
        version += "0+" + suffix.lower()
        if flavor == "debug":
            version += ".debug"
        return version

    # In "main-dev" mode, the DT_BUILD_NUMBER is used
    if build_mode == "dev":
        build = os.environ.get("DT_BUILD_NUMBER")
        if not re.fullmatch(r"\d+", build):
            raise SystemExit("Invalid build number `%s` from environment "
                             "variable DT_BUILD_NUMBER" % build)
        mm = re.fullmatch(r"\d+(\.\d+)+(a|b|rc)?", version)
        if not mm:
            raise SystemExit("Invalid version `%s` in VERSION.txt when building"
                             " datatable in development mode" % version)
        if not mm.group(2):
            # No a/b/rc suffix: this is a post-release development build
            version += ".post"
        version += build
        if flavor == "debug":
            version += "+debug"
        return version

    # Building from sdist (file VERSION.txt not included)
    if build_mode == "sdist":
        return _get_version_from_build_info()

    # Otherwise we're building from a local distribution: append a
    # "+[flavor.]timestamp[.user]" local tag to the version.
    if build_mode == "local":
        if not version[-1].isdigit():
            version += "0"
        version += "+"
        if flavor:
            version += flavor + "."
        version += str(int(time.time()))
        user = _get_user()
        if user:
            version += "." + user
        return version
def _get_version_from_build_info():
    """
    Read the distribution version out of the auto-generated file
    src/datatable/_build_info.py. Raises SystemExit if the file is
    missing or does not contain a `version = "..."` assignment.
    """
    build_info_path = os.path.join("src", "datatable", "_build_info.py")
    if not os.path.exists(build_info_path):
        raise SystemExit("Invalid source distribution: file "
                         "src/datatable/_build_info.py is missing")
    with open(build_info_path, "r", encoding="utf-8") as f:
        content = f.read()
    match = re.search(r"\s*version\s*=\s*['\"]([\w\+\.]+)['\"]", content)
    if match is None:
        raise SystemExit("Cannot find version in src/datatable/"
                         "_build_info.py file")
    return match.group(1)
def _get_user():
    """
    Return the current user's login name with every non-alphanumeric
    character stripped, or an empty string if the name cannot be
    determined.
    """
    import getpass
    try:
        raw_name = getpass.getuser()
    except KeyError:
        # getpass raises KeyError when the user is not in /etc/passwd
        return ""
    return re.sub(r"[^a-zA-Z0-9]+", "", raw_name)
#-------------------------------------------------------------------------------
# Commands implementation
#-------------------------------------------------------------------------------
def create_logger(verbosity):
    """
    Map a numeric verbosity level onto the corresponding xbuild logger:
    0 -> Logger0, 1 -> Logger1, 2 -> Logger2, anything else -> Logger3.
    """
    logger_classes = {
        0: xbuild.Logger0,
        1: xbuild.Logger1,
        2: xbuild.Logger2,
    }
    logger_class = logger_classes.get(verbosity, xbuild.Logger3)
    return logger_class()
def build_extension(cmd, verbosity=3):
    """
    Compile the `_datatable` C++ extension and place the resulting
    dynamic library into `src/datatable/lib/`.

    Parameters
    ----------
    cmd: str
        Build mode, one of "asan", "build", "coverage" or "debug"; it
        selects the set of compiler/linker flags for the build.

    verbosity: int
        Verbosity level (0..3) of the build log, see `create_logger()`.

    Returns
    -------
    The path of the compiled extension file.
    """
    assert cmd in ["asan", "build", "coverage", "debug"]
    windows = (sys.platform == "win32")
    macos = (sys.platform == "darwin")
    linux = (sys.platform == "linux")
    if not (windows or macos or linux):
        print("\x1b[93mWarning: unknown platform %s\x1b[m" % sys.platform)
        linux = True

    ext = xbuild.Extension()
    ext.log = create_logger(verbosity)
    ext.name = "_datatable"
    ext.build_dir = "build/" + cmd
    ext.destination_dir = "src/datatable/lib/"
    ext.add_sources("src/core/**/*.cc")

    # Common compile settings
    ext.compiler.enable_colors()
    ext.compiler.add_include_dir("src/core")
    ext.compiler.add_default_python_include_dir()

    if ext.compiler.is_msvc():
        # General compiler flags
        ext.compiler.add_compiler_flag("/std:c++14")
        ext.compiler.add_compiler_flag("/EHsc")
        ext.compiler.add_compiler_flag("/nologo")
        ext.compiler.add_include_dir(ext.compiler.path + "\\include")
        ext.compiler.add_include_dir(ext.compiler.winsdk_include_path + "\\ucrt")
        ext.compiler.add_include_dir(ext.compiler.winsdk_include_path + "\\shared")
        ext.compiler.add_include_dir(ext.compiler.winsdk_include_path + "\\um")

        # Set up the compiler warning level
        ext.compiler.add_compiler_flag("/W4")

        # Disable particular warnings
        ext.compiler.add_compiler_flag(
            # "This function or variable may be unsafe"
            # issued by MSVC for a fully valid and portable code
            "/wd4996",
            # "consider using 'if constexpr' statement instead"
            # as 'if constexpr' is not available in C++14
            "/wd4127",
            # "no suitable definition provided for explicit template instantiation
            # request" as we want to keep some template method definitions
            # in separate translation units
            "/wd4661",
            # "structure was padded due to alignment specifier"
            # as this is exactly the reason why we use the alignment specifier
            "/wd4324",
        )

        # Link flags
        ext.compiler.add_linker_flag("/nologo")
        ext.compiler.add_linker_flag("/DLL")
        ext.compiler.add_linker_flag("/EXPORT:PyInit__datatable")
        ext.compiler.add_default_python_lib_dir()
        ext.compiler.add_lib_dir(ext.compiler.path + "\\lib\\x64")
        ext.compiler.add_lib_dir(ext.compiler.winsdk_lib_path + "\\ucrt\\x64")
        ext.compiler.add_lib_dir(ext.compiler.winsdk_lib_path + "\\um\\x64")

        if cmd == "asan":
            raise RuntimeError("`make asan` is not supported on Windows systems")

        if cmd == "build":
            ext.compiler.add_compiler_flag("/O2")   # full optimization

        if cmd == "coverage":
            raise RuntimeError("`make coverage` is not supported on Windows systems")

        if cmd == "debug":
            ext.compiler.add_compiler_flag("/Od")   # no optimization
            ext.compiler.add_compiler_flag("/Z7")
            ext.compiler.add_linker_flag("/DEBUG:FULL")
    else:
        # Common compile flags
        ext.compiler.add_compiler_flag("-std=c++14")
        # "-stdlib=libc++" (clang ???)
        ext.compiler.add_compiler_flag("-fPIC")

        # -pthread is recommended for compiling/linking multithreaded apps
        ext.compiler.add_compiler_flag("-pthread")
        ext.compiler.add_linker_flag("-pthread")

        # Common link flags
        ext.compiler.add_linker_flag("-shared")
        ext.compiler.add_linker_flag("-g")
        ext.compiler.add_linker_flag("-m64")
        if macos:
            # Resolve Python symbols at load time instead of link time
            ext.compiler.add_linker_flag("-undefined", "dynamic_lookup")
        if linux:
            ext.compiler.add_linker_flag("-lstdc++")

        if cmd == "asan":
            ext.compiler.add_compiler_flag("-fsanitize=address")
            ext.compiler.add_compiler_flag("-fno-omit-frame-pointer")
            ext.compiler.add_compiler_flag("-fsanitize-address-use-after-scope")
            ext.compiler.add_compiler_flag("-shared-libasan")
            ext.compiler.add_compiler_flag("-g3")
            ext.compiler.add_compiler_flag("-glldb" if macos else "-ggdb")
            ext.compiler.add_compiler_flag("-O0")
            ext.compiler.add_compiler_flag("-DDTTEST", "-DDT_DEBUG")
            ext.compiler.add_linker_flag("-fsanitize=address", "-shared-libasan")

        if cmd == "build":
            ext.compiler.add_compiler_flag("-g2")  # include some debug info
            ext.compiler.add_compiler_flag("-O3")  # full optimization

        if cmd == "coverage":
            ext.compiler.add_compiler_flag("-g2")
            ext.compiler.add_compiler_flag("-O0")
            ext.compiler.add_compiler_flag("--coverage")
            ext.compiler.add_compiler_flag("-DDTTEST", "-DDT_DEBUG")
            ext.compiler.add_linker_flag("-O0")
            ext.compiler.add_linker_flag("--coverage")

        if cmd == "debug":
            ext.compiler.add_compiler_flag("-g3")
            ext.compiler.add_compiler_flag("-glldb" if macos else "-ggdb")
            ext.compiler.add_compiler_flag("-O0")  # no optimization
            ext.compiler.add_compiler_flag("-DDTTEST", "-DDT_DEBUG")
            if ext.compiler.flavor == "clang":
                # Include macro definitions in the debug info
                ext.compiler.add_compiler_flag("-fdebug-macro")

        # Compiler warnings
        # (note: the previous version listed -Wno-poison-system-directories
        # and -Wno-unknown-warning-option twice; the duplicates are removed)
        if ext.compiler.is_clang():
            ext.compiler.add_compiler_flag(
                "-Weverything",
                "-Wno-c++98-compat-pedantic",
                "-Wno-c99-extensions",
                "-Wno-exit-time-destructors",
                "-Wno-float-equal",
                "-Wno-global-constructors",
                "-Wno-reserved-id-macro",
                "-Wno-switch-enum",
                "-Wno-poison-system-directories",
                "-Wno-unknown-warning-option",
                "-Wno-weak-template-vtables",
                "-Wno-weak-vtables",
            )
        else:
            ext.compiler.add_compiler_flag(
                "-Wall",
                "-Wno-unused-value",
                "-Wno-unknown-pragmas"
            )

    # Regenerate documentation.cc if needed before compiling
    ext.add_prebuild_trigger(generate_documentation)

    # Setup is complete, ready to build
    ext.build()
    return ext.output_file
def generate_documentation(ext):
    """
    Pre-build trigger: regenerate src/core/documentation.cc from the
    .rst files under docs/api/ whenever src/core/documentation.h has
    been modified, and make sure the generated file is compiled.
    """
    header_file = "src/core/documentation.h"
    source_file = "src/core/documentation.cc"
    rst_files = glob.glob("docs/api/**/*.rst", recursive=True)
    if ext.is_modified(header_file):
        import gendoc
        ext.log.report_generating_docs(source_file)
        gendoc.generate_documentation(header_file, source_file, rst_files)
    ext.add_sources(source_file)
def get_meta():
    """
    Return the dictionary of distribution metadata (name, version,
    description, classifiers, requirements, etc) that is passed to
    `xbuild.Wheel` when building wheel / sdist archives.
    """
    return dict(
        name="datatable",
        # The version is read from the already-generated _build_info.py
        version=_get_version_from_build_info(),

        summary="Python library for fast multi-threaded data manipulation and "
                "munging.",
        description="""
            This is a Python package for manipulating 2-dimensional tabular data
            structures (aka data frames). It is close in spirit to pandas or SFrame;
            however we put specific emphasis on speed and big data support. As the
            name suggests, the package is closely related to R's data.table and
            attempts to mimic its core algorithms and API.

            See https://github.com/h2oai/datatable for more details.
        """,
        keywords=["datatable", "data", "dataframe", "frame", "data.table",
                  "munging", "numpy", "pandas", "data processing", "ETL"],

        # Author details
        author="Pasha Stetsenko",
        author_email="[email protected]",
        maintainer="Oleksiy Kononenko",
        maintainer_email="[email protected]",

        home_page="https://github.com/h2oai/datatable",
        license="Mozilla Public License v2.0",
        classifiers=[
            "Development Status :: 5 - Production/Stable",
            "Intended Audience :: Developers",
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
            "Operating System :: MacOS",
            "Operating System :: Microsoft :: Windows",
            "Operating System :: Unix",
            "Programming Language :: Python :: 3 :: Only",
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Topic :: Scientific/Engineering :: Information Analysis",
        ],

        # Runtime dependencies (PEP-508 markers: the 'tests' and
        # 'optional' extras gate the non-mandatory requirements)
        requirements=[
            "pytest (>=3.1); extra == 'tests'",
            "docutils (>=0.14); extra == 'tests'",
            "numpy; extra == 'optional'",
            "pandas; extra == 'optional'",
            "xlrd; extra == 'optional'",
        ],
        requires_python=">=3.6",
    )
#-------------------------------------------------------------------------------
# Build info file
#-------------------------------------------------------------------------------
def shell_cmd(cmd, strict=False):
    """
    Execute the command `cmd` (a list of arguments) and return its
    stripped stdout+stderr output.

    If the command fails -- either because it exits with a non-zero
    status, or because the executable cannot be run at all (e.g. git
    is not installed) -- then raise SystemExit when `strict` is True,
    or return an empty string otherwise.
    """
    try:
        return subprocess.check_output(cmd, universal_newlines=True,
                                       stderr=subprocess.STDOUT).strip()
    except subprocess.CalledProcessError as e:
        if strict:
            raise SystemExit("Command `%s` failed with code %d: %s"
                             % (" ".join(cmd), e.returncode, e.output))
        return ""
    except OSError as e:
        # Previously only CalledProcessError was handled, so a missing
        # executable crashed the build even with strict=False.
        if strict:
            raise SystemExit("Command `%s` failed: %s" % (" ".join(cmd), e))
        return ""
def generate_build_info(mode=None, strict=False):
    """
    Gather the build information and write it into the
    datatable/_build_info.py file.

    Parameters
    ----------
    mode: str
        Used only for local version tags, the mode is the first part
        of such local tag.

    strict: bool
        If False, then the errors in git commands will be silently
        ignored, and the produced _build_info.py file will contain
        empty `git_revision` and `git_branch` fields.
        If True, then the errors in git commands will terminate the
        build process.
    """
    version = get_datatable_version(mode)
    build_date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
    git_hash = shell_cmd(["git", "rev-parse", "HEAD"], strict=strict)
    # get the date of the commit (HEAD), as a Unix timestamp
    git_date = shell_cmd(["git", "show", "-s", "--format=%ct", "HEAD"],
                         strict=strict)
    # On CI systems the branch name is taken from an environment
    # variable, since git may report a detached HEAD there.
    if "CHANGE_BRANCH" in os.environ:
        git_branch = os.environ["CHANGE_BRANCH"]
    elif "APPVEYOR_REPO_BRANCH" in os.environ:
        git_branch = os.environ["APPVEYOR_REPO_BRANCH"]
    else:
        git_branch = shell_cmd(["git", "rev-parse", "--abbrev-ref", "HEAD"],
                               strict=strict)
    git_diff = shell_cmd(["git", "diff", "HEAD", "--stat", "--no-color"],
                         strict=strict)
    # Reformat the `git_date` as a UTC time string
    if git_date:
        git_date = time.strftime("%Y-%m-%d %H:%M:%S",
                                 time.gmtime(int(git_date)))
    if mode == 'build':
        mode = 'release'
    info_file = os.path.join("src", "datatable", "_build_info.py")
    with open(info_file, "wt") as out:
        # License header of the generated file
        out.write(
            "#!/usr/bin/env python3\n"
            "# -*- encoding: utf-8 -*-\n"
            "# --------------------------------------------------------------\n"
            "# Copyright 2018-%d H2O.ai\n"
            "#\n"
            "# Permission is hereby granted, free of charge, to any person\n"
            "# obtaining a copy of this software and associated documentation\n"
            "# files (the 'Software'), to deal in the Software without\n"
            "# restriction, including without limitation the rights to use,\n"
            "# copy, modify, merge, publish, distribute, sublicense, and/or\n"
            "# sell copies of the Software, and to permit persons to whom the\n"
            "# Software is furnished to do so, subject to the following\n"
            "# conditions:\n"
            "#\n"
            "# The above copyright notice and this permission notice shall be\n"
            "# included in all copies or substantial portions of the\n"
            "# Software.\n"
            "#\n"
            "# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY\n"
            "# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n"
            "# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n"
            "# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n"
            "# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n"
            "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n"
            "# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n"
            "# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
            "# --------------------------------------------------------------\n"
            "# This file was auto-generated from ci/ext.py\n\n"
            % time.gmtime().tm_year
        )
        # The compiler is queried from the built extension at import
        # time; fall back to 'unknown' when it is not importable.
        out.write("import types\n\n")
        out.write("try:\n")
        out.write("    import datatable.lib._datatable as _dt\n")
        out.write("    _compiler = _dt._compiler()\n")
        out.write("except:\n")
        out.write("    _compiler = 'unknown'\n")
        out.write("\n")
        out.write("build_info = types.SimpleNamespace(\n")
        out.write("    version='%s',\n" % version)
        out.write("    build_date='%s',\n" % build_date)
        out.write("    build_mode='%s',\n" % mode)
        out.write("    compiler=_compiler,\n")
        out.write("    git_revision='%s',\n" % git_hash)
        out.write("    git_branch='%s',\n" % git_branch)
        out.write("    git_date='%s',\n" % git_date)
        if git_diff:
            # Emit the diff stat as one implicitly-concatenated string,
            # one source line per diff line
            lines = git_diff.split('\n')
            assert not any("'" in line for line in lines)
            out.write("    git_diff='%s" % lines[0].strip())
            for line in lines[1:]:
                out.write("\\n'\n             '%s" % line.strip())
            out.write("',\n")
        else:
            out.write("    git_diff='',\n")
        out.write(")\n")
#-------------------------------------------------------------------------------
# Standard hooks
#-------------------------------------------------------------------------------
def build_wheel(wheel_directory, config_settings=None, metadata_directory=None):
    """
    Function for building wheels, satisfies requirements of PEP-517.

    Recognized (non-standard) `config_settings` keys:
      - "reuse_extension": use an already-built _datatable library from
        src/datatable/lib/ instead of compiling;
      - "reuse_version": do not regenerate _build_info.py;
      - "debug": build a debug wheel instead of an optimized one.
    """
    if config_settings is None:
        config_settings = {}
    assert isinstance(wheel_directory, str)
    assert isinstance(config_settings, dict)
    assert metadata_directory is None
    reuse_extension = config_settings.pop("reuse_extension", False)
    reuse_version = config_settings.pop("reuse_version", None)
    debug_wheel = config_settings.pop("debug", False)

    # When building from an sdist, keep the version recorded in the
    # distributed _build_info.py by default
    if is_source_distribution() and reuse_version is None:
        config_settings["reuse_version"] = True

    if not reuse_version:
        flavor = "custom" if reuse_extension else \
                 "debug" if debug_wheel else \
                 "build"
        generate_build_info(flavor, strict=not is_source_distribution())

    assert os.path.isfile("src/datatable/_build_info.py")
    if reuse_extension:
        # Locate the pre-built extension matching the current Python
        pyver = "%d%d" % sys.version_info[:2]
        soext = "dll" if sys.platform == "win32" else "so"
        pattern = "src/datatable/lib/_datatable.cpython-%s*.%s" % (pyver, soext)
        sofiles = glob.glob(pattern)
        if not sofiles:
            raise SystemExit("Extension file %s not found" % pattern)
        if len(sofiles) > 1:
            raise SystemExit("Multiple extension files found: %r" % (sofiles,))
        so_file = sofiles[0]
    else:
        so_file = build_extension(cmd=("debug" if debug_wheel else "build"),
                                  verbosity=3)

    # Collect the files that go into the wheel; the destination path
    # inside the wheel drops the leading "src/" prefix
    files = glob.glob("src/datatable/**/*.py", recursive=True)
    files += [so_file]
    files += ["src/datatable/include/datatable.h"]
    files = [(f, f[4:])  # (src_file, destination_file)
             for f in files if "_datatable_builder.py" not in f]
    files.sort()

    meta = get_meta()
    wb = xbuild.Wheel(files, **meta, **config_settings)
    wb.log = create_logger(verbosity=3)
    wheel_file = wb.build_wheel(wheel_directory)
    return wheel_file
def build_sdist(sdist_directory, config_settings=None):
    """
    Function for building source distributions, satisfies PEP-517.

    The archive must contain every file needed to later build a wheel
    from it, including the documentation sources and the gendoc script
    used by `generate_documentation()`.
    """
    assert isinstance(sdist_directory, str)
    assert config_settings is None or isinstance(config_settings, dict)

    generate_build_info("sdist", strict=True)

    files = [f for f in glob.glob("src/datatable/**/*.py", recursive=True)
             if "_datatable_builder.py" not in f]
    files += glob.glob("src/core/**/*.cc", recursive=True)
    files += glob.glob("src/core/**/*.h", recursive=True)
    files += glob.glob("ci/xbuild/*.py")
    # The .rst files are consumed by generate_documentation() when the
    # extension is built from this sdist
    files += glob.glob("docs/**/*.rst", recursive=True)
    files += [f for f in glob.glob("tests/**/*.py", recursive=True)]
    files += [f for f in glob.glob("tests_random/*.py")]
    files += ["src/datatable/include/datatable.h"]
    files.sort()
    # ci/gendoc.py is imported by generate_documentation() at build time
    files += ["ci/ext.py", "ci/__init__.py", "ci/gendoc.py"]
    files += ["pyproject.toml"]
    files += ["LICENSE"]
    # VERSION.txt is deliberately NOT included: its absence is how
    # `is_source_distribution()` recognizes an sdist build.
    assert "VERSION.txt" not in files

    meta = get_meta()
    wb = xbuild.Wheel(files, **meta)
    wb.log = create_logger(verbosity=3)
    sdist_file = wb.build_sdist(sdist_directory)
    return sdist_file
#-------------------------------------------------------------------------------
# Allow this script to run from command line
#-------------------------------------------------------------------------------
def cmd_ext(args):
    """
    Handle the `asan`/`build`/`coverage`/`debug` commands: record the
    requested build mode in the lib/ directory, refresh _build_info.py,
    then compile the _datatable extension.
    """
    with open("src/datatable/lib/.xbuild-cmd", "wt") as marker:
        marker.write(args.cmd)
    generate_build_info(args.cmd, strict=args.strict)
    build_extension(cmd=args.cmd, verbosity=args.verbosity)
def cmd_geninfo(args):
    # Handle the `geninfo` command: (re)generate the _build_info.py file.
    generate_build_info(strict=args.strict)
def cmd_sdist(args):
    """
    Handle the `sdist` command: build a source distribution and verify
    that the resulting archive actually exists.
    """
    sdist_file = build_sdist(args.destination)
    sdist_path = os.path.join(args.destination, sdist_file)
    assert os.path.isfile(sdist_path)
def cmd_wheel(args):
    """
    Handle the `wheel` / `debugwheel` commands: build a wheel with the
    requested options and verify that the file was produced.
    """
    config = {
        "audit": args.audit,
        "debug": (args.cmd == "debugwheel"),
        "reuse_extension": args.nobuild,
    }
    wheel_file = build_wheel(args.destination, config)
    wheel_path = os.path.join(args.destination, wheel_file)
    assert os.path.isfile(wheel_path)
def main():
    """
    Command-line entry point: parse the arguments and dispatch to the
    appropriate cmd_* handler.
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Build _datatable module',
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument("cmd", metavar="CMD",
        choices=["asan", "build", "coverage", "debug", "geninfo", "sdist",
                 "wheel", "debugwheel"],
        help=textwrap.dedent("""
            Specify what this script should do:

            asan       : build _datatable with Address Sanitizer enabled
            build      : build _datatable normally, with full optimization
            coverage   : build _datatable in a mode suitable for coverage
                         testing
            debug      : build _datatable in debug mode, optimized for gdb
                         on Linux and for lldb on MacOS
            geninfo    : generate _build_info.py file
            sdist      : create source distribution of datatable
            wheel      : create wheel distribution of datatable
            debugwheel : create wheel distribution of debug version of datatable
            """).strip())
    parser.add_argument("-v", dest="verbosity", action="count", default=1,
        help="Verbosity level of the output, specify the parameter up to 3\n"
             "times for maximum verbosity; the default level is 1.")
    parser.add_argument("-d", dest="destination", default="dist",
        help="Destination directory for `sdist` and `wheel` commands.")
    parser.add_argument("--audit", action="store_true",
        help="This flag can be used with cmd='wheel' only, on a Linux\n"
             "platform, which must have the 'auditwheel' external tool\n"
             "installed. If this flag is specified, then after building a\n"
             "wheel, it will be tested with the auditwheel. If the test\n"
             "succeeds, i.e. the wheel is found to be compatible with a\n"
             "manylinux* tag, then the wheel will be renamed to use the new\n"
             "tag. Otherwise, an error will be raised.")
    parser.add_argument("--strict", action="store_true",
        help="This flag is used for `geninfo` command: when given, the\n"
             "generated _build_info.py file is guaranteed to contain the\n"
             "git_revision and git_branch fields, or otherwise an error\n"
             "will be thrown. This flag is turned on automatically for\n"
             "`sdist` and `wheel` commands.")
    parser.add_argument("--nobuild", action="store_true",
        help="This flag is used for `wheel` command: it indicates that\n"
             "the _datatable dynamic library should not be rebuilt.\n"
             "Instead, the library will be taken as-is from the lib/\n"
             "folder. The user is expected to have it pre-built manually.")
    args = parser.parse_args()
    # --audit relies on the external `auditwheel` tool, Linux-only
    if args.audit and "linux" not in sys.platform:
        raise ValueError("Argument --audit can be used on a Linux platform "
                         "only, current platform is `%s`" % sys.platform)

    # Dispatch: "wheel" matches both `wheel` and `debugwheel`; the ext
    # commands (asan/build/coverage/debug) fall through to cmd_ext().
    if "wheel" in args.cmd:       cmd_wheel(args)
    elif args.cmd == "sdist":     cmd_sdist(args)
    elif args.cmd == "geninfo":   cmd_geninfo(args)
    else:                         cmd_ext(args)
# Run main() only when this file is executed as a script (not imported
# as a PEP-517 build backend).
if __name__ == "__main__":
    main()
| 1 | 13,149 | Looks like it is pretty easy to miss it when adding new files under `ci`. Hopefully, this doesn't happen too often. | h2oai-datatable | py |
@@ -244,6 +244,7 @@ func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey,
return nil, fmt.Errorf("p2p service: %w", err)
}
b.p2pService = p2ps
+ defer p2ps.Ready()
if !o.Standalone {
if natManager := p2ps.NATManager(); natManager != nil { | 1 | // Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package node defines the concept of a Bee node
// by bootstrapping and injecting all necessary
// dependencies.
package node
import (
"context"
"crypto/ecdsa"
"fmt"
"io"
"log"
"math/big"
"net"
"net/http"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethersphere/bee/pkg/accounting"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/debugapi"
"github.com/ethersphere/bee/pkg/feeds/factory"
"github.com/ethersphere/bee/pkg/hive"
"github.com/ethersphere/bee/pkg/kademlia"
"github.com/ethersphere/bee/pkg/localstore"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/metrics"
"github.com/ethersphere/bee/pkg/netstore"
"github.com/ethersphere/bee/pkg/p2p/libp2p"
"github.com/ethersphere/bee/pkg/pingpong"
"github.com/ethersphere/bee/pkg/pricing"
"github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/puller"
"github.com/ethersphere/bee/pkg/pullsync"
"github.com/ethersphere/bee/pkg/pullsync/pullstorage"
"github.com/ethersphere/bee/pkg/pusher"
"github.com/ethersphere/bee/pkg/pushsync"
"github.com/ethersphere/bee/pkg/recovery"
"github.com/ethersphere/bee/pkg/resolver/multiresolver"
"github.com/ethersphere/bee/pkg/retrieval"
settlement "github.com/ethersphere/bee/pkg/settlement"
"github.com/ethersphere/bee/pkg/settlement/pseudosettle"
"github.com/ethersphere/bee/pkg/settlement/swap"
"github.com/ethersphere/bee/pkg/settlement/swap/chequebook"
"github.com/ethersphere/bee/pkg/settlement/swap/transaction"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/ethersphere/bee/pkg/traversal"
ma "github.com/multiformats/go-multiaddr"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
// Bee bundles the servers, closers and cancel functions of all the
// subsystems started by NewBee, so that the node can later be shut
// down in an orderly fashion.
type Bee struct {
	p2pService            io.Closer          // libp2p service created in NewBee
	p2pCancel             context.CancelFunc // cancels the context shared by the p2p subsystems
	apiCloser             io.Closer
	apiServer             *http.Server
	debugAPIServer        *http.Server // serves debug/health endpoints; set only when DebugAPIAddr is configured
	resolverCloser        io.Closer
	errorLogWriter        *io.PipeWriter // pipe feeding http.Server error logs into the node logger
	tracerCloser          io.Closer
	tagsCloser            io.Closer
	stateStoreCloser      io.Closer
	localstoreCloser      io.Closer
	topologyCloser        io.Closer
	pusherCloser          io.Closer
	pullerCloser          io.Closer
	pullSyncCloser        io.Closer
	pssCloser             io.Closer
	ethClientCloser       func() // closes the swap Ethereum backend; set only when SwapEnable is on
	recoveryHandleCleanup func()
}
// Options holds the configuration values with which NewBee constructs
// a node.
type Options struct {
	DataDir                string
	DBCapacity             uint64
	APIAddr                string
	DebugAPIAddr           string // when non-empty, a debug/health HTTP server is started on this address
	Addr                   string
	NATAddr                string
	EnableWS               bool
	EnableQUIC             bool
	WelcomeMessage         string
	Bootnodes              []string // multiaddr strings; invalid entries are skipped with a warning
	CORSAllowedOrigins     []string
	Logger                 logging.Logger
	Standalone             bool // when true, no p2p connections are made or accepted
	TracingEnabled         bool
	TracingEndpoint        string
	TracingServiceName     string
	GlobalPinningEnabled   bool
	PaymentThreshold       string // base-10 big.Int string
	PaymentTolerance       string // base-10 big.Int string
	PaymentEarly           string
	ResolverConnectionCfgs []multiresolver.ConnectionConfig
	GatewayMode            bool
	SwapEndpoint           string
	SwapFactoryAddress     string
	SwapInitialDeposit     string
	SwapEnable             bool // enables the swap settlement subsystem (chain backend, chequebook, cashout)
}
func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey, signer crypto.Signer, networkID uint64, logger logging.Logger, libp2pPrivateKey, pssPrivateKey *ecdsa.PrivateKey, o Options) (*Bee, error) {
tracer, tracerCloser, err := tracing.NewTracer(&tracing.Options{
Enabled: o.TracingEnabled,
Endpoint: o.TracingEndpoint,
ServiceName: o.TracingServiceName,
})
if err != nil {
return nil, fmt.Errorf("tracer: %w", err)
}
p2pCtx, p2pCancel := context.WithCancel(context.Background())
b := &Bee{
p2pCancel: p2pCancel,
errorLogWriter: logger.WriterLevel(logrus.ErrorLevel),
tracerCloser: tracerCloser,
}
var debugAPIService *debugapi.Service
if o.DebugAPIAddr != "" {
overlayEthAddress, err := signer.EthereumAddress()
if err != nil {
return nil, fmt.Errorf("eth address: %w", err)
}
// set up basic debug api endpoints for debugging and /health endpoint
debugAPIService = debugapi.New(swarmAddress, publicKey, pssPrivateKey.PublicKey, overlayEthAddress, logger, tracer, o.CORSAllowedOrigins)
debugAPIListener, err := net.Listen("tcp", o.DebugAPIAddr)
if err != nil {
return nil, fmt.Errorf("debug api listener: %w", err)
}
debugAPIServer := &http.Server{
IdleTimeout: 30 * time.Second,
ReadHeaderTimeout: 3 * time.Second,
Handler: debugAPIService,
ErrorLog: log.New(b.errorLogWriter, "", 0),
}
go func() {
logger.Infof("debug api address: %s", debugAPIListener.Addr())
if err := debugAPIServer.Serve(debugAPIListener); err != nil && err != http.ErrServerClosed {
logger.Debugf("debug api server: %v", err)
logger.Error("unable to serve debug api")
}
}()
b.debugAPIServer = debugAPIServer
}
stateStore, err := InitStateStore(logger, o.DataDir)
if err != nil {
return nil, err
}
b.stateStoreCloser = stateStore
addressbook := addressbook.New(stateStore)
var swapBackend *ethclient.Client
var overlayEthAddress common.Address
var chainID int64
var transactionService transaction.Service
var chequebookFactory chequebook.Factory
var chequebookService chequebook.Service
var chequeStore chequebook.ChequeStore
var cashoutService chequebook.CashoutService
if o.SwapEnable {
swapBackend, overlayEthAddress, chainID, transactionService, err = InitChain(
p2pCtx,
logger,
stateStore,
o.SwapEndpoint,
signer,
)
if err != nil {
return nil, err
}
b.ethClientCloser = swapBackend.Close
chequebookFactory, err = InitChequebookFactory(
logger,
swapBackend,
chainID,
transactionService,
o.SwapFactoryAddress,
)
if err != nil {
return nil, err
}
if err = chequebookFactory.VerifyBytecode(p2pCtx); err != nil {
return nil, fmt.Errorf("factory fail: %w", err)
}
chequebookService, err = InitChequebookService(
p2pCtx,
logger,
stateStore,
signer,
chainID,
swapBackend,
overlayEthAddress,
transactionService,
chequebookFactory,
o.SwapInitialDeposit,
)
if err != nil {
return nil, err
}
chequeStore, cashoutService = initChequeStoreCashout(
stateStore,
swapBackend,
chequebookFactory,
chainID,
overlayEthAddress,
transactionService,
)
}
p2ps, err := libp2p.New(p2pCtx, signer, networkID, swarmAddress, addr, addressbook, stateStore, logger, tracer, libp2p.Options{
PrivateKey: libp2pPrivateKey,
NATAddr: o.NATAddr,
EnableWS: o.EnableWS,
EnableQUIC: o.EnableQUIC,
Standalone: o.Standalone,
WelcomeMessage: o.WelcomeMessage,
})
if err != nil {
return nil, fmt.Errorf("p2p service: %w", err)
}
b.p2pService = p2ps
if !o.Standalone {
if natManager := p2ps.NATManager(); natManager != nil {
// wait for nat manager to init
logger.Debug("initializing NAT manager")
select {
case <-natManager.Ready():
// this is magic sleep to give NAT time to sync the mappings
// this is a hack, kind of alchemy and should be improved
time.Sleep(3 * time.Second)
logger.Debug("NAT manager initialized")
case <-time.After(10 * time.Second):
logger.Warning("NAT manager init timeout")
}
}
}
// Construct protocols.
pingPong := pingpong.New(p2ps, logger, tracer)
if err = p2ps.AddProtocol(pingPong.Protocol()); err != nil {
return nil, fmt.Errorf("pingpong service: %w", err)
}
hive := hive.New(p2ps, addressbook, networkID, logger)
if err = p2ps.AddProtocol(hive.Protocol()); err != nil {
return nil, fmt.Errorf("hive service: %w", err)
}
var bootnodes []ma.Multiaddr
if o.Standalone {
logger.Info("Starting node in standalone mode, no p2p connections will be made or accepted")
} else {
for _, a := range o.Bootnodes {
addr, err := ma.NewMultiaddr(a)
if err != nil {
logger.Debugf("multiaddress fail %s: %v", a, err)
logger.Warningf("invalid bootnode address %s", a)
continue
}
bootnodes = append(bootnodes, addr)
}
}
var settlement settlement.Interface
var swapService *swap.Service
if o.SwapEnable {
swapService, err = InitSwap(
p2ps,
logger,
stateStore,
networkID,
overlayEthAddress,
chequebookService,
chequeStore,
cashoutService,
)
if err != nil {
return nil, err
}
settlement = swapService
} else {
pseudosettleService := pseudosettle.New(p2ps, logger, stateStore)
if err = p2ps.AddProtocol(pseudosettleService.Protocol()); err != nil {
return nil, fmt.Errorf("pseudosettle service: %w", err)
}
settlement = pseudosettleService
}
paymentThreshold, ok := new(big.Int).SetString(o.PaymentThreshold, 10)
if !ok {
return nil, fmt.Errorf("invalid payment threshold: %s", paymentThreshold)
}
pricing := pricing.New(p2ps, logger, paymentThreshold)
if err = p2ps.AddProtocol(pricing.Protocol()); err != nil {
return nil, fmt.Errorf("pricing service: %w", err)
}
paymentTolerance, ok := new(big.Int).SetString(o.PaymentTolerance, 10)
if !ok {
return nil, fmt.Errorf("invalid payment tolerance: %s", paymentTolerance)
}
paymentEarly, ok := new(big.Int).SetString(o.PaymentEarly, 10)
if !ok {
return nil, fmt.Errorf("invalid payment early: %s", paymentEarly)
}
acc, err := accounting.NewAccounting(
paymentThreshold,
paymentTolerance,
paymentEarly,
logger,
stateStore,
settlement,
pricing,
)
if err != nil {
return nil, fmt.Errorf("accounting: %w", err)
}
settlement.SetNotifyPaymentFunc(acc.AsyncNotifyPayment)
pricing.SetPaymentThresholdObserver(acc)
kad := kademlia.New(swarmAddress, addressbook, hive, p2ps, logger, kademlia.Options{Bootnodes: bootnodes, Standalone: o.Standalone})
b.topologyCloser = kad
hive.SetAddPeersHandler(kad.AddPeers)
p2ps.SetNotifier(kad)
addrs, err := p2ps.Addresses()
if err != nil {
return nil, fmt.Errorf("get server addresses: %w", err)
}
for _, addr := range addrs {
logger.Debugf("p2p address: %s", addr)
}
var path string
if o.DataDir != "" {
path = filepath.Join(o.DataDir, "localstore")
}
lo := &localstore.Options{
Capacity: o.DBCapacity,
}
storer, err := localstore.New(path, swarmAddress.Bytes(), lo, logger)
if err != nil {
return nil, fmt.Errorf("localstore: %w", err)
}
b.localstoreCloser = storer
retrieve := retrieval.New(swarmAddress, storer, p2ps, kad, logger, acc, accounting.NewFixedPricer(swarmAddress, 1000000000), tracer)
tagService := tags.NewTags(stateStore, logger)
b.tagsCloser = tagService
if err = p2ps.AddProtocol(retrieve.Protocol()); err != nil {
return nil, fmt.Errorf("retrieval service: %w", err)
}
pssService := pss.New(pssPrivateKey, logger)
b.pssCloser = pssService
var ns storage.Storer
if o.GlobalPinningEnabled {
// create recovery callback for content repair
recoverFunc := recovery.NewCallback(pssService)
ns = netstore.New(storer, recoverFunc, retrieve, logger)
} else {
ns = netstore.New(storer, nil, retrieve, logger)
}
traversalService := traversal.NewService(ns)
pushSyncProtocol := pushsync.New(p2ps, storer, kad, tagService, pssService.TryUnwrap, logger, acc, accounting.NewFixedPricer(swarmAddress, 1000000000), tracer)
// set the pushSyncer in the PSS
pssService.SetPushSyncer(pushSyncProtocol)
if err = p2ps.AddProtocol(pushSyncProtocol.Protocol()); err != nil {
return nil, fmt.Errorf("pushsync service: %w", err)
}
if o.GlobalPinningEnabled {
// register function for chunk repair upon receiving a trojan message
chunkRepairHandler := recovery.NewRepairHandler(ns, logger, pushSyncProtocol)
b.recoveryHandleCleanup = pssService.Register(recovery.Topic, chunkRepairHandler)
}
pushSyncPusher := pusher.New(storer, kad, pushSyncProtocol, tagService, logger, tracer)
b.pusherCloser = pushSyncPusher
pullStorage := pullstorage.New(storer)
pullSync := pullsync.New(p2ps, pullStorage, pssService.TryUnwrap, logger)
b.pullSyncCloser = pullSync
if err = p2ps.AddProtocol(pullSync.Protocol()); err != nil {
return nil, fmt.Errorf("pullsync protocol: %w", err)
}
puller := puller.New(stateStore, kad, pullSync, logger, puller.Options{})
b.pullerCloser = puller
multiResolver := multiresolver.NewMultiResolver(
multiresolver.WithConnectionConfigs(o.ResolverConnectionCfgs),
multiresolver.WithLogger(o.Logger),
)
b.resolverCloser = multiResolver
var apiService api.Service
if o.APIAddr != "" {
// API server
feedFactory := factory.New(ns)
apiService = api.New(tagService, ns, multiResolver, pssService, traversalService, feedFactory, logger, tracer, api.Options{
CORSAllowedOrigins: o.CORSAllowedOrigins,
GatewayMode: o.GatewayMode,
WsPingPeriod: 60 * time.Second,
})
apiListener, err := net.Listen("tcp", o.APIAddr)
if err != nil {
return nil, fmt.Errorf("api listener: %w", err)
}
apiServer := &http.Server{
IdleTimeout: 30 * time.Second,
ReadHeaderTimeout: 3 * time.Second,
Handler: apiService,
ErrorLog: log.New(b.errorLogWriter, "", 0),
}
go func() {
logger.Infof("api address: %s", apiListener.Addr())
if err := apiServer.Serve(apiListener); err != nil && err != http.ErrServerClosed {
logger.Debugf("api server: %v", err)
logger.Error("unable to serve api")
}
}()
b.apiServer = apiServer
b.apiCloser = apiService
}
if debugAPIService != nil {
// register metrics from components
debugAPIService.MustRegisterMetrics(p2ps.Metrics()...)
debugAPIService.MustRegisterMetrics(pingPong.Metrics()...)
debugAPIService.MustRegisterMetrics(acc.Metrics()...)
debugAPIService.MustRegisterMetrics(storer.Metrics()...)
debugAPIService.MustRegisterMetrics(puller.Metrics()...)
debugAPIService.MustRegisterMetrics(pushSyncProtocol.Metrics()...)
debugAPIService.MustRegisterMetrics(pushSyncPusher.Metrics()...)
debugAPIService.MustRegisterMetrics(pullSync.Metrics()...)
debugAPIService.MustRegisterMetrics(retrieve.Metrics()...)
if pssServiceMetrics, ok := pssService.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(pssServiceMetrics.Metrics()...)
}
if apiService != nil {
debugAPIService.MustRegisterMetrics(apiService.Metrics()...)
}
if l, ok := logger.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(l.Metrics()...)
}
if l, ok := settlement.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(l.Metrics()...)
}
// inject dependencies and configure full debug api http path routes
debugAPIService.Configure(p2ps, pingPong, kad, storer, tagService, acc, settlement, o.SwapEnable, swapService, chequebookService)
}
if err := kad.Start(p2pCtx); err != nil {
return nil, err
}
return b, nil
}
func (b *Bee) Shutdown(ctx context.Context) error {
errs := new(multiError)
if b.apiCloser != nil {
if err := b.apiCloser.Close(); err != nil {
errs.add(fmt.Errorf("api: %w", err))
}
}
var eg errgroup.Group
if b.apiServer != nil {
eg.Go(func() error {
if err := b.apiServer.Shutdown(ctx); err != nil {
return fmt.Errorf("api server: %w", err)
}
return nil
})
}
if b.debugAPIServer != nil {
eg.Go(func() error {
if err := b.debugAPIServer.Shutdown(ctx); err != nil {
return fmt.Errorf("debug api server: %w", err)
}
return nil
})
}
if err := eg.Wait(); err != nil {
errs.add(err)
}
if b.recoveryHandleCleanup != nil {
b.recoveryHandleCleanup()
}
if err := b.pusherCloser.Close(); err != nil {
errs.add(fmt.Errorf("pusher: %w", err))
}
if err := b.pullerCloser.Close(); err != nil {
errs.add(fmt.Errorf("puller: %w", err))
}
if err := b.pullSyncCloser.Close(); err != nil {
errs.add(fmt.Errorf("pull sync: %w", err))
}
if err := b.pssCloser.Close(); err != nil {
errs.add(fmt.Errorf("pss: %w", err))
}
b.p2pCancel()
if err := b.p2pService.Close(); err != nil {
errs.add(fmt.Errorf("p2p server: %w", err))
}
if c := b.ethClientCloser; c != nil {
c()
}
if err := b.tracerCloser.Close(); err != nil {
errs.add(fmt.Errorf("tracer: %w", err))
}
if err := b.tagsCloser.Close(); err != nil {
errs.add(fmt.Errorf("tag persistence: %w", err))
}
if err := b.stateStoreCloser.Close(); err != nil {
errs.add(fmt.Errorf("statestore: %w", err))
}
if err := b.localstoreCloser.Close(); err != nil {
errs.add(fmt.Errorf("localstore: %w", err))
}
if err := b.topologyCloser.Close(); err != nil {
errs.add(fmt.Errorf("topology driver: %w", err))
}
if err := b.errorLogWriter.Close(); err != nil {
errs.add(fmt.Errorf("error log writer: %w", err))
}
// Shutdown the resolver service only if it has been initialized.
if b.resolverCloser != nil {
if err := b.resolverCloser.Close(); err != nil {
errs.add(fmt.Errorf("resolver service: %w", err))
}
}
if errs.hasErrors() {
return errs
}
return nil
}
type multiError struct {
errors []error
}
func (e *multiError) Error() string {
if len(e.errors) == 0 {
return ""
}
s := e.errors[0].Error()
for _, err := range e.errors[1:] {
s += "; " + err.Error()
}
return s
}
func (e *multiError) add(err error) {
e.errors = append(e.errors, err)
}
func (e *multiError) hasErrors() bool {
return len(e.errors) > 0
}
| 1 | 14,212 | do we really need the `Ready` call if startup fails midway? | ethersphere-bee | go |
@@ -17,7 +17,9 @@ namespace storage {
TEST(AddVerticesTest, SimpleTest) {
fs::TempDir rootPath("/tmp/AddVerticesTest.XXXXXX");
- std::unique_ptr<kvstore::KVStore> kv = TestUtils::initKV(rootPath.path());
+ constexpr int32_t partitions = 6;
+ std::unique_ptr<kvstore::KVStore> kv = TestUtils::initKV(rootPath.path(), partitions,
+ {0, network::NetworkUtils::getAvailablePort()});
auto schemaMan = TestUtils::mockSchemaMan();
auto indexMan = TestUtils::mockIndexMan();
auto* processor = AddVerticesProcessor::instance(kv.get(), | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "utils/NebulaKeyUtils.h"
#include <gtest/gtest.h>
#include <rocksdb/db.h>
#include "fs/TempDir.h"
#include "storage/test/TestUtils.h"
#include "storage/mutate/AddVerticesProcessor.h"
namespace nebula {
namespace storage {
TEST(AddVerticesTest, SimpleTest) {
fs::TempDir rootPath("/tmp/AddVerticesTest.XXXXXX");
std::unique_ptr<kvstore::KVStore> kv = TestUtils::initKV(rootPath.path());
auto schemaMan = TestUtils::mockSchemaMan();
auto indexMan = TestUtils::mockIndexMan();
auto* processor = AddVerticesProcessor::instance(kv.get(),
schemaMan.get(),
indexMan.get(),
nullptr);
LOG(INFO) << "Build AddVerticesRequest...";
cpp2::AddVerticesRequest req;
req.space_id = 0;
req.overwritable = true;
// partId => List<Vertex>
// Vertex => {Id, List<VertexProp>}
// VertexProp => {tagId, tags}
for (PartitionID partId = 0; partId < 3; partId++) {
auto vertices = TestUtils::setupVertices(partId, partId * 10, 10 * (partId + 1));
req.parts.emplace(partId, std::move(vertices));
}
LOG(INFO) << "Test AddVerticesProcessor...";
auto fut = processor->getFuture();
processor->process(req);
auto resp = std::move(fut).get();
EXPECT_EQ(0, resp.result.failed_codes.size());
LOG(INFO) << "Check data in kv store...";
for (PartitionID partId = 0; partId < 3; partId++) {
for (VertexID vertexId = 10 * partId; vertexId < 10 * (partId + 1); vertexId++) {
auto prefix = NebulaKeyUtils::vertexPrefix(partId, vertexId);
std::unique_ptr<kvstore::KVIterator> iter;
EXPECT_EQ(kvstore::ResultCode::SUCCEEDED, kv->prefix(0, partId, prefix, &iter));
TagID tagId = 0;
while (iter->valid()) {
EXPECT_EQ(TestUtils::encodeValue(partId, vertexId, tagId), iter->val());
tagId++;
iter->next();
}
EXPECT_EQ(10, tagId);
}
}
}
} // namespace storage
} // namespace nebula
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
folly::init(&argc, &argv, true);
google::SetStderrLogging(google::INFO);
return RUN_ALL_TESTS();
}
| 1 | 28,922 | You can avoid changing the code if there are parameter defaults. right ? | vesoft-inc-nebula | cpp |
@@ -214,7 +214,7 @@ describe('Components', () => {
let good, bad;
let root = render(<GoodContainer ref={c=>good=c} />, scratch);
- expect(scratch.innerText, 'new component with key present').to.equal('A\nB');
+ expect(scratch.textContent, 'new component with key present').to.equal('AB');
expect(Comp.prototype.componentWillMount).to.have.been.calledTwice;
expect(sideEffect).to.have.been.calledTwice;
| 1 | import { h, render, rerender, Component } from '../../src/preact';
/** @jsx h */
// Wrap every own enumerable method of `obj` in a sinon spy (in place).
let spyAll = obj => { for (let key of Object.keys(obj)) sinon.spy(obj, key); };
/**
 * Snapshot a DOM node's attributes as a plain `{ name: value }` map.
 * Returns an empty object for nodes without an `attributes` collection
 * (e.g. text nodes).
 */
function getAttributes(node) {
	const attrs = {};
	const list = node.attributes;
	if (list) {
		// Walk backwards, mirroring the original countdown iteration.
		for (let i = list.length - 1; i >= 0; i--) {
			attrs[list[i].name] = list[i].value;
		}
	}
	return attrs;
}
// hacky normalization of attribute order across browsers.
// Rewrites each opening tag that has at least one attribute so its
// attributes appear in lexicographic order; self-closing tags are
// expanded to an explicit open/close pair.
function sortAttributes(html) {
	const TAG = /<([a-z0-9-]+)((?:\s[a-z0-9:_.-]+=".*?")+)((?:\s*\/)?>)/gi;
	const ATTR = /\s[a-z0-9:_.-]+=".*?"/gi;
	return html.replace(TAG, (match, name, attrs, close) => {
		const sorted = attrs.match(ATTR).sort((a, b) => (a > b ? 1 : -1));
		if (close.indexOf('/') !== -1) close = '></' + name + '>';
		return '<' + name + sorted.join('') + close;
	});
}
// Renders nothing; used by beforeEach to unmount the previous test's tree.
function Empty() {
	return null;
}
describe('Components', () => {
	// Shared container element that every test in this suite renders into.
	let scratch;

	before( () => {
		// Create the scratch root once and attach it to the document so
		// rendered output participates in a live DOM tree.
		scratch = document.createElement('div');
		(document.body || document.documentElement).appendChild(scratch);
	});

	beforeEach( () => {
		// Unmount whatever the previous test rendered (diffing <Empty />
		// against the old root runs the unmount path), then clear leftovers.
		let c = scratch.firstElementChild;
		if (c) render(<Empty />, scratch, c);
		scratch.innerHTML = '';
	});

	after( () => {
		// Detach and release the scratch container once the suite finishes.
		scratch.parentNode.removeChild(scratch);
		scratch = null;
	});
it('should render components', () => {
class C1 extends Component {
render() {
return <div>C1</div>;
}
}
sinon.spy(C1.prototype, 'render');
render(<C1 />, scratch);
expect(C1.prototype.render)
.to.have.been.calledOnce
.and.to.have.been.calledWithMatch({}, {})
.and.to.have.returned(sinon.match({ nodeName:'div' }));
expect(scratch.innerHTML).to.equal('<div>C1</div>');
});
it('should render functional components', () => {
const PROPS = { foo:'bar', onBaz:()=>{} };
const C3 = sinon.spy( props => <div {...props} /> );
render(<C3 {...PROPS} />, scratch);
expect(C3)
.to.have.been.calledOnce
.and.to.have.been.calledWithMatch(PROPS)
.and.to.have.returned(sinon.match({
nodeName: 'div',
attributes: PROPS
}));
expect(scratch.innerHTML).to.equal('<div foo="bar"></div>');
});
it('should render components with props', () => {
const PROPS = { foo:'bar', onBaz:()=>{} };
let constructorProps;
class C2 extends Component {
constructor(props) {
super(props);
constructorProps = props;
}
render(props) {
return <div {...props} />;
}
}
sinon.spy(C2.prototype, 'render');
render(<C2 {...PROPS} />, scratch);
expect(constructorProps).to.deep.equal(PROPS);
expect(C2.prototype.render)
.to.have.been.calledOnce
.and.to.have.been.calledWithMatch(PROPS, {})
.and.to.have.returned(sinon.match({
nodeName: 'div',
attributes: PROPS
}));
expect(scratch.innerHTML).to.equal('<div foo="bar"></div>');
});
// Test for Issue #73
it('should remove orphaned elements replaced by Components', () => {
class Comp extends Component {
render() {
return <span>span in a component</span>;
}
}
let root;
function test(content) {
root = render(content, scratch, root);
}
test(<Comp />);
test(<div>just a div</div>);
test(<Comp />);
expect(scratch.innerHTML).to.equal('<span>span in a component</span>');
});
// Test for Issue #176
it('should remove children when root changes to text node', () => {
let comp;
class Comp extends Component {
render(_, { alt }) {
return alt ? 'asdf' : <div>test</div>;
}
}
render(<Comp ref={c=>comp=c} />, scratch);
comp.setState({ alt:true });
comp.forceUpdate();
expect(scratch.innerHTML, 'switching to textnode').to.equal('asdf');
comp.setState({ alt:false });
comp.forceUpdate();
expect(scratch.innerHTML, 'switching to element').to.equal('<div>test</div>');
comp.setState({ alt:true });
comp.forceUpdate();
expect(scratch.innerHTML, 'switching to textnode 2').to.equal('asdf');
});
// Test for Issue #254
it('should not recycle common class children with different keys', () => {
let idx = 0;
let msgs = ['A','B','C','D','E','F','G','H'];
let comp1, comp2, comp3;
let sideEffect = sinon.spy();
class Comp extends Component {
componentWillMount() {
this.innerMsg = msgs[(idx++ % 8)];
console.log('innerMsg', this.innerMsg);
sideEffect();
}
render() {
return <div>{this.innerMsg}</div>;
}
}
sinon.spy(Comp.prototype, 'componentWillMount');
class GoodContainer extends Component {
constructor(props) {
super(props);
this.state.alt = false;
}
render(_, {alt}) {
return (
<div>
{alt ? null : (<Comp ref={c=>comp1=c} key={1} alt={alt}/>)}
{alt ? null : (<Comp ref={c=>comp2=c} key={2} alt={alt}/>)}
{alt ? (<Comp ref={c=>comp3=c} key={3} alt={alt}/>) : null}
</div>
);
}
}
class BadContainer extends Component {
constructor(props) {
super(props);
this.state.alt = false;
}
render(_, {alt}) {
return (
<div>
{alt ? null : (<Comp ref={c=>comp1=c} alt={alt}/>)}
{alt ? null : (<Comp ref={c=>comp2=c} alt={alt}/>)}
{alt ? (<Comp ref={c=>comp3=c} alt={alt}/>) : null}
</div>
);
}
}
let good, bad;
let root = render(<GoodContainer ref={c=>good=c} />, scratch);
expect(scratch.innerText, 'new component with key present').to.equal('A\nB');
expect(Comp.prototype.componentWillMount).to.have.been.calledTwice;
expect(sideEffect).to.have.been.calledTwice;
sideEffect.reset();
Comp.prototype.componentWillMount.reset();
good.setState({alt: true});
good.forceUpdate();
expect(scratch.innerText, 'new component with key present re-rendered').to.equal('C');
//we are recycling the first 2 components already rendered, just need a new one
expect(Comp.prototype.componentWillMount).to.have.been.calledOnce;
expect(sideEffect).to.have.been.calledOnce;
sideEffect.reset();
Comp.prototype.componentWillMount.reset();
root = render(<BadContainer ref={c=>bad=c} />, scratch, root);
expect(scratch.innerText, 'new component without key').to.equal('D\nE');
expect(Comp.prototype.componentWillMount).to.have.been.calledTwice;
expect(sideEffect).to.have.been.calledTwice;
sideEffect.reset();
Comp.prototype.componentWillMount.reset();
bad.setState({alt: true});
bad.forceUpdate();
expect(scratch.innerText, 'new component without key re-rendered').to.equal('D');
expect(Comp.prototype.componentWillMount).to.not.have.been.called;
expect(sideEffect).to.not.have.been.called;
});
describe('props.children', () => {
it('should support passing children as a prop', () => {
const Foo = props => <div {...props} />;
render(<Foo a="b" children={[
<span class="bar">bar</span>,
'123',
456
]} />, scratch);
expect(scratch.innerHTML).to.equal('<div a="b"><span class="bar">bar</span>123456</div>');
});
it('should be ignored when explicit children exist', () => {
const Foo = props => <div {...props}>a</div>;
render(<Foo children={'b'} />, scratch);
expect(scratch.innerHTML).to.equal('<div>a</div>');
});
});
	describe('High-Order Components', () => {
		it('should render nested functional components', () => {
			// Outer forwards its props to Inner; both should be invoked
			// exactly once and the final DOM should come from Inner.
			const PROPS = { foo:'bar', onBaz:()=>{} };

			const Outer = sinon.spy(
				props => <Inner {...props} />
			);

			const Inner = sinon.spy(
				props => <div {...props}>inner</div>
			);

			render(<Outer {...PROPS} />, scratch);

			// Outer returns a vnode whose nodeName is the Inner function itself.
			expect(Outer)
				.to.have.been.calledOnce
				.and.to.have.been.calledWithMatch(PROPS)
				.and.to.have.returned(sinon.match({
					nodeName: Inner,
					attributes: PROPS
				}));

			expect(Inner)
				.to.have.been.calledOnce
				.and.to.have.been.calledWithMatch(PROPS)
				.and.to.have.returned(sinon.match({
					nodeName: 'div',
					attributes: PROPS,
					children: ['inner']
				}));

			expect(scratch.innerHTML).to.equal('<div foo="bar">inner</div>');
		});
		it('should re-render nested functional components', () => {
			// A stateful Outer wraps a functional Inner. Each state update
			// must re-invoke Inner (without unmounting Outer) and propagate
			// the new props down to the DOM.
			let doRender = null;
			class Outer extends Component {
				componentDidMount() {
					// Expose a hook the test uses to trigger state updates.
					let i = 1;
					doRender = () => this.setState({ i: ++i });
				}
				componentWillUnmount() {}
				render(props, { i }) {
					return <Inner i={i} {...props} />;
				}
			}
			sinon.spy(Outer.prototype, 'render');
			sinon.spy(Outer.prototype, 'componentWillUnmount');

			// `j` counts how many times Inner actually executed.
			let j = 0;
			const Inner = sinon.spy(
				props => <div j={ ++j } {...props}>inner</div>
			);

			render(<Outer foo="bar" />, scratch);

			// update & flush
			doRender();
			rerender();

			expect(Outer.prototype.componentWillUnmount)
				.not.to.have.been.called;

			expect(Inner).to.have.been.calledTwice;
			expect(Inner.secondCall)
				.to.have.been.calledWithMatch({ foo:'bar', i:2 })
				.and.to.have.returned(sinon.match({
					attributes: {
						j: 2,
						i: 2,
						foo: 'bar'
					}
				}));

			// DOM attributes are stringified versions of the vnode props.
			expect(getAttributes(scratch.firstElementChild)).to.eql({
				j: '2',
				i: '2',
				foo: 'bar'
			});

			// update & flush
			doRender();
			rerender();

			expect(Inner).to.have.been.calledThrice;
			expect(Inner.thirdCall)
				.to.have.been.calledWithMatch({ foo:'bar', i:3 })
				.and.to.have.returned(sinon.match({
					attributes: {
						j: 3,
						i: 3,
						foo: 'bar'
					}
				}));

			expect(getAttributes(scratch.firstElementChild)).to.eql({
				j: '3',
				i: '3',
				foo: 'bar'
			});
		});
		it('should re-render nested components', () => {
			// Like the functional variant above, but Inner is a class so the
			// full lifecycle (constructor, mount, unmount, remount) can be
			// asserted as Outer swaps its output between Inner and a bare div.
			let doRender = null,
				alt = false;
			class Outer extends Component {
				componentDidMount() {
					// Expose a hook the test uses to trigger state updates.
					let i = 1;
					doRender = () => this.setState({ i: ++i });
				}
				componentWillUnmount() {}
				render(props, { i }) {
					// When `alt` is set, Outer replaces Inner entirely.
					if (alt) return <div is-alt />;
					return <Inner i={i} {...props} />;
				}
			}
			sinon.spy(Outer.prototype, 'render');
			sinon.spy(Outer.prototype, 'componentDidMount');
			sinon.spy(Outer.prototype, 'componentWillUnmount');

			// `j` counts how many times Inner.render actually executed.
			let j = 0;
			class Inner extends Component {
				constructor(...args) {
					super();
					this._constructor(...args);
				}
				_constructor() {}
				componentWillMount() {}
				componentDidMount() {}
				componentWillUnmount() {}
				componentDidUnmount() {}
				render(props) {
					return <div j={ ++j } {...props}>inner</div>;
				}
			}
			sinon.spy(Inner.prototype, '_constructor');
			sinon.spy(Inner.prototype, 'render');
			sinon.spy(Inner.prototype, 'componentWillMount');
			sinon.spy(Inner.prototype, 'componentDidMount');
			sinon.spy(Inner.prototype, 'componentDidUnmount');
			sinon.spy(Inner.prototype, 'componentWillUnmount');

			render(<Outer foo="bar" />, scratch);

			expect(Outer.prototype.componentDidMount).to.have.been.calledOnce;

			// update & flush
			doRender();
			rerender();

			// Re-render must reuse the same Inner instance: constructed and
			// mounted once, never unmounted, rendered again.
			expect(Outer.prototype.componentWillUnmount).not.to.have.been.called;

			expect(Inner.prototype._constructor).to.have.been.calledOnce;
			expect(Inner.prototype.componentWillUnmount).not.to.have.been.called;
			expect(Inner.prototype.componentDidUnmount).not.to.have.been.called;
			expect(Inner.prototype.componentWillMount).to.have.been.calledOnce;
			expect(Inner.prototype.componentDidMount).to.have.been.calledOnce;
			expect(Inner.prototype.render).to.have.been.calledTwice;

			expect(Inner.prototype.render.secondCall)
				.to.have.been.calledWithMatch({ foo:'bar', i:2 })
				.and.to.have.returned(sinon.match({
					attributes: {
						j: 2,
						i: 2,
						foo: 'bar'
					}
				}));

			expect(getAttributes(scratch.firstElementChild)).to.eql({
				j: '2',
				i: '2',
				foo: 'bar'
			});

			expect(sortAttributes(scratch.innerHTML)).to.equal(sortAttributes('<div foo="bar" j="2" i="2">inner</div>'));

			// update & flush
			doRender();
			rerender();

			expect(Inner.prototype.componentWillUnmount).not.to.have.been.called;
			expect(Inner.prototype.componentDidUnmount).not.to.have.been.called;
			expect(Inner.prototype.componentWillMount).to.have.been.calledOnce;
			expect(Inner.prototype.componentDidMount).to.have.been.calledOnce;
			expect(Inner.prototype.render).to.have.been.calledThrice;

			expect(Inner.prototype.render.thirdCall)
				.to.have.been.calledWithMatch({ foo:'bar', i:3 })
				.and.to.have.returned(sinon.match({
					attributes: {
						j: 3,
						i: 3,
						foo: 'bar'
					}
				}));

			expect(getAttributes(scratch.firstElementChild)).to.eql({
				j: '3',
				i: '3',
				foo: 'bar'
			});

			// update & flush
			// Swapping Outer's output away from Inner must unmount Inner.
			alt = true;
			doRender();
			rerender();

			expect(Inner.prototype.componentWillUnmount).to.have.been.calledOnce;
			expect(Inner.prototype.componentDidUnmount).to.have.been.calledOnce;
			expect(scratch.innerHTML).to.equal('<div is-alt="true"></div>');

			// update & flush
			// Swapping back remounts Inner (j advances to 4, i to 5).
			alt = false;
			doRender();
			rerender();

			expect(sortAttributes(scratch.innerHTML)).to.equal(sortAttributes('<div foo="bar" j="4" i="5">inner</div>'));
		});
		it('should resolve intermediary functional component', () => {
			// A functional component sits between a context-providing Root
			// and a class Inner; Inner's lifecycle must still fire in order
			// on mount and on unmount.
			let ctx = {};
			class Root extends Component {
				getChildContext() {
					return { ctx };
				}
				render() {
					return <Func />;
				}
			}

			const Func = sinon.spy( () => <Inner /> );

			class Inner extends Component {
				componentWillMount() {}
				componentDidMount() {}
				componentWillUnmount() {}
				componentDidUnmount() {}
				render() {
					return <div>inner</div>;
				}
			}
			spyAll(Inner.prototype);

			let root = render(<Root />, scratch);

			expect(Inner.prototype.componentWillMount).to.have.been.calledOnce;
			expect(Inner.prototype.componentDidMount).to.have.been.calledOnce;
			expect(Inner.prototype.componentWillMount).to.have.been.calledBefore(Inner.prototype.componentDidMount);

			// Replace the whole tree with an unrelated element to unmount.
			render(<asdf />, scratch, root);

			expect(Inner.prototype.componentWillUnmount).to.have.been.calledOnce;
			expect(Inner.prototype.componentDidUnmount).to.have.been.calledOnce;
			expect(Inner.prototype.componentWillUnmount).to.have.been.calledBefore(Inner.prototype.componentDidUnmount);
		});
		it('should unmount children of high-order components without unmounting parent', () => {
			// Outer renders whichever component class is in its state; the
			// test swaps that child and asserts only the child remounts.
			let outer, inner2, counter=0;

			class Outer extends Component {
				constructor(props, context) {
					super(props, context);
					outer = this;
					this.state = {
						child: this.props.child
					};
				}
				componentWillUnmount(){}
				componentDidUnmount(){}
				componentWillMount(){}
				componentDidMount(){}
				render(_, { child:C }) {
					return <C />;
				}
			}
			spyAll(Outer.prototype);

			class Inner extends Component {
				componentWillUnmount(){}
				componentDidUnmount(){}
				componentWillMount(){}
				componentDidMount(){}
				render() {
					// A fresh element name per render defeats DOM reuse.
					return h('element'+(++counter));
				}
			}
			spyAll(Inner.prototype);

			class Inner2 extends Component {
				constructor(props, context) {
					super(props, context);
					inner2 = this;
				}
				componentWillUnmount(){}
				componentDidUnmount(){}
				componentWillMount(){}
				componentDidMount(){}
				render() {
					return h('element'+(++counter));
				}
			}
			spyAll(Inner2.prototype);

			render(<Outer child={Inner} />, scratch);

			// outer should only have been mounted once
			expect(Outer.prototype.componentWillMount, 'outer initial').to.have.been.calledOnce;
			expect(Outer.prototype.componentDidMount, 'outer initial').to.have.been.calledOnce;
			expect(Outer.prototype.componentWillUnmount, 'outer initial').not.to.have.been.called;
			expect(Outer.prototype.componentDidUnmount, 'outer initial').not.to.have.been.called;

			// inner should only have been mounted once
			expect(Inner.prototype.componentWillMount, 'inner initial').to.have.been.calledOnce;
			expect(Inner.prototype.componentDidMount, 'inner initial').to.have.been.calledOnce;
			expect(Inner.prototype.componentWillUnmount, 'inner initial').not.to.have.been.called;
			expect(Inner.prototype.componentDidUnmount, 'inner initial').not.to.have.been.called;

			// Swap the child class; Outer itself must not remount.
			outer.setState({ child:Inner2 });
			outer.forceUpdate();

			expect(Inner2.prototype.render).to.have.been.calledOnce;

			// outer should still only have been mounted once
			expect(Outer.prototype.componentWillMount, 'outer swap').to.have.been.calledOnce;
			expect(Outer.prototype.componentDidMount, 'outer swap').to.have.been.calledOnce;
			expect(Outer.prototype.componentWillUnmount, 'outer swap').not.to.have.been.called;
			expect(Outer.prototype.componentDidUnmount, 'outer swap').not.to.have.been.called;

			// inner should only have been mounted once
			expect(Inner2.prototype.componentWillMount, 'inner2 swap').to.have.been.calledOnce;
			expect(Inner2.prototype.componentDidMount, 'inner2 swap').to.have.been.calledOnce;
			expect(Inner2.prototype.componentWillUnmount, 'inner2 swap').not.to.have.been.called;
			expect(Inner2.prototype.componentDidUnmount, 'inner2 swap').not.to.have.been.called;

			// A self-update of the new child re-renders without remounting.
			inner2.forceUpdate();

			expect(Inner2.prototype.render, 'inner2 update').to.have.been.calledTwice;
			expect(Inner2.prototype.componentWillMount, 'inner2 update').to.have.been.calledOnce;
			expect(Inner2.prototype.componentDidMount, 'inner2 update').to.have.been.calledOnce;
			expect(Inner2.prototype.componentWillUnmount, 'inner2 update').not.to.have.been.called;
			expect(Inner2.prototype.componentDidUnmount, 'inner2 update').not.to.have.been.called;
		});
it('should remount when swapping between HOC child types', () => {
class Outer extends Component {
render({ child: Child }) {
return <Child />;
}
}
class Inner extends Component {
componentWillMount() {}
componentWillUnmount() {}
render() {
return <div class="inner">foo</div>;
}
}
spyAll(Inner.prototype);
const InnerFunc = () => (
<div class="inner-func">bar</div>
);
let root = render(<Outer child={Inner} />, scratch, root);
expect(Inner.prototype.componentWillMount, 'initial mount').to.have.been.calledOnce;
expect(Inner.prototype.componentWillUnmount, 'initial mount').not.to.have.been.called;
Inner.prototype.componentWillMount.reset();
root = render(<Outer child={InnerFunc} />, scratch, root);
expect(Inner.prototype.componentWillMount, 'unmount').not.to.have.been.called;
expect(Inner.prototype.componentWillUnmount, 'unmount').to.have.been.calledOnce;
Inner.prototype.componentWillUnmount.reset();
root = render(<Outer child={Inner} />, scratch, root);
expect(Inner.prototype.componentWillMount, 'remount').to.have.been.calledOnce;
expect(Inner.prototype.componentWillUnmount, 'remount').not.to.have.been.called;
});
});
	describe('Component Nesting', () => {
		// When truthy, each generated component wraps its children in an
		// intermediary (its bound function, or the value itself if not `true`).
		let useIntermediary = false;

		// Factory for a spied class component that passes its children
		// through, optionally via the given Intermediary.
		let createComponent = (Intermediary) => {
			class C extends Component {
				componentWillMount() {}
				componentDidUnmount() {}
				render({ children }) {
					if (!useIntermediary) return children[0];
					let I = useIntermediary===true ? Intermediary : useIntermediary;
					return <I>{children}</I>;
				}
			}
			spyAll(C.prototype);
			return C;
		};

		// Factory for a spied pass-through functional component.
		let createFunction = () => sinon.spy( ({ children }) => children[0] );

		// Rolling render root so each rndr() diffs against the previous tree.
		let root;
		let rndr = n => root = render(n, scratch, root);

		let F1 = createFunction();
		let F2 = createFunction();
		let F3 = createFunction();

		let C1 = createComponent(F1);
		let C2 = createComponent(F2);
		let C3 = createComponent(F3);

		// Clear call history on every spy (class methods and the functions).
		let reset = () => [C1, C2, C3].reduce(
			(acc, c) => acc.concat( Object.keys(c.prototype).map(key => c.prototype[key]) ),
			[F1, F2, F3]
		).forEach( c => c.reset && c.reset() );
it('should handle lifecycle for no intermediary in component tree', () => {
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentWillMount, 'initial mount').to.have.been.calledOnce;
expect(C2.prototype.componentWillMount, 'initial mount').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'initial mount').to.have.been.calledOnce;
reset();
rndr(<C1><C2>Some Text</C2></C1>);
expect(C1.prototype.componentWillMount, 'unmount innermost, C1').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'unmount innermost, C2').not.to.have.been.called;
expect(C3.prototype.componentDidUnmount, 'unmount innermost, C3').to.have.been.calledOnce;
reset();
rndr(<C1><C3>Some Text</C3></C1>);
expect(C1.prototype.componentWillMount, 'swap innermost').not.to.have.been.called;
expect(C2.prototype.componentDidUnmount, 'swap innermost').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'swap innermost').to.have.been.calledOnce;
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentDidUnmount, 'inject between, C1').not.to.have.been.called;
expect(C1.prototype.componentWillMount, 'inject between, C1').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'inject between, C2').to.have.been.calledOnce;
expect(C3.prototype.componentDidUnmount, 'inject between, C3').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'inject between, C3').to.have.been.calledOnce;
});
it('should handle lifecycle for nested intermediary functional components', () => {
useIntermediary = true;
rndr(<div />);
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentWillMount, 'initial mount w/ intermediary fn, C1').to.have.been.calledOnce;
expect(C2.prototype.componentWillMount, 'initial mount w/ intermediary fn, C2').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'initial mount w/ intermediary fn, C3').to.have.been.calledOnce;
reset();
rndr(<C1><C2>Some Text</C2></C1>);
expect(C1.prototype.componentWillMount, 'unmount innermost w/ intermediary fn, C1').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'unmount innermost w/ intermediary fn, C2').not.to.have.been.called;
expect(C3.prototype.componentDidUnmount, 'unmount innermost w/ intermediary fn, C3').to.have.been.calledOnce;
reset();
rndr(<C1><C3>Some Text</C3></C1>);
expect(C1.prototype.componentWillMount, 'swap innermost w/ intermediary fn').not.to.have.been.called;
expect(C2.prototype.componentDidUnmount, 'swap innermost w/ intermediary fn').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'swap innermost w/ intermediary fn').to.have.been.calledOnce;
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentDidUnmount, 'inject between, C1 w/ intermediary fn').not.to.have.been.called;
expect(C1.prototype.componentWillMount, 'inject between, C1 w/ intermediary fn').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'inject between, C2 w/ intermediary fn').to.have.been.calledOnce;
expect(C3.prototype.componentDidUnmount, 'inject between, C3 w/ intermediary fn').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'inject between, C3 w/ intermediary fn').to.have.been.calledOnce;
});
it('should handle lifecycle for nested intermediary elements', () => {
useIntermediary = 'div';
rndr(<div />);
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentWillMount, 'initial mount w/ intermediary div, C1').to.have.been.calledOnce;
expect(C2.prototype.componentWillMount, 'initial mount w/ intermediary div, C2').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'initial mount w/ intermediary div, C3').to.have.been.calledOnce;
reset();
rndr(<C1><C2>Some Text</C2></C1>);
expect(C1.prototype.componentWillMount, 'unmount innermost w/ intermediary div, C1').not.to.have.been.called;
expect(C2.prototype.componentDidUnmount, 'unmount innermost w/ intermediary div, C2 ummount').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'unmount innermost w/ intermediary div, C2').not.to.have.been.called;
expect(C3.prototype.componentDidUnmount, 'unmount innermost w/ intermediary div, C3').to.have.been.calledOnce;
reset();
rndr(<C1><C3>Some Text</C3></C1>);
expect(C1.prototype.componentWillMount, 'swap innermost w/ intermediary div').not.to.have.been.called;
expect(C2.prototype.componentDidUnmount, 'swap innermost w/ intermediary div').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'swap innermost w/ intermediary div').to.have.been.calledOnce;
reset();
rndr(<C1><C2><C3>Some Text</C3></C2></C1>);
expect(C1.prototype.componentDidUnmount, 'inject between, C1 w/ intermediary div').not.to.have.been.called;
expect(C1.prototype.componentWillMount, 'inject between, C1 w/ intermediary div').not.to.have.been.called;
expect(C2.prototype.componentWillMount, 'inject between, C2 w/ intermediary div').to.have.been.calledOnce;
expect(C3.prototype.componentDidUnmount, 'inject between, C3 w/ intermediary div').to.have.been.calledOnce;
expect(C3.prototype.componentWillMount, 'inject between, C3 w/ intermediary div').to.have.been.calledOnce;
});
});
});
| 1 | 10,224 | I wonder why the newline disappeared here? I guess we'll merge and see how SauceLabs fares across the supported browsers. | preactjs-preact | js |
@@ -4252,10 +4252,15 @@ void command_corpsefix(Client *c, const Seperator *sep)
void command_reloadworld(Client *c, const Seperator *sep)
{
- c->Message(Chat::White, "Reloading quest cache and repopping zones worldwide.");
+ int world_repop = atoi(sep->arg[1]);
+ if (world_repop == 0)
+ c->Message(Chat::White, "Reloading quest cache worldwide.");
+ else
+ c->Message(Chat::White, "Reloading quest cache and repopping zones worldwide.");
+
auto pack = new ServerPacket(ServerOP_ReloadWorld, sizeof(ReloadWorld_Struct));
ReloadWorld_Struct* RW = (ReloadWorld_Struct*) pack->pBuffer;
- RW->Option = ((atoi(sep->arg[1]) == 1) ? 1 : 0);
+ RW->Option = world_repop;
worldserver.SendPacket(pack);
safe_delete(pack);
} | 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2016 EQEMu Development Team (http://eqemulator.org)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
To add a new command 3 things must be done:
1. At the bottom of command.h you must add a prototype for it.
2. Add the function in this file.
3. In the command_init function you must add a call to command_add
for your function.
Notes: If you want an alias for your command, add an entry to the
`command_settings` table in your database. The access level you
set with command_add is the default setting if the command isn't
listed in the `command_settings` db table.
*/
#include <string.h>
#include <stdlib.h>
#include <sstream>
#include <algorithm>
#include <ctime>
#include <thread>
#include <fmt/format.h>
#ifdef _WINDOWS
#define strcasecmp _stricmp
#endif
#include "../common/global_define.h"
#include "../common/eq_packet.h"
#include "../common/features.h"
#include "../common/guilds.h"
#include "../common/patches/patches.h"
#include "../common/ptimer.h"
#include "../common/rulesys.h"
#include "../common/serverinfo.h"
#include "../common/string_util.h"
#include "../common/say_link.h"
#include "../common/eqemu_logsys.h"
#include "../common/profanity_manager.h"
#include "../common/net/eqstream.h"
#include "data_bucket.h"
#include "command.h"
#include "guild_mgr.h"
#include "map.h"
#include "qglobals.h"
#include "queryserv.h"
#include "quest_parser_collection.h"
#include "string_ids.h"
#include "titles.h"
#include "water_map.h"
#include "worldserver.h"
#include "fastmath.h"
#include "mob_movement_manager.h"
#include "npc_scale_manager.h"
extern QueryServ* QServ;
extern WorldServer worldserver;
extern TaskManager *taskmanager;
extern FastMath g_Math;
void CatchSignal(int sig_num);
int commandcount; // how many commands we have
// this is the pointer to the dispatch function, updated once
// init has been performed to point at the real function
int (*command_dispatch)(Client *,char const *)=command_notavail;
void command_bestz(Client *c, const Seperator *message);
void command_pf(Client *c, const Seperator *message);
std::map<std::string, CommandRecord *> commandlist;
std::map<std::string, std::string> commandaliases;
// All allocated CommandRecords get put in here so they get deleted on shutdown
LinkedList<CommandRecord *> cleanup_commandlist;
/*
* command_notavail
* This is the default dispatch function when commands aren't loaded.
*
* Parameters:
* not used
*
*/
int command_notavail(Client *c, const char *message)
{
c->Message(Chat::Red, "Commands not available.");
return -1;
}
/**************************************************************************
/* the rest below here could be in a dynamically loaded module eventually *
/*************************************************************************/
/*
Access Levels:
0 Normal
10 * Steward *
20 * Apprentice Guide *
50 * Guide *
80 * QuestTroupe *
81 * Senior Guide *
85 * GM-Tester *
90 * EQ Support *
95 * GM-Staff *
100 * GM-Admin *
150 * GM-Lead Admin *
160 * QuestMaster *
170 * GM-Areas *
180 * GM-Coder *
200 * GM-Mgmt *
250 * GM-Impossible *
*/
/*
* command_init
* initializes the command list, call at startup
*
* Parameters:
* none
*
* When adding a new command, only hard-code 'real' commands -
* all command aliases are added later through a database call
*
*/
int command_init(void)
{
commandaliases.clear();
if (
command_add("acceptrules", "[acceptrules] - Accept the EQEmu Agreement", 0, command_acceptrules) ||
command_add("advnpcspawn", "[maketype|makegroup|addgroupentry|addgroupspawn][removegroupspawn|movespawn|editgroupbox|cleargroupbox]", 150, command_advnpcspawn) ||
command_add("aggro", "(range) [-v] - Display aggro information for all mobs 'range' distance from your target. -v is verbose faction info.", 80, command_aggro) ||
command_add("aggrozone", "[aggro] - Aggro every mob in the zone with X aggro. Default is 0. Not recommend if you're not invulnerable.", 100, command_aggrozone) ||
command_add("ai", "[factionid/spellslist/con/guard/roambox/stop/start] - Modify AI on NPC target", 100, command_ai) ||
command_add("appearance", "[type] [value] - Send an appearance packet for you or your target", 150, command_appearance) ||
command_add("apply_shared_memory", "[shared_memory_name] - Tells every zone and world to apply a specific shared memory segment by name.", 250, command_apply_shared_memory) ||
command_add("attack", "[targetname] - Make your NPC target attack targetname", 150, command_attack) ||
command_add("augmentitem", "Force augments an item. Must have the augment item window open.", 250, command_augmentitem) ||
command_add("ban", "[name] [reason]- Ban by character name", 150, command_ban) ||
command_add("beard", "- Change the beard of your target", 80, command_beard) ||
command_add("beardcolor", "- Change the beard color of your target", 80, command_beardcolor) ||
command_add("bestz", "- Ask map for a good Z coord for your x,y coords.", 0, command_bestz) ||
command_add("bind", "- Sets your targets bind spot to their current location", 200, command_bind) ||
#ifdef BOTS
command_add("bot", "- Type \"#bot help\" or \"^help\" to the see the list of available commands for bots.", 0, command_bot) ||
#endif
command_add("camerashake", "Shakes the camera on everyone's screen globally.", 80, command_camerashake) ||
command_add("castspell", "[spellid] - Cast a spell", 50, command_castspell) ||
command_add("chat", "[channel num] [message] - Send a channel message to all zones", 200, command_chat) ||
command_add("checklos", "- Check for line of sight to your target", 50, command_checklos) ||
command_add("corpse", "- Manipulate corpses, use with no arguments for help", 50, command_corpse) ||
command_add("corpsefix", "Attempts to bring corpses from underneath the ground within close proximity of the player", 0, command_corpsefix) ||
command_add("crashtest", "- Crash the zoneserver", 255, command_crashtest) ||
command_add("cvs", "- Summary of client versions currently online.", 200, command_cvs) ||
command_add("damage", "[amount] - Damage your target", 100, command_damage) ||
command_add("databuckets", "View|Delete [key] [limit]- View data buckets, limit 50 default or Delete databucket by key", 80, command_databuckets) ||
command_add("date", "[yyyy] [mm] [dd] [HH] [MM] - Set EQ time", 90, command_date) ||
command_add("dbspawn2", "[spawngroup] [respawn] [variance] - Spawn an NPC from a predefined row in the spawn2 table", 100, command_dbspawn2) ||
command_add("delacct", "[accountname] - Delete an account", 150, command_delacct) ||
command_add("deletegraveyard", "[zone name] - Deletes the graveyard for the specified zone.", 200, command_deletegraveyard) ||
command_add("delpetition", "[petition number] - Delete a petition", 20, command_delpetition) ||
command_add("depop", "- Depop your NPC target", 50, command_depop) ||
command_add("depopzone", "- Depop the zone", 100, command_depopzone) ||
command_add("devtools", "- Manages devtools", 200, command_devtools) ||
command_add("details", "- Change the details of your target (Drakkin Only)", 80, command_details) ||
command_add("disablerecipe", "[recipe_id] - Disables a recipe using the recipe id.", 80, command_disablerecipe) ||
command_add("disarmtrap", "Analog for ldon disarm trap for the newer clients since we still don't have it working.", 80, command_disarmtrap) ||
command_add("distance", "- Reports the distance between you and your target.", 80, command_distance) ||
command_add("doanim", "[animnum] [type] - Send an EmoteAnim for you or your target", 50, command_doanim) ||
command_add("emote", "['name'/'world'/'zone'] [type] [message] - Send an emote message", 80, command_emote) ||
command_add("emotesearch", "Searches NPC Emotes", 80, command_emotesearch) ||
command_add("emoteview", "Lists all NPC Emotes", 80, command_emoteview) ||
command_add("enablerecipe", "[recipe_id] - Enables a recipe using the recipe id.", 80, command_enablerecipe) ||
command_add("equipitem", "[slotid(0-21)] - Equip the item on your cursor into the specified slot", 50, command_equipitem) ||
command_add("face", "- Change the face of your target", 80, command_face) ||
command_add("faction", "[Find (criteria | all ) | Review (criteria | all) | Reset (id)] - Resets Player's Faction", 80, command_faction) ||
command_add("findaliases", "[search term]- Searches for available command aliases, by alias or command", 0, command_findaliases) ||
command_add("findnpctype", "[search criteria] - Search database NPC types", 100, command_findnpctype) ||
command_add("findspell", "[searchstring] - Search for a spell", 50, command_findspell) ||
command_add("findzone", "[search criteria] - Search database zones", 100, command_findzone) ||
command_add("fixmob", "[race|gender|texture|helm|face|hair|haircolor|beard|beardcolor|heritage|tattoo|detail] [next|prev] - Manipulate appearance of your target", 80, command_fixmob) ||
command_add("flag", "[status] [acctname] - Refresh your admin status, or set an account's admin status if arguments provided", 0, command_flag) ||
command_add("flagedit", "- Edit zone flags on your target", 100, command_flagedit) ||
command_add("flags", "- displays the flags of you or your target", 0, command_flags) ||
command_add("flymode", "[0/1/2/3/4/5] - Set your or your player target's flymode to ground/flying/levitate/water/floating/levitate_running", 50, command_flymode) ||
command_add("fov", "- Check wether you're behind or in your target's field of view", 80, command_fov) ||
command_add("freeze", "- Freeze your target", 80, command_freeze) ||
command_add("gassign", "[id] - Assign targetted NPC to predefined wandering grid id", 100, command_gassign) ||
command_add("gender", "[0/1/2] - Change your or your target's gender to male/female/neuter", 50, command_gender) ||
command_add("getplayerburiedcorpsecount", "- Get the target's total number of buried player corpses.", 100, command_getplayerburiedcorpsecount) ||
command_add("getvariable", "[varname] - Get the value of a variable from the database", 200, command_getvariable) ||
command_add("ginfo", "- get group info on target.", 20, command_ginfo) ||
command_add("giveitem", "[itemid] [charges] - Summon an item onto your target's cursor. Charges are optional.", 200, command_giveitem) ||
command_add("givemoney", "[pp] [gp] [sp] [cp] - Gives specified amount of money to the target player.", 200, command_givemoney) ||
command_add("globalview", "Lists all qglobals in cache if you were to do a quest with this target.", 80, command_globalview) ||
command_add("gm", "- Turn player target's or your GM flag on or off", 80, command_gm) ||
command_add("gmspeed", "[on/off] - Turn GM speed hack on/off for you or your player target", 100, command_gmspeed) ||
command_add("gmzone", "[zone_short_name] [zone_version=0] [identifier=gmzone] - Zones to a private GM instance", 100, command_gmzone) ||
command_add("goto", "[playername] or [x y z] [h] - Teleport to the provided coordinates or to your target", 10, command_goto) ||
command_add("grid", "[add/delete] [grid_num] [wandertype] [pausetype] - Create/delete a wandering grid", 170, command_grid) ||
command_add("guild", "- Guild manipulation commands. Use argument help for more info.", 10, command_guild) ||
command_add("guildapprove", "[guildapproveid] - Approve a guild with specified ID (guild creator receives the id)", 0, command_guildapprove) ||
command_add("guildcreate", "[guildname] - Creates an approval setup for guild name specified", 0, command_guildcreate) ||
command_add("guildlist", "[guildapproveid] - Lists character names who have approved the guild specified by the approve id", 0, command_guildlist) ||
command_add("hair", "- Change the hair style of your target", 80, command_hair) ||
command_add("haircolor", "- Change the hair color of your target", 80, command_haircolor) ||
command_add("haste", "[percentage] - Set your haste percentage", 100, command_haste) ||
command_add("hatelist", " - Display hate list for target.", 80, command_hatelist) ||
command_add("heal", "- Completely heal your target", 10, command_heal) ||
command_add("helm", "- Change the helm of your target", 80, command_helm) ||
command_add("help", "[search term] - List available commands and their description, specify partial command as argument to search", 0, command_help) ||
command_add("heritage", "- Change the heritage of your target (Drakkin Only)", 80, command_heritage) ||
command_add("heromodel", "[hero model] [slot] - Full set of Hero's Forge Armor appearance. If slot is set, sends exact model just to slot.", 200, command_heromodel) ||
command_add("hideme", "[on/off] - Hide yourself from spawn lists.", 80, command_hideme) ||
command_add("hotfix", "[hotfix_name] - Reloads shared memory into a hotfix, equiv to load_shared_memory followed by apply_shared_memory", 250, command_hotfix) ||
command_add("hp", "- Refresh your HP bar from the server.", 0, command_hp) ||
command_add("incstat", "- Increases or Decreases a client's stats permanently.", 200, command_incstat) ||
command_add("instance", "- Modify Instances", 200, command_instance) ||
command_add("interrogateinv", "- use [help] argument for available options", 0, command_interrogateinv) ||
command_add("interrupt", "[message id] [color] - Interrupt your casting. Arguments are optional.", 50, command_interrupt) ||
command_add("invsnapshot", "- Manipulates inventory snapshots for your current target", 80, command_invsnapshot) ||
command_add("invul", "[on/off] - Turn player target's or your invulnerable flag on or off", 80, command_invul) ||
command_add("ipban", "[IP address] - Ban IP by character name", 200, command_ipban) ||
command_add("iplookup", "[charname] - Look up IP address of charname", 200, command_iplookup) ||
command_add("iteminfo", "- Get information about the item on your cursor", 10, command_iteminfo) ||
command_add("itemsearch", "[search criteria] - Search for an item", 10, command_itemsearch) ||
command_add("kick", "[charname] - Disconnect charname", 150, command_kick) ||
command_add("kill", "- Kill your target", 100, command_kill) ||
command_add("killallnpcs", " [npc_name] Kills all npcs by search name, leave blank for all attackable NPC's", 200, command_killallnpcs) ||
command_add("lastname", "[new lastname] - Set your or your player target's lastname", 50, command_lastname) ||
command_add("level", "[level] - Set your or your target's level", 10, command_level) ||
command_add("listnpcs", "[name/range] - Search NPCs", 20, command_listnpcs) ||
command_add("list", "[npcs|players|corpses|doors|objects] [search] - Search entities", 20, command_list) ||
command_add("listpetition", "- List petitions", 50, command_listpetition) ||
command_add("load_shared_memory", "[shared_memory_name] - Reloads shared memory and uses the input as output", 250, command_load_shared_memory) ||
command_add("loc", "- Print out your or your target's current location and heading", 0, command_loc) ||
command_add("lock", "- Lock the worldserver", 150, command_lock) ||
command_add("logs", "Manage anything to do with logs", 250, command_logs) ||
command_add("logtest", "Performs log performance testing.", 250, command_logtest) ||
command_add("makepet", "[level] [class] [race] [texture] - Make a pet", 50, command_makepet) ||
command_add("mana", "- Fill your or your target's mana", 50, command_mana) ||
command_add("maxskills", "Maxes skills for you.", 200, command_max_all_skills) ||
command_add("memspell", "[slotid] [spellid] - Memorize spellid in the specified slot", 50, command_memspell) ||
command_add("merchant_close_shop", "Closes a merchant shop", 100, command_merchantcloseshop) ||
command_add("merchant_open_shop", "Opens a merchants shop", 100, command_merchantopenshop) ||
command_add("modifynpcstat", "- Modifys a NPC's stats", 150, command_modifynpcstat) ||
command_add("motd", "[new motd] - Set message of the day", 150, command_motd) ||
command_add("movechar", "[charname] [zonename] - Move charname to zonename", 50, command_movechar) ||
command_add("movement", "Various movement commands", 200, command_movement) ||
command_add("myskills", "- Show details about your current skill levels", 0, command_myskills) ||
command_add("mysqltest", "Akkadius MySQL Bench Test", 250, command_mysqltest) ||
command_add("mysql", "Mysql CLI, see 'help' for options.", 250, command_mysql) ||
command_add("mystats", "- Show details about you or your pet", 50, command_mystats) ||
command_add("name", "[newname] - Rename your player target", 150, command_name) ||
command_add("netstats", "- Gets the network stats for a stream.", 200, command_netstats) ||
command_add("network", "- Admin commands for the udp network interface.", 250, command_network) ||
command_add("npccast", "[targetname/entityid] [spellid] - Causes NPC target to cast spellid on targetname/entityid", 80, command_npccast) ||
command_add("npcedit", "[column] [value] - Mega NPC editing command", 100, command_npcedit) ||
command_add("npceditmass", "[name-search] [column] [value] - Mass (Zone wide) NPC data editing command", 100, command_npceditmass) ||
command_add("npcemote", "[message] - Make your NPC target emote a message.", 150, command_npcemote) ||
command_add("npcloot", "[show/money/add/remove] [itemid/all/money: pp gp sp cp] - Manipulate the loot an NPC is carrying", 80, command_npcloot) ||
command_add("npcsay", "[message] - Make your NPC target say a message.", 150, command_npcsay) ||
command_add("npcshout", "[message] - Make your NPC target shout a message.", 150, command_npcshout) ||
command_add("npcspawn", "[create/add/update/remove/delete] - Manipulate spawn DB", 170, command_npcspawn) ||
command_add("npcspecialattk", "[flagchar] [perm] - Set NPC special attack flags. Flags are E(nrage) F(lurry) R(ampage) S(ummon).", 80, command_npcspecialattk) ||
command_add("npcstats", "- Show stats about target NPC", 80, command_npcstats) ||
command_add("npctype_cache", "[id] or all - Clears the npc type cache for either the id or all npcs.", 250, command_npctype_cache) ||
command_add("npctypespawn", "[npctypeid] [factionid] - Spawn an NPC from the db", 10, command_npctypespawn) ||
command_add("nudge", "- Nudge your target's current position by specific values", 80, command_nudge) ||
command_add("nukebuffs", "- Strip all buffs on you or your target", 50, command_nukebuffs) ||
command_add("nukeitem", "[itemid] - Remove itemid from your player target's inventory", 150, command_nukeitem) ||
command_add("object", "List|Add|Edit|Move|Rotate|Copy|Save|Undo|Delete - Manipulate static and tradeskill objects within the zone", 100, command_object) ||
command_add("oocmute", "[1/0] - Mutes OOC chat", 200, command_oocmute) ||
command_add("opcode", "- opcode management", 250, command_opcode) ||
#ifdef PACKET_PROFILER
command_add("packetprofile", "- Dump packet profile for target or self.", 250, command_packetprofile) ||
#endif
command_add("path", "- view and edit pathing", 200, command_path) ||
command_add("peekinv", "[equip/gen/cursor/poss/limbo/curlim/trib/bank/shbank/allbank/trade/world/all] - Print out contents of your player target's inventory", 100, command_peekinv) ||
command_add("peqzone", "[zonename] - Go to specified zone, if you have > 75% health", 0, command_peqzone) ||
command_add("permaclass", "[classnum] - Change your or your player target's class (target is disconnected)", 80, command_permaclass) ||
command_add("permagender", "[gendernum] - Change your or your player target's gender (zone to take effect)", 80, command_permagender) ||
command_add("permarace", "[racenum] - Change your or your player target's race (zone to take effect)", 80, command_permarace) ||
command_add("petitioninfo", "[petition number] - Get info about a petition", 20, command_petitioninfo) ||
command_add("pf", "- Display additional mob coordinate and wandering data", 0, command_pf) ||
command_add("picklock", "Analog for ldon pick lock for the newer clients since we still don't have it working.", 0, command_picklock) ||
command_add("profanity", "Manage censored language.", 150, command_profanity) ||
#ifdef EQPROFILE
command_add("profiledump", "- Dump profiling info to logs", 250, command_profiledump) ||
command_add("profilereset", "- Reset profiling info", 250, command_profilereset) ||
#endif
command_add("push", "Lets you do spell push", 150, command_push) ||
command_add("proximity", "Shows NPC proximity", 150, command_proximity) ||
command_add("pvp", "[on/off] - Set your or your player target's PVP status", 100, command_pvp) ||
command_add("qglobal", "[on/off/view] - Toggles qglobal functionality on an NPC", 100, command_qglobal) ||
command_add("questerrors", "Shows quest errors.", 100, command_questerrors) ||
command_add("race", "[racenum] - Change your or your target's race. Use racenum 0 to return to normal", 50, command_race) ||
command_add("raidloot", "LEADER|GROUPLEADER|SELECTED|ALL - Sets your raid loot settings if you have permission to do so.", 0, command_raidloot) ||
command_add("randomfeatures", "- Temporarily randomizes the Facial Features of your target", 80, command_randomfeatures) ||
command_add("refreshgroup", "- Refreshes Group.", 0, command_refreshgroup) ||
command_add("reloadaa", "Reloads AA data", 200, command_reloadaa) ||
command_add("reloadallrules", "Executes a reload of all rules.", 80, command_reloadallrules) ||
command_add("reloademote", "Reloads NPC Emotes", 80, command_reloademote) ||
command_add("reloadlevelmods", nullptr, 255, command_reloadlevelmods) ||
command_add("reloadmerchants", nullptr, 255, command_reloadmerchants) ||
command_add("reloadperlexportsettings", nullptr, 255, command_reloadperlexportsettings) ||
command_add("reloadqst", " - Clear quest cache (any argument causes it to also stop all timers)", 150, command_reloadqst) ||
command_add("reloadrulesworld", "Executes a reload of all rules in world specifically.", 80, command_reloadworldrules) ||
command_add("reloadstatic", "- Reload Static Zone Data", 150, command_reloadstatic) ||
command_add("reloadtraps", "- Repops all traps in the current zone.", 80, command_reloadtraps) ||
command_add("reloadtitles", "- Reload player titles from the database", 150, command_reloadtitles) ||
command_add("reloadworld", "[0|1] - Clear quest cache (0 - no repop, 1 - repop)", 255, command_reloadworld) ||
command_add("reloadzps", "- Reload zone points from database", 150, command_reloadzps) ||
command_add("repop", "[delay] - Repop the zone with optional delay", 100, command_repop) ||
command_add("repopclose", "[distance in units] Repops only NPC's nearby for fast development purposes", 100, command_repopclose) ||
command_add("resetaa", "- Resets a Player's AA in their profile and refunds spent AA's to unspent, may disconnect player.", 200, command_resetaa) ||
command_add("resetaa_timer", "Command to reset AA cooldown timers.", 200, command_resetaa_timer) ||
command_add("revoke", "[charname] [1/0] - Makes charname unable to talk on OOC", 200, command_revoke) ||
command_add("roambox", "Manages roambox settings for an NPC", 200, command_roambox) ||
command_add("rules", "(subcommand) - Manage server rules", 250, command_rules) ||
command_add("save", "- Force your player or player corpse target to be saved to the database", 50, command_save) ||
command_add("scale", "- Handles npc scaling", 150, command_scale) ||
command_add("scribespell", "[spellid] - Scribe specified spell in your target's spell book.", 180, command_scribespell) ||
command_add("scribespells", "[max level] [min level] - Scribe all spells for you or your player target that are usable by them, up to level specified. (may freeze client for a few seconds)", 150, command_scribespells) ||
command_add("sendzonespawns", "- Refresh spawn list for all clients in zone", 150, command_sendzonespawns) ||
command_add("sensetrap", "Analog for ldon sense trap for the newer clients since we still don't have it working.", 0, command_sensetrap) ||
command_add("serverinfo", "- Get OS info about server host", 200, command_serverinfo) ||
command_add("serverrules", "- Read this server's rules", 0, command_serverrules) ||
command_add("setaapts", "[value] - Set your or your player target's available AA points", 100, command_setaapts) ||
command_add("setaaxp", "[value] - Set your or your player target's AA experience", 100, command_setaaxp) ||
command_add("setadventurepoints", "- Set your or your player target's available adventure points", 150, command_set_adventure_points) ||
command_add("setanim", "[animnum] - Set target's appearance to animnum", 200, command_setanim) ||
command_add("setcrystals", "[value] - Set your or your player target's available radiant or ebon crystals", 100, command_setcrystals) ||
command_add("setfaction", "[faction number] - Sets targeted NPC's faction in the database", 170, command_setfaction) ||
command_add("setgraveyard", "[zone name] - Creates a graveyard for the specified zone based on your target's LOC.", 200, command_setgraveyard) ||
command_add("setlanguage", "[language ID] [value] - Set your target's language skillnum to value", 50, command_setlanguage) ||
command_add("setlsinfo", "[email] [password] - Set login server email address and password (if supported by login server)", 10, command_setlsinfo) ||
command_add("setpass", "[accountname] [password] - Set local password for accountname", 150, command_setpass) ||
command_add("setpvppoints", "[value] - Set your or your player target's PVP points", 100, command_setpvppoints) ||
command_add("setskill", "[skillnum] [value] - Set your target's skill skillnum to value", 50, command_setskill) ||
command_add("setskillall", "[value] - Set all of your target's skills to value", 50, command_setskillall) ||
command_add("setstartzone", "[zoneid] - Set target's starting zone. Set to zero to allow the player to use /setstartcity", 80, command_setstartzone) ||
command_add("setstat", "- Sets the stats to a specific value.", 255, command_setstat) ||
command_add("setxp", "[value] - Set your or your player target's experience", 100, command_setxp) ||
command_add("showbonusstats", "[item|spell|all] Shows bonus stats for target from items or spells. Shows both by default.", 50, command_showbonusstats) ||
command_add("showbuffs", "- List buffs active on your target or you if no target", 50, command_showbuffs) ||
command_add("shownumhits", "Shows buffs numhits for yourself.", 0, command_shownumhits) ||
command_add("shownpcgloballoot", "Show GlobalLoot entires on this npc", 50, command_shownpcgloballoot) ||
command_add("showskills", "- Show the values of your or your player target's skills", 50, command_showskills) ||
command_add("showspellslist", "Shows spell list of targeted NPC", 100, command_showspellslist) ||
command_add("showstats", "- Show details about you or your target", 50, command_showstats) ||
command_add("showzonegloballoot", "Show GlobalLoot entires on this zone", 50, command_showzonegloballoot) ||
command_add("shutdown", "- Shut this zone process down", 150, command_shutdown) ||
command_add("size", "[size] - Change size of you or your target", 50, command_size) ||
command_add("spawn", "[name] [race] [level] [material] [hp] [gender] [class] [priweapon] [secweapon] [merchantid] - Spawn an NPC", 10, command_spawn) ||
command_add("spawneditmass", "Mass editing spawn command", 150, command_spawneditmass) ||
command_add("spawnfix", "- Find targeted NPC in database based on its X/Y/heading and update the database to make it spawn at your current location/heading.", 170, command_spawnfix) ||
command_add("spawnstatus", "- Show respawn timer status", 100, command_spawnstatus) ||
command_add("spellinfo", "[spellid] - Get detailed info about a spell", 10, command_spellinfo) ||
command_add("spoff", "- Sends OP_ManaChange", 80, command_spoff) ||
command_add("spon", "- Sends OP_MemorizeSpell", 80, command_spon) ||
command_add("stun", "[duration] - Stuns you or your target for duration", 100, command_stun) ||
command_add("summon", "[charname] - Summons your player/npc/corpse target, or charname if specified", 80, command_summon) ||
command_add("summonburiedplayercorpse", "- Summons the target's oldest buried corpse, if any exist.", 100, command_summonburiedplayercorpse) ||
command_add("summonitem", "[itemid] [charges] - Summon an item onto your cursor. Charges are optional.", 200, command_summonitem) ||
command_add("suspend", "[name] [days] [reason] - Suspend by character name and for specificed number of days", 150, command_suspend) ||
command_add("task", "(subcommand) - Task system commands", 150, command_task) ||
command_add("tattoo", "- Change the tattoo of your target (Drakkin Only)", 80, command_tattoo) ||
command_add("tempname", "[newname] - Temporarily renames your target. Leave name blank to restore the original name.", 100, command_tempname) ||
command_add("petname", "[newname] - Temporarily renames your pet. Leave name blank to restore the original name.", 100, command_petname) ||
command_add("test", "Test command", 200, command_test) ||
command_add("texture", "[texture] [helmtexture] - Change your or your target's appearance, use 255 to show equipment", 10, command_texture) ||
command_add("time", "[HH] [MM] - Set EQ time", 90, command_time) ||
command_add("timers", "- Display persistent timers for target", 200, command_timers) ||
command_add("timezone", "[HH] [MM] - Set timezone. Minutes are optional", 90, command_timezone) ||
command_add("title", "[text] [1 = create title table row] - Set your or your player target's title", 50, command_title) ||
command_add("titlesuffix", "[text] [1 = create title table row] - Set your or your player target's title suffix", 50, command_titlesuffix) ||
command_add("traindisc", "[level] - Trains all the disciplines usable by the target, up to level specified. (may freeze client for a few seconds)", 150, command_traindisc) ||
command_add("trapinfo", "- Gets infomation about the traps currently spawned in the zone.", 81, command_trapinfo) ||
command_add("tune", "Calculate ideal statical values related to combat.", 100, command_tune) ||
command_add("ucs", "- Attempts to reconnect to the UCS server", 0, command_ucs) ||
command_add("undyeme", "- Remove dye from all of your armor slots", 0, command_undyeme) ||
command_add("unfreeze", "- Unfreeze your target", 80, command_unfreeze) ||
command_add("unlock", "- Unlock the worldserver", 150, command_unlock) ||
command_add("unscribespell", "[spellid] - Unscribe specified spell from your target's spell book.", 180, command_unscribespell) ||
command_add("unscribespells", "- Clear out your or your player target's spell book.", 180, command_unscribespells) ||
command_add("untraindisc", "[spellid] - Untrain specified discipline from your target.", 180, command_untraindisc) ||
command_add("untraindiscs", "- Untrains all disciplines from your target.", 180, command_untraindiscs) ||
command_add("uptime", "[zone server id] - Get uptime of worldserver, or zone server if argument provided", 10, command_uptime) ||
command_add("version", "- Display current version of EQEmu server", 0, command_version) ||
command_add("viewnpctype", "[npctype id] - Show info about an npctype", 100, command_viewnpctype) ||
command_add("viewpetition", "[petition number] - View a petition", 20, command_viewpetition) ||
command_add("wc", "[wear slot] [material] - Sends an OP_WearChange for your target", 200, command_wc) ||
command_add("weather", "[0/1/2/3] (Off/Rain/Snow/Manual) - Change the weather", 80, command_weather) ||
command_add("who", "[search]", 20, command_who) ||
command_add("worldshutdown", "- Shut down world and all zones", 200, command_worldshutdown) ||
command_add("wp", "[add/delete] [grid_num] [pause] [wp_num] [-h] - Add/delete a waypoint to/from a wandering grid", 170, command_wp) ||
command_add("wpadd", "[pause] [-h] - Add your current location as a waypoint to your NPC target's AI path", 170, command_wpadd) ||
command_add("wpinfo", "- Show waypoint info about your NPC target", 170, command_wpinfo) ||
command_add("xtargets", "Show your targets Extended Targets and optionally set how many xtargets they can have.", 250, command_xtargets) ||
command_add("zclip", "[min] [max] - modifies and resends zhdr packet", 80, command_zclip) ||
command_add("zcolor", "[red] [green] [blue] - Change sky color", 80, command_zcolor) ||
command_add("zheader", "[zonename] - Load zheader for zonename from the database", 80, command_zheader) ||
command_add("zone", "[zonename] [x] [y] [z] - Go to specified zone (coords optional)", 50, command_zone) ||
command_add("zonebootup", "[ZoneServerID] [shortname] - Make a zone server boot a specific zone", 150, command_zonebootup) ||
command_add("zoneinstance", "[instanceid] [x] [y] [z] - Go to specified instance zone (coords optional)", 50, command_zone_instance) ||
command_add("zonelock", "[list/lock/unlock] - Set/query lock flag for zoneservers", 100, command_zonelock) ||
command_add("zoneshutdown", "[shortname] - Shut down a zone server", 150, command_zoneshutdown) ||
command_add("zonespawn", "- Not implemented", 250, command_zonespawn) ||
command_add("zonestatus", "- Show connected zoneservers, synonymous with /servers", 150, command_zonestatus) ||
command_add("zopp", "Troubleshooting command - Sends a fake item packet to you. No server reference is created.", 250, command_zopp) ||
command_add("zsafecoords", "[x] [y] [z] - Set safe coords", 80, command_zsafecoords) ||
command_add("zsave", " - Saves zheader to the database", 80, command_zsave) ||
command_add("zsky", "[skytype] - Change zone sky type", 80, command_zsky) ||
command_add("zstats", "- Show info about zone header", 80, command_zstats) ||
command_add("zunderworld", "[zcoord] - Sets the underworld using zcoord", 80, command_zunderworld) ||
command_add("zuwcoords", "[z coord] - Set underworld coord", 80, command_zuwcoords)
) {
command_deinit();
return -1;
}
std::map<std::string, std::pair<uint8, std::vector<std::string>>> command_settings;
database.GetCommandSettings(command_settings);
std::vector<std::pair<std::string, uint8>> injected_command_settings;
std::vector<std::string> orphaned_command_settings;
for (auto cs_iter : command_settings) {
auto cl_iter = commandlist.find(cs_iter.first);
if (cl_iter == commandlist.end()) {
orphaned_command_settings.push_back(cs_iter.first);
LogInfo(
"Command [{}] no longer exists... Deleting orphaned entry from `command_settings` table...",
cs_iter.first.c_str()
);
}
}
if (orphaned_command_settings.size()) {
if (!database.UpdateOrphanedCommandSettings(orphaned_command_settings)) {
LogInfo("Failed to process 'Orphaned Commands' update operation.");
}
}
auto working_cl = commandlist;
for (auto working_cl_iter : working_cl) {
auto cs_iter = command_settings.find(working_cl_iter.first);
if (cs_iter == command_settings.end()) {
injected_command_settings.push_back(std::pair<std::string, uint8>(working_cl_iter.first, working_cl_iter.second->access));
LogInfo(
"New Command [{}] found... Adding to `command_settings` table with access [{}]...",
working_cl_iter.first.c_str(),
working_cl_iter.second->access
);
if (working_cl_iter.second->access == 0) {
LogCommands(
"command_init(): Warning: Command [{}] defaulting to access level 0!",
working_cl_iter.first.c_str()
);
}
continue;
}
working_cl_iter.second->access = cs_iter->second.first;
LogCommands(
"command_init(): - Command [{}] set to access level [{}]",
working_cl_iter.first.c_str(),
cs_iter->second.first
);
if (cs_iter->second.second.empty()) {
continue;
}
for (auto alias_iter : cs_iter->second.second) {
if (alias_iter.empty()) {
continue;
}
if (commandlist.find(alias_iter) != commandlist.end()) {
LogCommands(
"command_init(): Warning: Alias [{}] already exists as a command - skipping!",
alias_iter.c_str()
);
continue;
}
commandlist[alias_iter] = working_cl_iter.second;
commandaliases[alias_iter] = working_cl_iter.first;
LogCommands(
"command_init(): - Alias [{}] added to command [{}]",
alias_iter.c_str(),
commandaliases[alias_iter].c_str()
);
}
}
if (injected_command_settings.size()) {
if (!database.UpdateInjectedCommandSettings(injected_command_settings)) {
LogInfo("Failed to process 'Injected Commands' update operation.");
}
}
command_dispatch = command_realdispatch;
return commandcount;
}
/*
* command_deinit
* clears the command list, freeing resources
*
* Parameters:
* none
*
*/
// Tears down the command subsystem: restores the "not available"
// dispatcher, zeroes the registered-command counter, and releases all
// command and alias mappings. CommandRecord instances themselves are
// owned by cleanup_commandlist and are not freed here.
void command_deinit(void)
{
	command_dispatch = command_notavail;
	commandcount = 0;
	commandaliases.clear();
	commandlist.clear();
}
/*
* command_add
* adds a command to the command list; used by command_init
*
* Parameters:
* command_name - the command ex: "spawn"
* desc - text description of command for #help
* access - default access level required to use command
* function - pointer to function that handles command
*
*/
int command_add(std::string command_name, const char *desc, int access, CmdFuncPtr function)
{
	// Reject an unnamed command outright.
	if (command_name.empty()) {
		LogError("command_add() - Command added with empty name string - check command.cpp");
		return -1;
	}

	// Reject a command registered without a handler.
	if (function == nullptr) {
		LogError("command_add() - Command [{}] added without a valid function pointer - check command.cpp", command_name.c_str());
		return -1;
	}

	// Reject duplicate command names.
	if (commandlist.count(command_name) != 0) {
		LogError("command_add() - Command [{}] is a duplicate command name - check command.cpp", command_name.c_str());
		return -1;
	}

	// Reject a second name bound to an already-registered handler; aliases
	// are configured through the database instead (see command_init).
	for (const auto &entry : commandlist) {
		if (entry.second->function != function) {
			continue;
		}
		LogError("command_add() - Command [{}] equates to an alias of [{}] - check command.cpp", command_name.c_str(), entry.first.c_str());
		return -1;
	}

	// Record the command; cleanup_commandlist owns the allocation.
	auto record = new CommandRecord;
	record->access = access;
	record->desc = desc;
	record->function = function;

	commandlist[command_name] = record;
	commandaliases[command_name] = command_name;
	cleanup_commandlist.Append(record);
	commandcount++;

	return 0;
}
/*
*
* command_realdispatch
* Calls the correct function to process the client's command string.
* Called from Client::ChannelMessageReceived if message starts with
* command character (#).
*
* Parameters:
* c - pointer to the calling client object
* message - what the client typed
*
*/
int command_realdispatch(Client *c, const char *message)
{
	Seperator sep(message, ' ', 10, 100, true); // "three word argument" should be considered 1 arg

	command_logcommand(c, message);

	// Skip the leading command character (#) to obtain the bare command name.
	std::string cstr(sep.arg[0]+1);

	// Perf fix: a single find() replaces the original count()-then-operator[]
	// pair, which traversed the same map twice for every dispatched command.
	auto it = commandlist.find(cstr);
	if (it == commandlist.end()) {
		return(-2);
	}

	CommandRecord *cur = it->second;
	if(c->Admin() < cur->access){
		c->Message(Chat::Red,"Your access level is not high enough to use this command.");
		return(-1);
	}

	/* QS: Player_Log_Issued_Commands */
	if (RuleB(QueryServ, PlayerLogIssuedCommandes)){
		std::string event_desc = StringFormat("Issued command :: '%s' in zoneid:%i instid:%i", message, c->GetZoneID(), c->GetInstanceID());
		QServ->PlayerLogEvent(Player_Log_Issued_Commands, c->CharacterID(), event_desc);
	}

	// Commands at or above the logging threshold are always recorded.
	if(cur->access >= COMMANDS_LOGGING_MIN_STATUS) {
		LogCommands("[{}] ([{}]) used command: [{}] (target=[{}])", c->GetName(), c->AccountName(), message, c->GetTarget()?c->GetTarget()->GetName():"NONE");
	}

	if(cur->function == nullptr) {
		LogError("Command [{}] has a null function\n", cstr.c_str());
		return(-1);
	} else {
		//dispatch C++ command
		cur->function(c, &sep);	// dispatch command
	}

	return 0;
}
// Conditionally records a command invocation to the event log, based on the
// zone's loglevelvar and the issuer's admin status. Unknown log levels
// (including 0) log nothing.
void command_logcommand(Client *c, const char *message)
{
	int admin = c->Admin();

	bool should_log = false;
	switch (zone->loglevelvar) { //catch failsafe
		case 1: should_log = (admin >= 200); break;
		case 2: should_log = (admin >= 150); break;
		case 3: should_log = (admin >= 100); break;
		case 4: should_log = (admin >= 80);  break;
		case 5: should_log = (admin >= 20);  break;
		case 6: should_log = (admin >= 10);  break;
		case 7: should_log = true;           break; // log everyone
		case 8: should_log = (admin >= 100 && admin < 150); break; // log only GM
		case 9: should_log = (admin >= 150 && admin < 200); break; // log only LeadGM
	}

	if (should_log) {
		database.logevents(
			c->AccountName(),
			c->AccountID(),
			admin, c->GetName(),
			c->GetTarget() ? c->GetTarget()->GetName() : "None",
			"Command",
			message,
			1
		);
	}
}
/*
* commands go below here
*/
// #setstat [type] [value] - permanently set one stat on the targeted client.
void command_setstat(Client* c, const Seperator* sep){
	auto *target = c->GetTarget();

	// Both arguments and a client target are required; otherwise show usage.
	if (!sep->arg[1][0] || !sep->arg[2][0] || target == 0 || !target->IsClient()) {
		c->Message(Chat::White,"This command is used to permanently increase or decrease a players stats.");
		c->Message(Chat::White,"Usage: #setstat {type} {value the stat should be}");
		c->Message(Chat::White,"Types: Str: 0, Sta: 1, Agi: 2, Dex: 3, Int: 4, Wis: 5, Cha: 6");
		return;
	}

	target->CastToClient()->SetStats(atoi(sep->arg[1]), atoi(sep->arg[2]));
}
// #incstat [type] [value] - permanently increment/decrement one stat on the
// targeted client.
void command_incstat(Client* c, const Seperator* sep){
	auto *target = c->GetTarget();

	// Both arguments and a client target are required; otherwise show usage.
	if (!sep->arg[1][0] || !sep->arg[2][0] || target == 0 || !target->IsClient()) {
		c->Message(Chat::White,"This command is used to permanently increase or decrease a players stats.");
		c->Message(Chat::White,"Usage: #setstat {type} {value by which to increase or decrease}");
		c->Message(Chat::White,"Note: The value is in increments of 2, so a value of 3 will actually increase the stat by 6");
		c->Message(Chat::White,"Types: Str: 0, Sta: 1, Agi: 2, Dex: 3, Int: 4, Wis: 5, Cha: 6");
		return;
	}

	target->CastToClient()->IncStats(atoi(sep->arg[1]), atoi(sep->arg[2]));
}
// #resetaa - wipes the targeted client's AA data via Client::ResetAA.
void command_resetaa(Client* c,const Seperator *sep) {
	auto *target = c->GetTarget();

	if (target == nullptr || !target->IsClient()) {
		c->Message(Chat::White,"Usage: Target a client and use #resetaa to reset the AA data in their Profile.");
		return;
	}

	target->CastToClient()->ResetAA();
	c->Message(Chat::Red,"Successfully reset %s's AAs", target->GetName());
}
// #help [filter] - lists all commands at or below the caller's access level,
// optionally restricted to names containing the filter substring.
void command_help(Client *c, const Seperator *sep)
{
	int commands_shown = 0;

	c->Message(Chat::White, "Available EQEMu commands:");

	for (const auto &entry : commandlist) {
		// Optional substring filter from the first argument.
		if (sep->arg[1][0] && entry.first.find(sep->arg[1]) == std::string::npos) {
			continue;
		}
		// Hide commands the caller cannot use.
		if (c->Admin() < entry.second->access) {
			continue;
		}
		commands_shown++;
		c->Message(Chat::White, " %c%s %s", COMMAND_CHAR, entry.first.c_str(), entry.second->desc == nullptr ? "" : entry.second->desc);
	}

	c->Message(Chat::White, "%d command%s listed.", commands_shown, commands_shown != 1 ? "s" : "");
}
// #version - reports the server build's version string plus its compile and
// last-modified timestamps (all compile-time constants) to the caller.
void command_version(Client *c, const Seperator *sep)
{
	c->Message(Chat::White, "Current version information.");
	c->Message(Chat::White, " %s", CURRENT_VERSION);
	c->Message(Chat::White, " Compiled on: %s at %s", COMPILE_DATE, COMPILE_TIME);
	c->Message(Chat::White, " Last modified on: %s", LAST_MODIFIED);
}
// #setfaction [faction number] - persists a new npc_faction_id for the
// targeted NPC's npc_types row.
void command_setfaction(Client *c, const Seperator *sep)
{
	// Require a usable argument (not empty, not "*") and a non-client target.
	bool missing_arg = (sep->arg[1][0] == 0 || strcasecmp(sep->arg[1], "*") == 0);
	bool bad_target  = (c->GetTarget() == 0 || c->GetTarget()->IsClient());
	if (missing_arg || bad_target) {
		c->Message(Chat::White, "Usage: #setfaction [faction number]");
		return;
	}

	int faction_id = atoi(sep->argplus[1]);
	auto npc_type_id = c->GetTarget()->CastToNPC()->GetNPCTypeID();

	c->Message(Chat::Yellow,"Setting NPC %u to faction %i", npc_type_id, faction_id);

	// faction_id comes through atoi(), so only a numeric value can reach the query.
	std::string query = StringFormat("UPDATE npc_types SET npc_faction_id = %i WHERE id = %i",
		faction_id, npc_type_id);
	database.QueryDatabase(query);
}
// #serversidename - echoes the server's internal name for the current target.
void command_serversidename(Client *c, const Seperator *sep)
{
	auto *target = c->GetTarget();
	if (target == nullptr) {
		c->Message(Chat::White, "Error: no target");
		return;
	}
	c->Message(Chat::White, target->GetName());
}
// #wc [wear slot] [material] [hero_forge_model] [elite_material] ... -
// sends an OP_WearChange for the current target via SendTextureWC.
void command_wc(Client *c, const Seperator *sep)
{
	if (sep->argnum < 2) {
		// Consistency fix: every other usage message in this file uses the
		// named Chat::White constant; this one passed a raw 0.
		c->Message(
			Chat::White,
			"Usage: #wc [wear slot] [material] [ [hero_forge_model] [elite_material] [unknown06] [unknown18] ]"
		);
	}
	else if (c->GetTarget() == nullptr) {
		c->Message(Chat::Red, "You must have a target to do a wear change.");
	}
	else {
		uint32 hero_forge_model = 0;
		uint32 wearslot = atoi(sep->arg[1]);

		// Hero Forge
		if (sep->argnum > 2) {
			hero_forge_model = atoi(sep->arg[3]);
			if (hero_forge_model != 0 && hero_forge_model < 1000) {
				// Shorthand Hero Forge ID. Otherwise use the value the user entered.
				hero_forge_model = (hero_forge_model * 100) + wearslot;
			}
		}
		/*
		// Leaving here to add color option to the #wc command eventually
		uint32 Color;
		if (c->GetTarget()->IsClient())
			Color = c->GetTarget()->GetEquipmentColor(atoi(sep->arg[1]));
		else
			Color = c->GetTarget()->GetArmorTint(atoi(sep->arg[1]));
		*/
		// Omitted trailing arguments arrive as empty strings, which atoi()
		// converts to 0.
		c->GetTarget()->SendTextureWC(
			wearslot,
			atoi(sep->arg[2]),
			hero_forge_model,
			atoi(sep->arg[4]),
			atoi(sep->arg[5]),
			atoi(sep->arg[6]));
	}
}
// #heromodel [hero forge model] [slot] - applies a Hero's Forge model to the
// target, either on one explicit slot or across slots 0-6.
void command_heromodel(Client *c, const Seperator *sep)
{
	if (sep->argnum < 1) {
		c->Message(Chat::White, "Usage: #heromodel [hero forge model] [ [slot] ] (example: #heromodel 63)");
		return;
	}
	if (c->GetTarget() == nullptr) {
		c->Message(Chat::Red, "You must have a target to do a wear change for Hero's Forge Models.");
		return;
	}

	uint32 model = atoi(sep->arg[1]);

	// Explicit slot given: apply the model as entered to that single slot.
	if (sep->argnum > 1) {
		uint8 slot = (uint8) atoi(sep->arg[2]);
		c->GetTarget()->SendTextureWC(slot, 0, model, 0, 0, 0);
		return;
	}

	if (model == 0) {
		c->Message(Chat::Red, "Hero's Forge Model must be greater than 0.");
		return;
	}

	// Conversion to simplify the command arguments
	// Hero's Forge model is actually model * 1000 + texture * 100 + wearslot
	// Hero's Forge Model slot 7 is actually for Robes, but it still needs to use wearslot 1 in the packet
	model *= 100;
	for (uint8 slot = 0; slot < 7; slot++) {
		c->GetTarget()->SendTextureWC(slot, 0, (model + slot), 0, 0, 0);
	}
}
// #setanim [animnum] - sets the target's appearance to the given EmuAppearance
// value, after range-validating it against _eaMaxAppearance.
void command_setanim(Client *c, const Seperator *sep)
{
	if (c->GetTarget() && sep->IsNumber(1)) {
		int num = atoi(sep->arg[1]);
		if (num < 0 || num >= _eaMaxAppearance) {
			c->Message(Chat::White, "Invalid animation number, between 0 and %d", _eaMaxAppearance - 1);
			// Bug fix: previously execution fell through here and applied
			// the out-of-range value to the target anyway.
			return;
		}
		c->GetTarget()->SetAppearance(EmuAppearance(num));
	}
	else {
		c->Message(Chat::White, "Usage: #setanim [animnum]");
	}
}
// #serverinfo - reports host OS details, CPU list, and process PID/RSS/uptime
// gathered from the EQ platform helpers.
void command_serverinfo(Client *c, const Seperator *sep)
{
	auto os = EQ::GetOS();
	auto cpus = EQ::GetCPUs();
	auto pid = EQ::GetPID();
	auto rss = EQ::GetRSS();
	auto uptime = EQ::GetUptime();

	c->Message(Chat::White, "Operating System Information");
	c->Message(Chat::White, "==================================================");
	c->Message(Chat::White, "System: %s", os.sysname.c_str());
	c->Message(Chat::White, "Release: %s", os.release.c_str());
	c->Message(Chat::White, "Version: %s", os.version.c_str());
	c->Message(Chat::White, "Machine: %s", os.machine.c_str());
	c->Message(Chat::White, "Uptime: %.2f seconds", uptime);
	c->Message(Chat::White, "==================================================");
	c->Message(Chat::White, "CPU Information");
	c->Message(Chat::White, "==================================================");
	for (size_t i = 0; i < cpus.size(); ++i) {
		auto &cp = cpus[i];
		// Bug fix: passing a size_t where the printf-style "%i" expects an
		// int is undefined behavior on 64-bit platforms; cast explicitly.
		c->Message(Chat::White, "CPU #%i: %s, Speed: %.2fGhz", (int)i, cp.model.c_str(), cp.speed);
	}
	c->Message(Chat::White, "==================================================");
	c->Message(Chat::White, "Process Information");
	c->Message(Chat::White, "==================================================");
	c->Message(Chat::White, "PID: %u", pid);
	c->Message(Chat::White, "RSS: %.2f MB", rss / 1048576.0);
	c->Message(Chat::White, "==================================================");
}
// #getvariable [name] - looks up a server variable in the database and echoes
// its value, or reports that the lookup failed.
void command_getvariable(Client *c, const Seperator *sep)
{
	const char *var_name = sep->argplus[1];

	std::string value;
	if (!database.GetVariable(var_name, value)) {
		c->Message(Chat::White, "GetVariable(%s) returned false", var_name);
		return;
	}
	c->Message(Chat::White, "%s = %s", var_name, value.c_str());
}
// #chat [channum] [message] - relays a message onto the given world chat
// channel through the world server connection.
void command_chat(Client *c, const Seperator *sep)
{
	if (sep->arg[2][0] == 0) {
		c->Message(Chat::White, "Usage: #chat [channum] [message]");
		return;
	}

	uint8 channel = (uint8) atoi(sep->arg[1]);
	if (!worldserver.SendChannelMessage(0, 0, channel, 0, 0, 100, sep->argplus[2])) {
		c->Message(Chat::White, "Error: World server disconnected");
	}
}
// #npcloot [show/money/add/remove] - inspects or edits the loot carried by
// the targeted NPC (or, for "show" only, a corpse). Item ids are validated
// against the item database before being added.
void command_npcloot(Client *c, const Seperator *sep)
{
	if (c->GetTarget() == 0)
		c->Message(Chat::White, "Error: No target");
	// #npcloot show
	else if (strcasecmp(sep->arg[1], "show") == 0)
	{
		if (c->GetTarget()->IsNPC())
			c->GetTarget()->CastToNPC()->QueryLoot(c);
		else if (c->GetTarget()->IsCorpse())
			c->GetTarget()->CastToCorpse()->QueryLoot(c);
		else
			c->Message(Chat::White, "Error: Target's type doesnt have loot");
	}
	// These 2 types are *BAD* for the next few commands
	else if (c->GetTarget()->IsClient() || c->GetTarget()->IsCorpse())
		c->Message(Chat::White, "Error: Invalid target type, try a NPC =).");
	// #npcloot add
	else if (strcasecmp(sep->arg[1], "add") == 0)
	{
		// #npcloot add item
		if (c->GetTarget()->IsNPC() && sep->IsNumber(2))
		{
			uint32 item = atoi(sep->arg[2]);
			// Only add ids that exist in the item database.
			if (database.GetItem(item))
			{
				// Optional third argument is the quantity; defaults to 1.
				if (sep->arg[3][0] != 0 && sep->IsNumber(3))
					c->GetTarget()->CastToNPC()->AddItem(item, atoi(sep->arg[3]), 0);
				else
					c->GetTarget()->CastToNPC()->AddItem(item, 1, 0);
				c->Message(Chat::White, "Added item(%i) to the %s's loot.", item, c->GetTarget()->GetName());
			}
			else
				c->Message(Chat::White, "Error: #npcloot add: Item(%i) does not exist!", item);
		}
		else if (!sep->IsNumber(2))
			c->Message(Chat::White, "Error: #npcloot add: Itemid must be a number.");
		else
			c->Message(Chat::White, "Error: #npcloot add: This is not a valid target.");
	}
	// #npcloot remove
	else if (strcasecmp(sep->arg[1], "remove") == 0)
	{
		//#npcloot remove all
		if (strcasecmp(sep->arg[2], "all") == 0)
			c->Message(Chat::White, "Error: #npcloot remove all: Not yet implemented.");
		//#npcloot remove itemid
		else
		{
			if(c->GetTarget()->IsNPC() && sep->IsNumber(2))
			{
				uint32 item = atoi(sep->arg[2]);
				c->GetTarget()->CastToNPC()->RemoveItem(item);
				c->Message(Chat::White, "Removed item(%i) from the %s's loot.", item, c->GetTarget()->GetName());
			}
			else if (!sep->IsNumber(2))
				c->Message(Chat::White, "Error: #npcloot remove: Item must be a number.");
			else
				c->Message(Chat::White, "Error: #npcloot remove: This is not a valid target.");
		}
	}
	// #npcloot money
	else if (strcasecmp(sep->arg[1], "money") == 0)
	{
		if (c->GetTarget()->IsNPC() && sep->IsNumber(2) && sep->IsNumber(3) && sep->IsNumber(4) && sep->IsNumber(5))
		{
			// Each denomination is range-checked to 0-34464 before use.
			// NOTE(review): the 34465 cap looks like a storage limit - confirm.
			if ((atoi(sep->arg[2]) < 34465 && atoi(sep->arg[2]) >= 0) && (atoi(sep->arg[3]) < 34465 && atoi(sep->arg[3]) >= 0) && (atoi(sep->arg[4]) < 34465 && atoi(sep->arg[4]) >= 0) && (atoi(sep->arg[5]) < 34465 && atoi(sep->arg[5]) >= 0))
			{
				// Arguments are passed in reverse of the usage order
				// (copper, silver, gold, platinum) - presumably matching
				// AddCash's parameter order; confirm against its declaration.
				c->GetTarget()->CastToNPC()->AddCash(atoi(sep->arg[5]), atoi(sep->arg[4]), atoi(sep->arg[3]), atoi(sep->arg[2]));
				c->Message(Chat::White, "Set %i Platinum, %i Gold, %i Silver, and %i Copper as %s's money.", atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), atoi(sep->arg[5]), c->GetTarget()->GetName());
			}
			else
				c->Message(Chat::White, "Error: #npcloot money: Values must be between 0-34465.");
		}
		else
			c->Message(Chat::White, "Usage: #npcloot money platinum gold silver copper");
	}
	else
		c->Message(Chat::White, "Usage: #npcloot [show/money/add/remove] [itemid/all/money: pp gp sp cp]");
}
// #gm [on/off] - toggles GM status on the targeted client, or on the caller
// when no client is targeted.
void command_gm(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #gm [on/off]");
		return;
	}

	bool enable = atobool(sep->arg[1]);

	// Apply to the targeted client if one exists; otherwise to self.
	Client *subject = c;
	if (c->GetTarget() && c->GetTarget()->IsClient()) {
		subject = c->GetTarget()->CastToClient();
	}

	subject->SetGM(enable);
	c->Message(Chat::White, "%s is %s a GM.", subject->GetName(), enable ? "now" : "no longer");
}
// there's no need for this, as /summon already takes care of it
// this command is here for reference but it is not added to the
// list above
//To whoever wrote the above: And what about /kill, /zone, /zoneserver, etc?
//There is a reason for the # commands: so that admins can specifically enable certain
//commands for their users. Some might want users to #summon but not to /kill. Can't do that if they are a GM
// #summon [charname] - moves the named player, or the current target (NPC,
// corpse, or client), to the caller's position. A player in another zone is
// summoned by forwarding a ServerOP_ZonePlayer packet to the world server.
void command_summon(Client *c, const Seperator *sep)
{
	Mob *t;

	if(sep->arg[1][0] != 0)		// arg specified
	{
		Client* client = entity_list.GetClientByName(sep->arg[1]);
		if (client != 0)	// found player in zone
			t=client->CastToMob();
		else
		{
			if (!worldserver.Connected())
				c->Message(Chat::White, "Error: World server disconnected.");
			else
			{ // player is in another zone
				//Taking this command out until we test the factor of 8 in ServerOP_ZonePlayer
				//c->Message(Chat::White, "Summoning player from another zone not yet implemented.");
				//return;
				auto pack = new ServerPacket(ServerOP_ZonePlayer, sizeof(ServerZonePlayer_Struct));
				ServerZonePlayer_Struct* szp = (ServerZonePlayer_Struct*) pack->pBuffer;
				strcpy(szp->adminname, c->GetName());
				szp->adminrank = c->Admin();
				// NOTE(review): the meaning of ignorerestrictions = 2 is
				// inferred from the field name only - confirm in the struct docs.
				szp->ignorerestrictions = 2;
				strcpy(szp->name, sep->arg[1]);
				strcpy(szp->zone, zone->GetShortName());
				szp->x_pos = c->GetX(); // May need to add a factor of 8 in here..
				szp->y_pos = c->GetY();
				szp->z_pos = c->GetZ();
				szp->instance_id = zone->GetInstanceID();
				worldserver.SendPacket(pack);
				safe_delete(pack);
			}
			// Cross-zone (or disconnected-world) path ends here.
			return;
		}
	}
	else if(c->GetTarget())		// have target
		t=c->GetTarget();
	else
	{
		/*if(c->Admin() < 150)
			c->Message(Chat::White, "You need a NPC/corpse target for this command");
		else*/
			c->Message(Chat::White, "Usage: #summon [charname] Either target or charname is required");
		return;
	}

	if(!t)
		return;

	if (t->IsNPC())
	{ // npc target
		c->Message(Chat::White, "Summoning NPC %s to %1.1f, %1.1f, %1.1f", t->GetName(), c->GetX(), c->GetY(), c->GetZ());
		t->CastToNPC()->GMMove(c->GetX(), c->GetY(), c->GetZ(), c->GetHeading());
		// NOTE(review): SaveGuardSpot(vec4(0)) presumably clears the guard
		// spot so the NPC stays put rather than returning - confirm.
		t->CastToNPC()->SaveGuardSpot(glm::vec4(0.0f));
	}
	else if (t->IsCorpse())
	{ // corpse target
		c->Message(Chat::White, "Summoning corpse %s to %1.1f, %1.1f, %1.1f", t->GetName(), c->GetX(), c->GetY(), c->GetZ());
		t->CastToCorpse()->GMMove(c->GetX(), c->GetY(), c->GetZ(), c->GetHeading());
	}
	else if (t->IsClient())
	{
		/*if(c->Admin() < 150)
		{
			c->Message(Chat::White, "You may not summon a player.");
			return;
		}*/
		c->Message(Chat::White, "Summoning player %s to %1.1f, %1.1f, %1.1f", t->GetName(), c->GetX(), c->GetY(), c->GetZ());
		t->CastToClient()->MovePC(zone->GetZoneID(), zone->GetInstanceID(), c->GetX(), c->GetY(), c->GetZ(), c->GetHeading(), 2, GMSummon);
	}
}
// #zone [zonename|zoneid] [y x z] - teleport the caster to another zone,
// optionally to specific coordinates. Access to "special" zones (cshome,
// load) and to explicit coordinates is gated behind admin-status constants.
void command_zone(Client *c, const Seperator *sep)
{
	if(c->Admin() < commandZoneToCoords &&
		(sep->IsNumber(2) || sep->IsNumber(3) || sep->IsNumber(4))) {
		c->Message(Chat::White, "Your status is not high enough to zone to specific coordinates.");
		return;
	}
	uint16 zoneid = 0;
	if (sep->IsNumber(1))
	{
		// numeric argument: treat as a zone id; 26 is cshome
		if(atoi(sep->arg[1])==26 && (c->Admin() < commandZoneToSpecials)){ //cshome
			c->Message(Chat::White, "Only Guides and above can goto that zone.");
			return;
		}
		zoneid = atoi(sep->arg[1]);
	}
	else if (sep->arg[1][0] == 0)
	{
		c->Message(Chat::White, "Usage: #zone [zonename]");
		c->Message(Chat::White, "Optional Usage: #zone [zonename] y x z");
		return;
	}
	// 184 is the 'Load' zone; low-status staff may not leave it with #zone
	else if (zone->GetZoneID() == 184 && c->Admin() < commandZoneToSpecials) { // Zone: 'Load'
		c->Message(Chat::White, "The Gods brought you here, only they can send you away.");
		return;
	} else {
		if((strcasecmp(sep->arg[1], "cshome")==0) && (c->Admin() < commandZoneToSpecials)){
			c->Message(Chat::White, "Only Guides and above can goto that zone.");
			return;
		}
		zoneid = database.GetZoneID(sep->arg[1]);
		if(zoneid == 0) {
			c->Message(Chat::White, "Unable to locate zone '%s'", sep->arg[1]);
			return;
		}
	}
#ifdef BOTS
	// This block is necessary to clean up any bot objects owned by a Client
	if(zoneid != c->GetZoneID())
		Bot::ProcessClientZoneChange(c);
#endif
	if (sep->IsNumber(2) || sep->IsNumber(3) || sep->IsNumber(4)){
		//zone to specific coords
		c->MovePC(zoneid, (float)atof(sep->arg[2]), atof(sep->arg[3]), atof(sep->arg[4]), 0.0f, 0);
	}
	else
		//zone to safe coords
		c->MovePC(zoneid, 0.0f, 0.0f, 0.0f, 0.0f, 0, ZoneToSafeCoords);
}
//todo: fix this so it checks if you're in the instance set
//todo: fix this so it checks if you're in the instance set
// #zoneinstance <instance id> [y x z] - teleport the caster into an instance
// they are a member of, optionally to explicit coordinates.
void command_zone_instance(Client *c, const Seperator *sep)
{
	// Explicit coordinates are gated behind the same status as #zone.
	if(c->Admin() < commandZoneToCoords &&
		(sep->IsNumber(2) || sep->IsNumber(3) || sep->IsNumber(4))) {
		c->Message(Chat::White, "Your status is not high enough to zone to specific coordinates.");
		return;
	}

	if (sep->arg[1][0] == 0)
	{
		c->Message(Chat::White, "Usage: #zoneinstance [instance id]");
		c->Message(Chat::White, "Optional Usage: #zoneinstance [instance id] y x z");
		return;
	}

	uint16 zoneid = 0;
	uint16 instanceid = 0;
	if(sep->IsNumber(1))
	{
		instanceid = atoi(sep->arg[1]);
		if(!instanceid)
		{
			c->Message(Chat::White, "Must enter a valid instance id.");
			return;
		}

		// Resolve the zone the instance lives in.
		zoneid = database.ZoneIDFromInstanceID(instanceid);
		if(!zoneid)
		{
			c->Message(Chat::White, "Instance not found or zone is set to null.");
			return;
		}
	}
	else
	{
		c->Message(Chat::White, "Must enter a valid instance id.");
		return;
	}

	// The instance must still be alive and the character must belong to it.
	if(!database.VerifyInstanceAlive(instanceid, c->CharacterID()))
	{
		// Fixed typos in the original message ("expiried", "apart of").
		c->Message(Chat::White, "Instance ID expired or you are not a part of this instance.");
		return;
	}

	if (sep->IsNumber(2) || sep->IsNumber(3) || sep->IsNumber(4)){
		//zone to specific coords
		c->MovePC(zoneid, instanceid, atof(sep->arg[2]), atof(sep->arg[3]), atof(sep->arg[4]), 0.0f, 0);
	}
	else{
		//zone to the instance's safe coords
		c->MovePC(zoneid, instanceid, 0.0f, 0.0f, 0.0f, 0.0f, 0, ZoneToSafeCoords);
	}
}
void command_showbuffs(Client *c, const Seperator *sep)
{
	// Dump the buff list of the current target, or of the caster when
	// nothing is targeted.
	Mob *subject = (c->GetTarget() == 0) ? c->CastToMob() : c->GetTarget()->CastToMob();
	subject->ShowBuffs(c);
}
// #peqzone [zonename|zoneid] - player-accessible zone teleport with a reuse
// timer, health gate, combat-state checks, destination whitelist (via
// GetPEQZone) and optional debuffs on use.
void command_peqzone(Client *c, const Seperator *sep)
{
	// reuse timer is stored in seconds; report remaining time in minutes
	uint32 timeleft = c->GetPTimers().GetRemainingTime(pTimerPeqzoneReuse)/60;
	if(!c->GetPTimers().Expired(&database, pTimerPeqzoneReuse, false)) {
		c->Message(Chat::Red,"You must wait %i minute(s) before using this ability again.", timeleft);
		return;
	}
	if(c->GetHPRatio() < 75) {
		c->Message(Chat::White, "You cannot use this command with less than 75 percent health.");
		return;
	}
	//this isnt perfect, but its better...
	// reject use while in any state that suggests combat or crowd control
	if(
		c->IsInvisible(c)
		|| c->IsRooted()
		|| c->IsStunned()
		|| c->IsMezzed()
		|| c->AutoAttackEnabled()
		|| c->GetInvul()
	) {
		c->Message(Chat::White, "You cannot use this command in your current state. Settle down and wait.");
		return;
	}
	uint16 zoneid = 0;
	uint8 destzone = 0;
	if (sep->IsNumber(1))
	{
		// numeric argument: zone id; GetPEQZone acts as the allow-list
		zoneid = atoi(sep->arg[1]);
		destzone = database.GetPEQZone(zoneid, 0);
		if(destzone == 0){
			c->Message(Chat::Red, "You cannot use this command to enter that zone!");
			return;
		}
		if(zoneid == zone->GetZoneID()) {
			c->Message(Chat::Red, "You cannot use this command on the zone you are in!");
			return;
		}
	}
	// no argument, or stray numeric coordinates (coordinates are not supported)
	else if (sep->arg[1][0] == 0 || sep->IsNumber(2) || sep->IsNumber(3) || sep->IsNumber(4) || sep->IsNumber(5))
	{
		c->Message(Chat::White, "Usage: #peqzone [zonename]");
		c->Message(Chat::White, "Optional Usage: #peqzone [zoneid]");
		return;
	}
	else {
		// name argument: resolve to an id, then apply the same allow-list
		zoneid = database.GetZoneID(sep->arg[1]);
		destzone = database.GetPEQZone(zoneid, 0);
		if(zoneid == 0) {
			c->Message(Chat::White, "Unable to locate zone '%s'", sep->arg[1]);
			return;
		}
		if(destzone == 0){
			c->Message(Chat::Red, "You cannot use this command to enter that zone!");
			return;
		}
		if(zoneid == zone->GetZoneID()) {
			c->Message(Chat::Red, "You cannot use this command on the zone you are in!");
			return;
		}
	}
	// optional rule-driven debuffs applied to the user on a successful port
	if(RuleB (Zone, UsePEQZoneDebuffs)){
		c->SpellOnTarget(RuleI(Zone, PEQZoneDebuff1), c);
		c->SpellOnTarget(RuleI(Zone, PEQZoneDebuff2), c);
	}
	//zone to safe coords
	c->GetPTimers().Start(pTimerPeqzoneReuse, RuleI(Zone, PEQZoneReuseTime));
	c->MovePC(zoneid, 0.0f, 0.0f, 0.0f, 0.0f, 0, ZoneToSafeCoords);
}
// #movechar <charactername> <zonename> - move a (typically offline) character
// to another zone by rewriting their saved location in the database.
void command_movechar(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0 || sep->arg[2][0] == 0) {
		c->Message(Chat::White, "Usage: #movechar [charactername] [zonename]");
		return;
	}

	// Special zones are off limits below commandMovecharToSpecials.
	// BUG FIX: the original expression mixed && and || without parentheses
	// (`admin < X && cshome || load || load2`), so "load"/"load2" were
	// rejected for every admin level. Parenthesize the zone-name tests.
	if (c->Admin() < commandMovecharToSpecials &&
	    (strcasecmp(sep->arg[2], "cshome") == 0 ||
	     strcasecmp(sep->arg[2], "load") == 0 ||
	     strcasecmp(sep->arg[2], "load2") == 0)) {
		c->Message(Chat::White, "Invalid zone name");
		return;
	}

	uint32 account_id = database.GetAccountIDByChar(sep->arg[1]);
	if (!account_id) {
		c->Message(Chat::White, "Character Does Not Exist");
		return;
	}

	// Below commandMovecharSelfOnly a GM may only move their own characters.
	if (c->Admin() < commandMovecharSelfOnly && account_id != c->AccountID()) {
		c->Message(Chat::Red, "You cannot move characters that are not on your account.");
		return;
	}

	if (!database.MoveCharacterToZone((char*) sep->arg[1], (char*) sep->arg[2])) {
		c->Message(Chat::White, "Character Move Failed!");
	} else {
		c->Message(Chat::White, "Character has been moved.");
	}
}
// #movement <subcommand> - debug interface into the mob movement manager.
// stats/clearstats act on the manager itself; the rest act on the target.
void command_movement(Client *c, const Seperator *sep)
{
	auto &manager = MobMovementManager::Get();

	const char *verb = sep->arg[1];
	if (verb[0] == 0) {
		c->Message(Chat::White, "Usage: #movement stats/clearstats/walkto/runto/rotateto/stop/packet");
		return;
	}

	if (strcasecmp(verb, "stats") == 0) {
		manager.DumpStats(c);
		return;
	}

	if (strcasecmp(verb, "clearstats") == 0) {
		manager.ClearStats();
		return;
	}

	if (strcasecmp(verb, "walkto") == 0) {
		auto target = c->GetTarget();
		if (target == nullptr) {
			c->Message(Chat::White, "No target found.");
			return;
		}
		target->WalkTo(c->GetX(), c->GetY(), c->GetZ());
		return;
	}

	if (strcasecmp(verb, "runto") == 0) {
		auto target = c->GetTarget();
		if (target == nullptr) {
			c->Message(Chat::White, "No target found.");
			return;
		}
		target->RunTo(c->GetX(), c->GetY(), c->GetZ());
		return;
	}

	if (strcasecmp(verb, "rotateto") == 0) {
		auto target = c->GetTarget();
		if (target == nullptr) {
			c->Message(Chat::White, "No target found.");
			return;
		}
		target->RotateToWalking(target->CalculateHeadingToTarget(c->GetX(), c->GetY()));
		return;
	}

	if (strcasecmp(verb, "stop") == 0) {
		auto target = c->GetTarget();
		if (target == nullptr) {
			c->Message(Chat::White, "No target found.");
			return;
		}
		target->StopNavigation();
		return;
	}

	if (strcasecmp(verb, "packet") == 0) {
		// raw movement command: x y z heading animation, sent to all clients
		auto target = c->GetTarget();
		if (target == nullptr) {
			c->Message(Chat::White, "No target found.");
			return;
		}
		manager.SendCommandToClients(target, atof(sep->arg[2]), atof(sep->arg[3]), atof(sep->arg[4]), atof(sep->arg[5]), atoi(sep->arg[6]), ClientRangeAny);
		return;
	}

	c->Message(Chat::White, "Usage: #movement stats/clearstats/walkto/runto/rotateto/stop/packet");
}
// #viewpetition <id> - print the text of a single petition. The whole table
// is fetched and filtered client-side by id.
void command_viewpetition(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #viewpetition (petition number) Type #listpetition for a list");
		return;
	}

	c->Message(Chat::Red," ID : Character Name , Petition Text");

	std::string query = "SELECT petid, charname, petitiontext FROM petitions ORDER BY petid";
	auto results = database.QueryDatabase(query);
	if (!results.Success()) {
		return;
	}

	LogInfo("View petition request from [{}], petition number: [{}]", c->GetName(), atoi(sep->argplus[1]) );

	if (results.RowCount() == 0) {
		c->Message(Chat::Red,"There was an error in your request: ID not found! Please check the Id and try again.");
		return;
	}

	// Only rows whose petid matches the requested id are echoed back.
	for (auto row = results.begin(); row != results.end(); ++row) {
		if (strcasecmp(row[0], sep->argplus[1]) == 0) {
			c->Message(Chat::Yellow, " %s: %s , %s ", row[0], row[1], row[2]);
		}
	}
}
// #petitioninfo <id> - print the metadata (character, account, zone, class,
// race, level) of a single petition, filtered client-side by id.
void command_petitioninfo(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #petitioninfo (petition number) Type #listpetition for a list");
		return;
	}

	std::string query = "SELECT petid, charname, accountname, zone, charclass, charrace, charlevel FROM petitions ORDER BY petid";
	auto results = database.QueryDatabase(query);
	if (!results.Success())
		return;

	// FIX: the format string was missing the [{}] placeholder, so the
	// petition number argument was never written to the log.
	LogInfo("Petition information request from [{}], petition number: [{}]", c->GetName(), atoi(sep->argplus[1]) );

	if (results.RowCount() == 0) {
		c->Message(Chat::Red,"There was an error in your request: ID not found! Please check the Id and try again.");
		return;
	}

	for (auto row = results.begin(); row != results.end(); ++row)
		if (strcasecmp(row[0],sep->argplus[1])== 0)
			c->Message(Chat::Red," ID : %s Character Name: %s Account Name: %s Zone: %s Character Class: %s Character Race: %s Character Level: %s", row[0],row[1],row[2],row[3],row[4],row[5],row[6]);
}
// #delpetition <id> - delete one petition row by id. "*" is rejected so the
// whole table cannot be wiped by accident.
void command_delpetition(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0 || strcasecmp(sep->arg[1],"*") == 0) {
		c->Message(Chat::White, "Usage: #delpetition (petition number) Type #listpetition for a list");
		return;
	}

	c->Message(Chat::Red,"Attempting to delete petition number: %i", atoi(sep->argplus[1]));

	// atoi() guarantees a numeric value, so the query is injection-safe.
	std::string query = StringFormat("DELETE FROM petitions WHERE petid = %i", atoi(sep->argplus[1]));
	auto results = database.QueryDatabase(query);
	if (!results.Success())
		return;

	// FIX: the format string was missing the [{}] placeholder, so the
	// petition number argument was never written to the log.
	LogInfo("Delete petition request from [{}], petition number: [{}]", c->GetName(), atoi(sep->argplus[1]) );
}
void command_listnpcs(Client *c, const Seperator *sep)
{
	// Retired command: kept only to point users at its replacement.
	c->Message(Chat::White, "Deprecated, use the #list command (#list npcs <search>)");
}
// #list <npcs|players|corpses|doors|objects> [search] - list entities of one
// type in the current zone, optionally filtered by a case-sensitive name
// substring. Each row carries a clickable "Goto" saylink to the entity's
// coordinates, and a summary line reports matches vs. total scanned.
void command_list(Client *c, const Seperator *sep)
{
	std::string search_type;
	if (strcasecmp(sep->arg[1], "npcs") == 0) {
		search_type = "npcs";
	}
	if (strcasecmp(sep->arg[1], "players") == 0) {
		search_type = "players";
	}
	if (strcasecmp(sep->arg[1], "corpses") == 0) {
		search_type = "corpses";
	}
	if (strcasecmp(sep->arg[1], "doors") == 0) {
		search_type = "doors";
	}
	if (strcasecmp(sep->arg[1], "objects") == 0) {
		search_type = "objects";
	}
	if (search_type.length() > 0) {
		int entity_count = 0;
		int found_count = 0;
		std::string search_string;
		// NOTE(review): sep->arg[2] looks always non-null here; this was
		// probably meant to test sep->arg[2][0] — confirm against Seperator.
		if (sep->arg[2]) {
			search_string = sep->arg[2];
		}
		/**
		 * NPC
		 */
		if (search_type.find("npcs") != std::string::npos) {
			auto &entity_list_search = entity_list.GetMobList();
			for (auto &itr : entity_list_search) {
				Mob *entity = itr.second;
				// the mob list also holds clients/pets; keep NPCs only
				if (!entity->IsNPC()) {
					continue;
				}
				entity_count++;
				std::string entity_name = entity->GetName();
				/**
				 * Filter by name
				 */
				if (search_string.length() > 0 && entity_name.find(search_string) == std::string::npos) {
					continue;
				}
				std::string saylink = StringFormat(
					"#goto %.0f %0.f %.0f",
					entity->GetX(),
					entity->GetY(),
					entity->GetZ());
				c->Message(
					0,
					"| %s | ID %5d | %s | x %.0f | y %0.f | z %.0f",
					EQEmu::SayLinkEngine::GenerateQuestSaylink(saylink, false, "Goto").c_str(),
					entity->GetID(),
					entity->GetName(),
					entity->GetX(),
					entity->GetY(),
					entity->GetZ()
				);
				found_count++;
			}
		}
		/**
		 * Client
		 */
		if (search_type.find("players") != std::string::npos) {
			auto &entity_list_search = entity_list.GetClientList();
			for (auto &itr : entity_list_search) {
				Client *entity = itr.second;
				entity_count++;
				std::string entity_name = entity->GetName();
				/**
				 * Filter by name
				 */
				if (search_string.length() > 0 && entity_name.find(search_string) == std::string::npos) {
					continue;
				}
				std::string saylink = StringFormat(
					"#goto %.0f %0.f %.0f",
					entity->GetX(),
					entity->GetY(),
					entity->GetZ());
				c->Message(
					0,
					"| %s | ID %5d | %s | x %.0f | y %0.f | z %.0f",
					EQEmu::SayLinkEngine::GenerateQuestSaylink(saylink, false, "Goto").c_str(),
					entity->GetID(),
					entity->GetName(),
					entity->GetX(),
					entity->GetY(),
					entity->GetZ()
				);
				found_count++;
			}
		}
		/**
		 * Corpse
		 */
		if (search_type.find("corpses") != std::string::npos) {
			auto &entity_list_search = entity_list.GetCorpseList();
			for (auto &itr : entity_list_search) {
				Corpse *entity = itr.second;
				entity_count++;
				std::string entity_name = entity->GetName();
				/**
				 * Filter by name
				 */
				if (search_string.length() > 0 && entity_name.find(search_string) == std::string::npos) {
					continue;
				}
				std::string saylink = StringFormat(
					"#goto %.0f %0.f %.0f",
					entity->GetX(),
					entity->GetY(),
					entity->GetZ());
				c->Message(
					0,
					"| %s | ID %5d | %s | x %.0f | y %0.f | z %.0f",
					EQEmu::SayLinkEngine::GenerateQuestSaylink(saylink, false, "Goto").c_str(),
					entity->GetID(),
					entity->GetName(),
					entity->GetX(),
					entity->GetY(),
					entity->GetZ()
				);
				found_count++;
			}
		}
		/**
		 * Doors (also reports the door's own door-id beside the entity id)
		 */
		if (search_type.find("doors") != std::string::npos) {
			auto &entity_list_search = entity_list.GetDoorsList();
			for (auto &itr : entity_list_search) {
				Doors * entity = itr.second;
				entity_count++;
				std::string entity_name = entity->GetDoorName();
				/**
				 * Filter by name
				 */
				if (search_string.length() > 0 && entity_name.find(search_string) == std::string::npos) {
					continue;
				}
				std::string saylink = StringFormat(
					"#goto %.0f %0.f %.0f",
					entity->GetX(),
					entity->GetY(),
					entity->GetZ());
				c->Message(
					0,
					"| %s | Entity ID %5d | Door ID %i | %s | x %.0f | y %0.f | z %.0f",
					EQEmu::SayLinkEngine::GenerateQuestSaylink(saylink, false, "Goto").c_str(),
					entity->GetID(),
					entity->GetDoorID(),
					entity->GetDoorName(),
					entity->GetX(),
					entity->GetY(),
					entity->GetZ()
				);
				found_count++;
			}
		}
		/**
		 * Objects (also reports the object's database id beside the entity id)
		 */
		if (search_type.find("objects") != std::string::npos) {
			auto &entity_list_search = entity_list.GetObjectList();
			for (auto &itr : entity_list_search) {
				Object * entity = itr.second;
				entity_count++;
				std::string entity_name = entity->GetModelName();
				/**
				 * Filter by name
				 */
				if (search_string.length() > 0 && entity_name.find(search_string) == std::string::npos) {
					continue;
				}
				std::string saylink = StringFormat(
					"#goto %.0f %0.f %.0f",
					entity->GetX(),
					entity->GetY(),
					entity->GetZ());
				c->Message(
					0,
					"| %s | Entity ID %5d | Object DBID %i | %s | x %.0f | y %0.f | z %.0f",
					EQEmu::SayLinkEngine::GenerateQuestSaylink(saylink, false, "Goto").c_str(),
					entity->GetID(),
					entity->GetDBID(),
					entity->GetModelName(),
					entity->GetX(),
					entity->GetY(),
					entity->GetZ()
				);
				found_count++;
			}
		}
		if (found_count) {
			c->Message(
				0, "Found (%i) of type (%s) in zone (%i) total",
				found_count,
				search_type.c_str(),
				entity_count
			);
		}
	}
	else {
		c->Message(Chat::White, "Usage of #list");
		c->Message(Chat::White, "- #list [npcs|players|corpses|doors|objects] [search]");
		c->Message(Chat::White, "- Example: #list npc (Blank for all)");
	}
}
void command_date(Client *c, const Seperator *sep)
{
	//yyyy mm dd hh mm local
	// #date yyyy mm dd [HH MM] - set the in-game calendar and clock.
	if (sep->arg[3][0] == 0 || !sep->IsNumber(1) || !sep->IsNumber(2) || !sep->IsNumber(3)) {
		c->Message(Chat::Red, "Usage: #date yyyy mm dd [HH MM]");
		return;
	}

	// Omitted hour/minute arguments default to the current EQ time of day.
	TimeOfDay_Struct eqTime;
	zone->zone_time.GetCurrentEQTimeOfDay(time(0), &eqTime);

	int hour   = sep->IsNumber(4) ? atoi(sep->arg[4]) : eqTime.hour;
	int minute = sep->IsNumber(5) ? atoi(sep->arg[5]) : eqTime.minute;

	c->Message(Chat::Red, "Setting world time to %s-%s-%s %i:%i...", sep->arg[1], sep->arg[2], sep->arg[3], hour, minute);
	zone->SetDate(atoi(sep->arg[1]), atoi(sep->arg[2]), atoi(sep->arg[3]), hour, minute);
}
// #timezone HH [MM] - set the zone's EQ time-zone offset, persist it, and
// push the updated time of day to every client in the zone.
void command_timezone(Client *c, const Seperator *sep)
{
	// FIX: the original guard was `arg empty && !IsNumber(1)`, so a
	// non-numeric argument (e.g. "#timezone abc") fell through and silently
	// set the timezone to 0. Either condition alone should show usage.
	if (sep->arg[1][0] == 0 || !sep->IsNumber(1)) {
		c->Message(Chat::Red, "Usage: #timezone HH [MM]");
		c->Message(Chat::Red, "Current timezone is: %ih %im", zone->zone_time.getEQTimeZoneHr(), zone->zone_time.getEQTimeZoneMin());
		return;
	}

	uint8 hours = atoi(sep->arg[1]);
	uint8 minutes = sep->IsNumber(2) ? atoi(sep->arg[2]) : 0;

	c->Message(Chat::Red, "Setting timezone to %i h %i m", hours, minutes);
	uint32 ntz = (hours * 60) + minutes;
	zone->zone_time.setEQTimeZone(ntz);
	database.SetZoneTZ(zone->GetZoneID(), zone->GetInstanceVersion(), ntz);

	// Update all clients with new TZ.
	auto outapp = new EQApplicationPacket(OP_TimeOfDay, sizeof(TimeOfDay_Struct));
	TimeOfDay_Struct* tod = (TimeOfDay_Struct*)outapp->pBuffer;
	zone->zone_time.GetCurrentEQTimeOfDay(time(0), tod);
	entity_list.QueueClients(c, outapp);
	safe_delete(outapp);
}
void command_invul(Client *c, const Seperator *sep)
{
	// #invulnerable [on/off] - toggle attack invulnerability on the targeted
	// client, or on the caster when no client is targeted.
	Client *subject = c;
	if (c->GetTarget() && c->GetTarget()->IsClient()) {
		subject = c->GetTarget()->CastToClient();
	}

	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #invulnerable [on/off]");
		return;
	}

	const bool enable = atobool(sep->arg[1]);
	subject->SetInvul(enable);
	c->Message(Chat::White, "%s is %s invulnerable from attack.", subject->GetName(), enable ? "now" : "no longer");
}
void command_hideme(Client *c, const Seperator *sep)
{
	// #hideme [on/off] - toggle GM invisibility and announce the change.
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #hideme [on/off]");
		return;
	}

	c->SetHideMe(atobool(sep->arg[1]));
	c->MessageString(Chat::Broadcasts, c->GetHideMe() ? NOW_INVISIBLE : NOW_VISIBLE, c->GetName());
}
// #emote <name|world|zone> <type#> <message> - broadcast a typed emote
// message to the current zone, the whole world, or one named player.
void command_emote(Client *c, const Seperator *sep)
{
	if (sep->arg[3][0] == 0)
		c->Message(Chat::White, "Usage: #emote [name | world | zone] type# message");
	else {
		if (strcasecmp(sep->arg[1], "zone") == 0){
			char* newmessage=0;
			// "^" splits the message into multiple lines; note only arg[3]
			// (the first word) is scanned/tokenized, not the whole argplus[3]
			// remainder. strtok also mutates the arg buffer in place.
			if(strstr(sep->arg[3],"^")==0)
				entity_list.Message(0, atoi(sep->arg[2]), sep->argplus[3]);
			else{
				for(newmessage = strtok((char*)sep->arg[3],"^");newmessage!=nullptr;newmessage=strtok(nullptr, "^"))
					entity_list.Message(0, atoi(sep->arg[2]), newmessage);
			}
		}
		// world/name targets require a live world-server link
		else if (!worldserver.Connected())
			c->Message(Chat::White, "Error: World server disconnected");
		else if (strcasecmp(sep->arg[1], "world") == 0)
			worldserver.SendEmoteMessage(0, 0, atoi(sep->arg[2]), sep->argplus[3]);
		else
			// anything else is treated as a character name
			worldserver.SendEmoteMessage(sep->arg[1], 0, atoi(sep->arg[2]), sep->argplus[3]);
	}
}
void command_fov(Client *c, const Seperator *sep)
{
	// Report whether the caster is positioned behind the target.
	Mob *target = c->GetTarget();
	if (target == nullptr) {
		c->Message(Chat::White, "I Need a target!");
		return;
	}

	if (c->BehindMob(target, c->GetX(), c->GetY())) {
		c->Message(Chat::White, "You are behind mob %s, it is looking to %d", target->GetName(), target->GetHeading());
	} else {
		c->Message(Chat::White, "You are NOT behind mob %s, it is looking to %d", target->GetName(), target->GetHeading());
	}
}
// #npcstats - dump the targeted NPC's core stats: identity, appearance,
// movement speeds, spawn/grid linkage, proximity box (if any), emote id,
// and its loot table contents.
void command_npcstats(Client *c, const Seperator *sep)
{
	if (c->GetTarget() == 0)
		c->Message(Chat::White, "ERROR: No target!");
	else if (!c->GetTarget()->IsNPC())
		c->Message(Chat::White, "ERROR: Target is not a NPC!");
	else {
		auto target_npc = c->GetTarget()->CastToNPC();
		c->Message(Chat::White, "# NPC Stats");
		c->Message(Chat::White, "- Name: %s NpcID: %u", target_npc->GetName(), target_npc->GetNPCTypeID());
		c->Message(Chat::White, "- Race: %i Level: %i Class: %i Material: %i", target_npc->GetRace(), target_npc->GetLevel(), target_npc->GetClass(), target_npc->GetTexture());
		c->Message(Chat::White, "- Current HP: %i Max HP: %i", target_npc->GetHP(), target_npc->GetMaxHP());
		//c->Message(Chat::White, "Weapon Item Number: %s", target_npc->GetWeapNo());
		c->Message(Chat::White, "- Gender: %i Size: %f Bodytype: %d", target_npc->GetGender(), target_npc->GetSize(), target_npc->GetBodyType());
		// 0.025f converts the stored speed values for display — presumably
		// into units-per-tick; confirm against the movement code.
		c->Message(Chat::White, "- Runspeed: %.3f Walkspeed: %.3f", static_cast<float>(0.025f * target_npc->GetRunspeed()), static_cast<float>(0.025f * target_npc->GetWalkspeed()));
		c->Message(Chat::White, "- Spawn Group: %i Grid: %i", target_npc->GetSpawnGroupId(), target_npc->GetGrid());
		if (target_npc->proximity) {
			// report the proximity box both in absolute coordinates and as
			// offsets (in parentheses) from the NPC's current position
			c->Message(Chat::White, "- Proximity: Enabled");
			c->Message(Chat::White, "-- Cur_X: %1.3f, Cur_Y: %1.3f, Cur_Z: %1.3f", target_npc->GetX(), target_npc->GetY(), target_npc->GetZ());
			c->Message(Chat::White, "-- Min_X: %1.3f(%1.3f), Max_X: %1.3f(%1.3f), X_Range: %1.3f", target_npc->proximity->min_x, (target_npc->proximity->min_x - target_npc->GetX()), target_npc->proximity->max_x, (target_npc->proximity->max_x - target_npc->GetX()), (target_npc->proximity->max_x - target_npc->proximity->min_x));
			c->Message(Chat::White, "-- Min_Y: %1.3f(%1.3f), Max_Y: %1.3f(%1.3f), Y_Range: %1.3f", target_npc->proximity->min_y, (target_npc->proximity->min_y - target_npc->GetY()), target_npc->proximity->max_y, (target_npc->proximity->max_y - target_npc->GetY()), (target_npc->proximity->max_y - target_npc->proximity->min_y));
			c->Message(Chat::White, "-- Min_Z: %1.3f(%1.3f), Max_Z: %1.3f(%1.3f), Z_Range: %1.3f", target_npc->proximity->min_z, (target_npc->proximity->min_z - target_npc->GetZ()), target_npc->proximity->max_z, (target_npc->proximity->max_z - target_npc->GetZ()), (target_npc->proximity->max_z - target_npc->proximity->min_z));
			c->Message(Chat::White, "-- Say: %s", (target_npc->proximity->say ? "Enabled" : "Disabled"));
		}
		else {
			c->Message(Chat::White, "-Proximity: Disabled");
		}
		c->Message(Chat::White, "");
		c->Message(Chat::White, "EmoteID: %i", target_npc->GetEmoteID());
		target_npc->QueryLoot(c);
	}
}
// #zclip <min clip> <max clip> [fog_min0 fog_min1 fog_max0 fog_max1]
// Patches the zone header's clip planes and rebroadcasts the OP_NewZone
// packet to all clients in the zone.
void command_zclip(Client *c, const Seperator *sep)
{
	// modifys and resends zhdr packet
	if (sep->arg[2][0] == 0) {
		c->Message(Chat::White, "Usage: #zclip <min clip> <max clip>");
		return;
	}

	// FIX: validate with atof(), matching how the values are stored; the
	// old atoi() checks rejected legitimate fractional clips such as 0.5.
	float minclip = atof(sep->arg[1]);
	float maxclip = atof(sep->arg[2]);
	if (minclip <= 0) {
		c->Message(Chat::White, "ERROR: Min clip can not be zero or less!");
		return;
	}
	if (maxclip <= 0) {
		c->Message(Chat::White, "ERROR: Max clip can not be zero or less!");
		return;
	}
	if (minclip > maxclip) {
		c->Message(Chat::White, "ERROR: Min clip is greater than max clip!");
		return;
	}

	zone->newzone_data.minclip = minclip;
	zone->newzone_data.maxclip = maxclip;
	// optional fog clip overrides (slots 0 and 1 of each array)
	if (sep->arg[3][0] != 0)
		zone->newzone_data.fog_minclip[0] = atof(sep->arg[3]);
	if (sep->arg[4][0] != 0)
		zone->newzone_data.fog_minclip[1] = atof(sep->arg[4]);
	if (sep->arg[5][0] != 0)
		zone->newzone_data.fog_maxclip[0] = atof(sep->arg[5]);
	if (sep->arg[6][0] != 0)
		zone->newzone_data.fog_maxclip[1] = atof(sep->arg[6]);

	auto outapp = new EQApplicationPacket(OP_NewZone, sizeof(NewZone_Struct));
	memcpy(outapp->pBuffer, &zone->newzone_data, outapp->size);
	entity_list.QueueClients(c, outapp);
	safe_delete(outapp);
}
// #npccast <targetname|entityid> <spellid> - make the targeted NPC cast a
// spell on the entity identified by name or by numeric entity id.
void command_npccast(Client *c, const Seperator *sep)
{
	Mob *npc = c->GetTarget();
	const bool have_npc = npc && npc->IsNPC();

	// Name lookup path: first argument is a non-numeric name.
	if (have_npc && !sep->IsNumber(1) && sep->arg[1] != 0 && sep->IsNumber(2)) {
		Mob* spelltar = entity_list.GetMob(sep->arg[1]);
		if (spelltar) {
			npc->CastSpell(atoi(sep->arg[2]), spelltar->GetID());
		} else {
			c->Message(Chat::White, "Error: %s not found", sep->arg[1]);
		}
		return;
	}

	// Entity-id lookup path: first argument is numeric.
	if (have_npc && sep->IsNumber(1) && sep->IsNumber(2)) {
		Mob* spelltar = entity_list.GetMob(atoi(sep->arg[1]));
		if (spelltar) {
			npc->CastSpell(atoi(sep->arg[2]), spelltar->GetID());
		} else {
			c->Message(Chat::White, "Error: target ID %i not found", atoi(sep->arg[1]));
		}
		return;
	}

	c->Message(Chat::White, "Usage: (needs NPC targeted) #npccast targetname/entityid spellid");
}
// #zstats - print the current zone header values (sky, fog colour, safe
// coordinates, underworld depth, clip planes).
void command_zstats(Client *c, const Seperator *sep)
{
	c->Message(Chat::White, "Zone Header Data:");
	c->Message(Chat::White, "Sky Type: %i", zone->newzone_data.sky);
	// FIX: labels were "Red/Blue/Green" while the arguments are red, green,
	// blue — the green value was displayed under the "Blue" label.
	c->Message(Chat::White, "Fog Colour: Red: %i; Green: %i; Blue: %i", zone->newzone_data.fog_red[0], zone->newzone_data.fog_green[0], zone->newzone_data.fog_blue[0]);
	c->Message(Chat::White, "Safe Coords: %f, %f, %f", zone->newzone_data.safe_x, zone->newzone_data.safe_y, zone->newzone_data.safe_z);
	c->Message(Chat::White, "Underworld Coords: %f", zone->newzone_data.underworld);
	c->Message(Chat::White, "Clip Plane: %f - %f", zone->newzone_data.minclip, zone->newzone_data.maxclip);
}
// #permaclass <classnum> - permanently change a client's base class, save,
// and kick them to character select so the change takes effect.
void command_permaclass(Client *c, const Seperator *sep)
{
	Client *subject = c;
	if (c->GetTarget() && c->GetTarget()->IsClient()) {
		subject = c->GetTarget()->CastToClient();
	}

	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White,"Usage: #permaclass <classnum>");
		return;
	}
	if (!subject->IsClient()) {
		c->Message(Chat::White,"Target is not a client.");
		return;
	}

	c->Message(Chat::White, "Setting %s's class...Sending to char select.", subject->GetName());
	LogInfo("Class change request from [{}] for [{}], requested class:[{}]", c->GetName(), subject->GetName(), atoi(sep->arg[1]) );
	subject->SetBaseClass(atoi(sep->arg[1]));
	subject->Save();
	subject->Kick("Class was changed.");
}
// #permarace <racenum> - permanently change a client's base race (and the
// matching default gender), then apply an illusion so it shows immediately.
void command_permarace(Client *c, const Seperator *sep)
{
	Client *subject = c;
	if (c->GetTarget() && c->GetTarget()->IsClient()) {
		subject = c->GetTarget()->CastToClient();
	}

	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White,"Usage: #permarace <racenum>");
		c->Message(Chat::White,"NOTE: Not all models are global. If a model is not global, it will appear as a human on character select and in zones without the model.");
		return;
	}
	if (!subject->IsClient()) {
		c->Message(Chat::White,"Target is not a client.");
		return;
	}

	c->Message(Chat::White, "Setting %s's race - zone to take effect", subject->GetName());
	LogInfo("Permanant race change request from [{}] for [{}], requested race:[{}]", c->GetName(), subject->GetName(), atoi(sep->arg[1]) );

	// Some races are single-gender; pick the matching default gender.
	uint32 default_gender = Mob::GetDefaultGender(atoi(sep->arg[1]), subject->GetBaseGender());
	subject->SetBaseRace(atoi(sep->arg[1]));
	subject->SetBaseGender(default_gender);
	subject->Save();
	subject->SendIllusionPacket(atoi(sep->arg[1]));
}
// #permagender <gendernum> - permanently change a client's base gender and
// apply an illusion so it shows immediately.
void command_permagender(Client *c, const Seperator *sep)
{
	Client *subject = c;
	if (c->GetTarget() && c->GetTarget()->IsClient()) {
		subject = c->GetTarget()->CastToClient();
	}

	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White,"Usage: #permagender <gendernum>");
		c->Message(Chat::White,"Gender Numbers: 0=Male, 1=Female, 2=Neuter");
		return;
	}
	if (!subject->IsClient()) {
		c->Message(Chat::White,"Target is not a client.");
		return;
	}

	c->Message(Chat::White, "Setting %s's gender - zone to take effect", subject->GetName());
	LogInfo("Permanant gender change request from [{}] for [{}], requested gender:[{}]", c->GetName(), subject->GetName(), atoi(sep->arg[1]) );
	subject->SetBaseGender(atoi(sep->arg[1]));
	subject->Save();
	subject->SendIllusionPacket(atoi(sep->arg[1]));
}
// #weather <0/1/2/3> - control zone weather: 0 clears, 1 rain, 2 snow,
// 3 manual (type + intensity). Starting weather is only allowed when the
// zone is currently clear; when weather is active, any invocation clears it
// with the shutoff packet matching the active weather type.
void command_weather(Client *c, const Seperator *sep)
{
	if (!(sep->arg[1][0] == '0' || sep->arg[1][0] == '1' || sep->arg[1][0] == '2' || sep->arg[1][0] == '3')) {
		c->Message(Chat::White, "Usage: #weather <0/1/2/3> - Off/Rain/Snow/Manual.");
	}
	else if(zone->zone_weather == 0) {
		// zone currently clear: a start request is honored
		if(sep->arg[1][0] == '3') { // Put in modifications here because it had a very good chance at screwing up the client's weather system if rain was sent during snow -T7
			if(sep->arg[2][0] != 0 && sep->arg[3][0] != 0) {
				c->Message(Chat::White, "Sending weather packet... TYPE=%s, INTENSITY=%s", sep->arg[2], sep->arg[3]);
				zone->zone_weather = atoi(sep->arg[2]);
				// OP_Weather payload: byte 0 = weather type, byte 4 = intensity
				auto outapp = new EQApplicationPacket(OP_Weather, 8);
				outapp->pBuffer[0] = atoi(sep->arg[2]);
				outapp->pBuffer[4] = atoi(sep->arg[3]); // This number changes in the packets, intensity?
				entity_list.QueueClients(c, outapp);
				safe_delete(outapp);
			}
			else {
				c->Message(Chat::White, "Manual Usage: #weather 3 <type> <intensity>");
			}
		}
		else if(sep->arg[1][0] == '2') {
			entity_list.Message(0, 0, "Snowflakes begin to fall from the sky.");
			zone->zone_weather = 2;
			auto outapp = new EQApplicationPacket(OP_Weather, 8);
			outapp->pBuffer[0] = 0x01;
			outapp->pBuffer[4] = 0x02; // This number changes in the packets, intensity?
			entity_list.QueueClients(c, outapp);
			safe_delete(outapp);
		}
		else if(sep->arg[1][0] == '1') {
			entity_list.Message(0, 0, "Raindrops begin to fall from the sky.");
			zone->zone_weather = 1;
			auto outapp = new EQApplicationPacket(OP_Weather, 8);
			outapp->pBuffer[4] = 0x01; // This is how it's done in Fear, and you can see a decent distance with it at this value
			entity_list.QueueClients(c, outapp);
			safe_delete(outapp);
		}
	}
	else {
		// weather is active: this invocation turns it off
		if(zone->zone_weather == 1) { // Doing this because if you have rain/snow on, you can only turn one off.
			entity_list.Message(0, 0, "The sky clears as the rain ceases to fall.");
			zone->zone_weather = 0;
			auto outapp = new EQApplicationPacket(OP_Weather, 8);
			// To shutoff weather you send an empty 8 byte packet (You get this everytime you zone even if the sky is clear)
			entity_list.QueueClients(c, outapp);
			safe_delete(outapp);
		}
		else if(zone->zone_weather == 2) {
			entity_list.Message(0, 0, "The sky clears as the snow stops falling.");
			zone->zone_weather = 0;
			auto outapp = new EQApplicationPacket(OP_Weather, 8);
			// To shutoff weather you send an empty 8 byte packet (You get this everytime you zone even if the sky is clear)
			outapp->pBuffer[0] = 0x01; // Snow has it's own shutoff packet
			entity_list.QueueClients(c, outapp);
			safe_delete(outapp);
		}
		else {
			entity_list.Message(0, 0, "The sky clears.");
			zone->zone_weather = 0;
			auto outapp = new EQApplicationPacket(OP_Weather, 8);
			// To shutoff weather you send an empty 8 byte packet (You get this everytime you zone even if the sky is clear)
			entity_list.QueueClients(c, outapp);
			safe_delete(outapp);
		}
	}
}
// #zheader <zone name> - reload the named zone's header from the database
// and push the resulting OP_NewZone packet to every client in the zone.
void command_zheader(Client *c, const Seperator *sep)
{
	// sends zhdr packet
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #zheader <zone name>");
		return;
	}
	if (database.GetZoneID(sep->argplus[1]) == 0) {
		c->Message(Chat::White, "Invalid Zone Name: %s", sep->argplus[1]);
		return;
	}

	if (zone->LoadZoneCFG(sep->argplus[1], 0)) {
		c->Message(Chat::White, "Successfully loaded zone header for %s from database.", sep->argplus[1]);
	} else {
		c->Message(Chat::White, "Failed to load zone header %s from database", sep->argplus[1]);
	}

	// broadcast whatever header is now loaded, even after a failed load
	auto outapp = new EQApplicationPacket(OP_NewZone, sizeof(NewZone_Struct));
	memcpy(outapp->pBuffer, &zone->newzone_data, outapp->size);
	entity_list.QueueClients(c, outapp);
	safe_delete(outapp);
}
void command_zsky(Client *c, const Seperator *sep)
{
	// modifys and resends zhdr packet
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #zsky <sky type>");
		return;
	}

	int sky_type = atoi(sep->arg[1]);
	if (sky_type < 0 || sky_type > 255) {
		c->Message(Chat::White, "ERROR: Sky type can not be less than 0 or greater than 255!");
		return;
	}

	zone->newzone_data.sky = sky_type;
	auto outapp = new EQApplicationPacket(OP_NewZone, sizeof(NewZone_Struct));
	memcpy(outapp->pBuffer, &zone->newzone_data, outapp->size);
	entity_list.QueueClients(c, outapp);
	safe_delete(outapp);
}
void command_zcolor(Client *c, const Seperator *sep)
{
	// modifys and resends zhdr packet
	if (sep->arg[3][0] == 0) {
		c->Message(Chat::White, "Usage: #zcolor <red> <green> <blue>");
		return;
	}

	int red   = atoi(sep->arg[1]);
	int green = atoi(sep->arg[2]);
	int blue  = atoi(sep->arg[3]);

	if (red < 0 || red > 255) {
		c->Message(Chat::White, "ERROR: Red can not be less than 0 or greater than 255!");
		return;
	}
	if (green < 0 || green > 255) {
		c->Message(Chat::White, "ERROR: Green can not be less than 0 or greater than 255!");
		return;
	}
	if (blue < 0 || blue > 255) {
		c->Message(Chat::White, "ERROR: Blue can not be less than 0 or greater than 255!");
		return;
	}

	// all four fog colour slots are set to the same colour
	for (int slot = 0; slot < 4; slot++) {
		zone->newzone_data.fog_red[slot]   = red;
		zone->newzone_data.fog_green[slot] = green;
		zone->newzone_data.fog_blue[slot]  = blue;
	}

	auto outapp = new EQApplicationPacket(OP_NewZone, sizeof(NewZone_Struct));
	memcpy(outapp->pBuffer, &zone->newzone_data, outapp->size);
	entity_list.QueueClients(c, outapp);
	safe_delete(outapp);
}
// #spon: memorizes the SPELLBAR_UNLOCK pseudo-spell into slot 0, which
// (per the constant's name) re-enables the caller's spell bar — the
// counterpart to #spoff below.
void command_spon(Client *c, const Seperator *sep)
{
	c->MemorizeSpell(0, SPELLBAR_UNLOCK, memSpellSpellbar);
}
// #spoff: sends a zero-length OP_ManaChange packet to the caller.
// NOTE(review): the client-side effect (locking the spell bar, paired with
// #spon) is inferred from the command pair — confirm against client behavior.
void command_spoff(Client *c, const Seperator *sep)
{
	auto outapp = new EQApplicationPacket(OP_ManaChange, 0);
	outapp->priority = 5;
	c->QueuePacket(outapp);
	safe_delete(outapp);
}
void command_itemtest(Client *c, const Seperator *sep)
{
	// #itemtest: dev utility — reads a raw item-serialization dump from disk
	// and sends it to the client as an item link response, to test new item
	// layouts.
	char chBuffer[8192] = {0};
	FILE* f = fopen("c:\\EQEMUcvs\\ItemDump.txt", "rb");
	if (!f) {
		c->Message(Chat::Red, "Error: Could not open c:\\EQEMUcvs\\ItemDump.txt");
		return;
	}
	// Fix: the original call swapped fread's size/count arguments
	// (fread(buf, sizeof(buf), sizeof(char), f)), so a file shorter than the
	// buffer counted as 0 complete elements. Read byte-sized elements and
	// cap at sizeof-1 so the buffer is always NUL-terminated for strlen().
	size_t bytes_read = fread(chBuffer, sizeof(char), sizeof(chBuffer) - 1, f);
	fclose(f);
	chBuffer[bytes_read] = '\0';
	auto outapp = new EQApplicationPacket(OP_ItemLinkResponse, strlen(chBuffer) + 5);
	// Payload starts at offset 4; the leading bytes stay zeroed.
	memcpy(&outapp->pBuffer[4], chBuffer, strlen(chBuffer));
	c->QueuePacket(outapp);
	safe_delete(outapp);
}
void command_gassign(Client *c, const Seperator *sep)
{
	// #gassign: assign a wander grid to the targeted NPC's spawn2 entry.
	Mob *target = c->GetTarget();
	const bool usable_target =
		sep->IsNumber(1) && target && target->IsNPC() &&
		target->CastToNPC()->GetSpawnPointID() > 0;
	if (!usable_target) {
		c->Message(Chat::White, "Usage: #gassign [num] - must have an npc target!");
		return;
	}
	database.AssignGrid(c, atoi(sep->arg[1]), target->CastToNPC()->GetSpawnPointID());
}
void command_ai(Client *c, const Seperator *sep)
{
	// #ai: inspect and adjust AI settings on the targeted mob.
	// Sub-commands: factionid, spellslist, con, guard, roambox, stop, start
	// (stop/start additionally require commandToggleAI admin status).
	Mob *target = c->GetTarget();
	if (strcasecmp(sep->arg[1], "factionid") == 0) {
		if (target && sep->IsNumber(2)) {
			if (target->IsNPC())
				target->CastToNPC()->SetNPCFactionID(atoi(sep->arg[2]));
			else
				c->Message(Chat::White, "%s is not an NPC.", target->GetName());
		}
		else
			c->Message(Chat::White, "Usage: (targeted) #ai factionid [factionid]");
	}
	else if (strcasecmp(sep->arg[1], "spellslist") == 0) {
		if (target && sep->IsNumber(2) && atoi(sep->arg[2]) >= 0) {
			if (target->IsNPC())
				target->CastToNPC()->AI_AddNPCSpells(atoi(sep->arg[2]));
			else
				c->Message(Chat::White, "%s is not an NPC.", target->GetName());
		}
		else
			c->Message(Chat::White, "Usage: (targeted) #ai spellslist [npc_spells_id]");
	}
	else if (strcasecmp(sep->arg[1], "con") == 0) {
		if (target && sep->arg[2][0] != 0) {
			Mob* tar2 = entity_list.GetMob(sep->arg[2]);
			if (tar2)
				c->Message(Chat::White, "%s considering %s: %i", target->GetName(), tar2->GetName(), tar2->GetReverseFactionCon(target));
			else
				c->Message(Chat::White, "Error: %s not found.", sep->arg[2]);
		}
		else
			c->Message(Chat::White, "Usage: (targeted) #ai con [mob name]");
	}
	else if (strcasecmp(sep->arg[1], "guard") == 0) {
		if (target && target->IsNPC())
			target->CastToNPC()->SaveGuardSpot(target->GetPosition());
		else
			c->Message(Chat::White, "Usage: (targeted) #ai guard - sets npc to guard the current location (use #summon to move)");
	}
	else if (strcasecmp(sep->arg[1], "roambox") == 0) {
		if (target && target->IsAIControlled() && target->IsNPC()) {
			// Two argument forms: an explicit box (dist max_x min_x max_y
			// min_y) or a square box derived from a single roam distance.
			if ((sep->argnum == 6 || sep->argnum == 7 || sep->argnum == 8) && sep->IsNumber(2) && sep->IsNumber(3) && sep->IsNumber(4) && sep->IsNumber(5) && sep->IsNumber(6)) {
				uint32 tmp = 2500;  // default delay
				uint32 tmp2 = 2500; // default min delay
				if (sep->IsNumber(7))
					tmp = atoi(sep->arg[7]);
				if (sep->IsNumber(8))
					tmp2 = atoi(sep->arg[8]);
				target->CastToNPC()->AI_SetRoambox(atof(sep->arg[2]), atof(sep->arg[3]), atof(sep->arg[4]), atof(sep->arg[5]), atof(sep->arg[6]), tmp, tmp2);
			}
			else if ((sep->argnum == 3 || sep->argnum == 4) && sep->IsNumber(2) && sep->IsNumber(3)) {
				uint32 tmp = 2500;
				uint32 tmp2 = 2500;
				if (sep->IsNumber(4))
					tmp = atoi(sep->arg[4]);
				if (sep->IsNumber(5))
					tmp2 = atoi(sep->arg[5]);
				target->CastToNPC()->AI_SetRoambox(atof(sep->arg[2]), atof(sep->arg[3]), tmp, tmp2);
			}
			else {
				c->Message(Chat::White, "Usage: #ai roambox dist max_x min_x max_y min_y [delay] [mindelay]");
				c->Message(Chat::White, "Usage: #ai roambox dist roamdist [delay] [mindelay]");
			}
		}
		else
			c->Message(Chat::White, "You need a AI NPC targeted");
	}
	else if (strcasecmp(sep->arg[1], "stop") == 0 && c->Admin() >= commandToggleAI) {
		if (target) {
			if (target->IsAIControlled())
				target->AI_Stop();
			else
				c->Message(Chat::White, "Error: Target is not AI controlled");
		}
		else
			c->Message(Chat::White, "Usage: Target a Mob with AI enabled and use this to turn off their AI.");
	}
	else if (strcasecmp(sep->arg[1], "start") == 0 && c->Admin() >= commandToggleAI) {
		if (target) {
			if (!target->IsAIControlled())
				target->AI_Start();
			else
				c->Message(Chat::White, "Error: Target is already AI controlled");
		}
		else
			c->Message(Chat::White, "Usage: Target a Mob with AI disabled and use this to turn on their AI.");
	}
	else {
		// Fix: the help list previously omitted roambox/stop/start even
		// though those sub-commands exist above.
		c->Message(Chat::White, "#AI Sub-commands");
		c->Message(Chat::White, "  factionid");
		c->Message(Chat::White, "  spellslist");
		c->Message(Chat::White, "  con");
		c->Message(Chat::White, "  guard");
		c->Message(Chat::White, "  roambox");
		c->Message(Chat::White, "  stop");
		c->Message(Chat::White, "  start");
	}
}
void command_worldshutdown(Client *c, const Seperator *sep)
{
	// GM command to shutdown world server and all zone servers
	//
	// Forms:
	//   #worldshutdown <seconds> <interval>  - timed shutdown with warnings
	//   #worldshutdown now                   - immediate shutdown
	//   #worldshutdown disable               - cancel a scheduled shutdown
	uint32 time=0;
	uint32 interval=0;
	if (worldserver.Connected()) {
		// Timed form: both arguments must be positive numbers. Note the
		// assignments inside the condition: time/interval are captured here.
		if(sep->IsNumber(1) && sep->IsNumber(2) && ((time=atoi(sep->arg[1]))>0) && ((interval=atoi(sep->arg[2]))>0)) {
			worldserver.SendEmoteMessage(0,0,15,"<SYSTEMWIDE MESSAGE>:SYSTEM MSG:World coming down in %i minutes, everyone log out before this time.", (time / 60 ));
			c->Message(Chat::White, "Sending shutdown packet now, World will shutdown in: %i minutes with an interval of: %i seconds", (time / 60), interval);
			auto pack = new ServerPacket(ServerOP_ShutdownAll, sizeof(WorldShutDown_Struct));
			WorldShutDown_Struct* wsd = (WorldShutDown_Struct*)pack->pBuffer;
			// Wire format carries milliseconds; arguments are seconds.
			wsd->time=time*1000;
			wsd->interval=(interval*1000);
			worldserver.SendPacket(pack);
			safe_delete(pack);
		}
		else if(strcasecmp(sep->arg[1], "now") == 0){
			worldserver.SendEmoteMessage(0,0,15,"<SYSTEMWIDE MESSAGE>:SYSTEM MSG:World coming down, everyone log out now.");
			c->Message(Chat::White, "Sending shutdown packet");
			// A zero-size ServerOP_ShutdownAll packet requests an immediate
			// shutdown (no payload).
			auto pack = new ServerPacket;
			pack->opcode = ServerOP_ShutdownAll;
			pack->size=0;
			worldserver.SendPacket(pack);
			safe_delete(pack);
		}
		else if(strcasecmp(sep->arg[1], "disable") == 0){
			c->Message(Chat::White, "Shutdown prevented, next time I may not be so forgiving...");
			// time=0 / interval=0 cancels a previously scheduled shutdown.
			auto pack = new ServerPacket(ServerOP_ShutdownAll, sizeof(WorldShutDown_Struct));
			WorldShutDown_Struct* wsd = (WorldShutDown_Struct*)pack->pBuffer;
			wsd->time=0;
			wsd->interval=0;
			worldserver.SendPacket(pack);
			safe_delete(pack);
		}
		else{
			c->Message(Chat::White,"#worldshutdown - Shuts down the server and all zones.");
			c->Message(Chat::White,"Usage: #worldshutdown now - Shuts down the server and all zones immediately.");
			c->Message(Chat::White,"Usage: #worldshutdown disable - Stops the server from a previously scheduled shut down.");
			c->Message(Chat::White,"Usage: #worldshutdown [timer] [interval] - Shuts down the server and all zones after [timer] seconds and sends warning every [interval] seconds.");
		}
	}
	else
		c->Message(Chat::White, "Error: World server disconnected");
}
// #sendzonespawns: re-sends the spawn packets for this zone's entities to
// the requesting client.
void command_sendzonespawns(Client *c, const Seperator *sep)
{
	entity_list.SendZoneSpawns(c);
}
void command_zsave(Client *c, const Seperator *sep)
{
	// #zsave: persist the in-memory zone header back to the database.
	if (zone->SaveZoneCFG()) {
		// Fix: the success message was printed with Chat::Red (the error
		// color used everywhere else in this file); use Chat::White.
		c->Message(Chat::White, "Zone header saved successfully.");
	}
	else {
		c->Message(Chat::Red, "ERROR: Zone header data was NOT saved.");
	}
}
void command_dbspawn2(Client *c, const Seperator *sep)
{
	// #dbspawn2: create a new spawn2 database entry at the caller's position
	// for the given spawngroup, with optional spawn-condition parameters.
	if (!(sep->IsNumber(1) && sep->IsNumber(2) && sep->IsNumber(3))) {
		c->Message(Chat::White, "Usage: #dbspawn2 spawngroup respawn variance [condition_id] [condition_min]");
		return;
	}
	LogInfo("Spawning database spawn");
	uint16 condition_id = 0;
	int16 condition_min = 0;
	if (sep->IsNumber(4)) {
		condition_id = atoi(sep->arg[4]);
		if (sep->IsNumber(5)) {
			condition_min = atoi(sep->arg[5]);
		}
	}
	database.CreateSpawn2(c, atoi(sep->arg[1]), zone->GetShortName(), c->GetPosition(), atoi(sep->arg[2]), atoi(sep->arg[3]), condition_id, condition_min);
}
// #shutdown: shuts this zone process down by invoking the signal handler
// with signal number 2 (SIGINT) — the same path as a console Ctrl-C.
void command_shutdown(Client *c, const Seperator *sep)
{
	CatchSignal(2);
}
void command_delacct(Client *c, const Seperator *sep)
{
	// #delacct: delete a game account by name. ParseAccountString splits an
	// optional "loginserver:name" prefix form.
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Format: #delacct accountname");
		return;
	}
	std::string user;
	std::string loginserver;
	ParseAccountString(sep->arg[1], user, loginserver);
	if (database.DeleteAccount(user.c_str(), loginserver.c_str())) {
		c->Message(Chat::White, "The account was deleted.");
	}
	else {
		c->Message(Chat::White, "Unable to delete account.");
	}
}
void command_setpass(Client *c, const Seperator *sep)
{
	// #setpass: change an account's local password, provided the issuing
	// GM's status is at least as high as the account owner's.
	if (sep->argnum != 2) {
		c->Message(Chat::White, "Format: #setpass accountname password");
		return;
	}
	std::string user;
	std::string loginserver;
	ParseAccountString(sep->arg[1], user, loginserver);
	int16 account_status = 0;
	uint32 account_id = database.GetAccountIDByName(user.c_str(), loginserver.c_str(), &account_status);
	if (!account_id) {
		c->Message(Chat::White, "Error: Account not found");
	}
	else if (account_status > c->Admin()) {
		c->Message(Chat::White, "Cannot change password: Account's status is higher than yours");
	}
	else if (database.SetLocalPassword(account_id, sep->arg[2])) {
		c->Message(Chat::White, "Password changed.");
	}
	else {
		c->Message(Chat::White, "Error changing password.");
	}
}
void command_setlsinfo(Client *c, const Seperator *sep)
{
	// #setlsinfo: send an e-mail/password update for the caller's
	// login-server account to the world server.
	if (sep->argnum != 2) {
		c->Message(Chat::White, "Format: #setlsinfo email password");
		return;
	}
	auto pack = new ServerPacket(ServerOP_LSAccountUpdate, sizeof(ServerLSAccountUpdate_Struct));
	ServerLSAccountUpdate_Struct* s = (ServerLSAccountUpdate_Struct *) pack->pBuffer;
	s->useraccountid = c->LSAccountID();
	strn0cpy(s->useraccount, c->AccountName(), 30);
	strn0cpy(s->user_email, sep->arg[1], 100);
	strn0cpy(s->userpassword, sep->arg[2], 50);
	worldserver.SendPacket(pack);
	// Fix: every other worldserver.SendPacket() caller in this file frees
	// its packet afterwards; this one leaked it.
	safe_delete(pack);
	c->Message(Chat::White, "Login Server update packet sent.");
}
void command_grid(Client *c, const Seperator *sep)
{
	// #grid max|add|show|delete - manage wander grids in this zone.
	// "show" visualizes the targeted NPC's grid by spawning marker NPCs.
	if (strcasecmp("max", sep->arg[1]) == 0) {
		c->Message(Chat::White, "Highest grid ID in this zone: %d", database.GetHighestGrid(zone->GetZoneID()));
	}
	else if (strcasecmp("add", sep->arg[1]) == 0) {
		database.ModifyGrid(c, false, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), zone->GetZoneID());
	}
	else if (strcasecmp("show", sep->arg[1]) == 0) {
		Mob *target = c->GetTarget();
		if (!target || !target->IsNPC()) {
			c->Message(Chat::White, "You need a NPC target!");
			return;
		}
		std::string query = StringFormat(
			"SELECT `x`, `y`, `z`, `heading`, `number`, `pause` "
			"FROM `grid_entries` "
			"WHERE `zoneid` = %u and `gridid` = %i "
			"ORDER BY `number` ",
			zone->GetZoneID(),
			target->CastToNPC()->GetGrid()
		);
		auto results = database.QueryDatabase(query);
		if (!results.Success()) {
			c->Message(Chat::White, "Error querying database.");
			c->Message(Chat::White, query.c_str());
			// Fix: bail out here — the old code fell through and inspected
			// the row count of a failed result set.
			return;
		}
		if (results.RowCount() == 0) {
			c->Message(Chat::White, "No grid found");
			return;
		}
		/**
		 * Depop any node npc's already spawned
		 */
		auto &mob_list = entity_list.GetMobList();
		for (auto itr = mob_list.begin(); itr != mob_list.end(); ++itr) {
			Mob *mob = itr->second;
			// Race 2254 marks the grid-node visualization NPCs.
			if (mob->IsNPC() && mob->GetRace() == 2254) {
				mob->Depop();
			}
		}
		/**
		 * Spawn grid nodes
		 */
		for (auto row = results.begin(); row != results.end(); ++row) {
			auto node_position = glm::vec4(atof(row[0]), atof(row[1]), atof(row[2]), atof(row[3]));
			NPC *npc = NPC::SpawnGridNodeNPC(
				target->GetCleanName(),
				node_position,
				static_cast<uint32>(target->CastToNPC()->GetGrid()),
				static_cast<uint32>(atoi(row[4])),
				static_cast<uint32>(atoi(row[5]))
			);
			npc->SetFlyMode(GravityBehavior::Flying);
			npc->GMMove(node_position.x, node_position.y, node_position.z, node_position.w);
		}
	}
	else if (strcasecmp("delete", sep->arg[1]) == 0) {
		database.ModifyGrid(c, true, atoi(sep->arg[2]), 0, 0, zone->GetZoneID());
	}
	else {
		c->Message(Chat::White, "Usage: #grid add/delete grid_num wandertype pausetype");
		c->Message(Chat::White, "Usage: #grid max - displays the highest grid ID used in this zone (for add)");
	}
}
void command_wp(Client *c, const Seperator *sep)
{
	// #wp add/delete: add or remove a single waypoint on a grid at the
	// caller's current location.
	int waypoint = atoi(sep->arg[4]);
	if (strcasecmp("add", sep->arg[1]) == 0) {
		// 0 (or omitted) means "append after the current highest waypoint".
		if (waypoint == 0) {
			waypoint = database.GetHighestWaypoint(zone->GetZoneID(), atoi(sep->arg[2])) + 1;
		}
		if (strcasecmp("-h", sep->arg[5]) == 0) {
			// -h stores the caller's heading with the waypoint.
			database.AddWP(c, atoi(sep->arg[2]), waypoint, c->GetPosition(), atoi(sep->arg[3]), zone->GetZoneID());
		}
		else {
			auto position = c->GetPosition();
			// heading -1: presumably "no fixed heading" — confirm in AddWP.
			position.w = -1;
			database.AddWP(c, atoi(sep->arg[2]), waypoint, position, atoi(sep->arg[3]), zone->GetZoneID());
		}
		return;
	}
	if (strcasecmp("delete", sep->arg[1]) == 0) {
		database.DeleteWaypoint(c, atoi(sep->arg[2]), waypoint, zone->GetZoneID());
		return;
	}
	c->Message(Chat::White, "Usage: #wp add/delete grid_num pause wp_num [-h]");
}
void command_iplookup(Client *c, const Seperator *sep)
{
	// #iplookup: forward an IP-lookup query to the world server; the reply
	// is routed back to this client by name.
	auto pack = new ServerPacket(
		ServerOP_IPLookup,
		sizeof(ServerGenericWorldQuery_Struct) + strlen(sep->argplus[1]) + 1);
	auto *request = (ServerGenericWorldQuery_Struct *) pack->pBuffer;
	strcpy(request->from, c->GetName());
	request->admin = c->Admin();
	if (sep->argplus[1][0] != 0) {
		strcpy(request->query, sep->argplus[1]);
	}
	worldserver.SendPacket(pack);
	safe_delete(pack);
}
void command_size(Client *c, const Seperator *sep)
{
	// #size: change the apparent size of the target via an illusion packet
	// while preserving every other appearance attribute.
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White, "Usage: #size [0 - 255] (Decimal increments are allowed)");
		return;
	}
	const float new_size = atof(sep->arg[1]);
	Mob *target = c->GetTarget();
	if (new_size > 255) {
		c->Message(Chat::White, "Error: #size: Size can not be greater than 255.");
	}
	else if (new_size < 0) {
		c->Message(Chat::White, "Error: #size: Size can not be less than 0.");
	}
	else if (!target) {
		c->Message(Chat::White, "Error: this command requires a target");
	}
	else {
		// Re-send the target's current appearance, overriding only size.
		// 0xFF texture/helm values mean "leave unchanged".
		target->SendIllusionPacket(
			target->GetRace(),
			target->GetGender(),
			0xFF,
			0xFF,
			target->GetHairColor(),
			target->GetBeardColor(),
			target->GetEyeColor1(),
			target->GetEyeColor2(),
			target->GetHairStyle(),
			target->GetLuclinFace(),
			target->GetBeard(),
			0xFF,
			target->GetDrakkinHeritage(),
			target->GetDrakkinTattoo(),
			target->GetDrakkinDetails(),
			new_size);
		c->Message(Chat::White, "Size = %f", new_size);
	}
}
void command_mana(Client *c, const Seperator *sep)
{
	// #mana: refill the target's (or, with no target, the caller's) mana.
	Mob *subject = c->GetTarget() ? c->GetTarget() : c;
	if (subject->IsClient()) {
		// Clients go through the Client-specific mana calculation/setter.
		subject->CastToClient()->SetMana(subject->CastToClient()->CalcMaxMana());
	}
	else {
		subject->SetMana(subject->CalcMaxMana());
	}
}
void command_flymode(Client *c, const Seperator *sep)
{
	// #flymode: set the gravity/fly behavior (0-5) on the target, or on the
	// caller when nothing is targeted.
	// Fix: the old validation only rejected single-character arguments
	// outside '0'-'5'; an empty argument or multi-digit values such as "10"
	// slipped through and were applied silently. Validate numerically.
	if (!sep->IsNumber(1) || atoi(sep->arg[1]) < 0 || atoi(sep->arg[1]) > 5) {
		c->Message(Chat::White, "#flymode [0/1/2/3/4/5]");
		return;
	}
	Mob *t = c;
	if (c->GetTarget()) {
		t = c->GetTarget();
	}
	const int fm = atoi(sep->arg[1]);
	t->SetFlyMode(static_cast<GravityBehavior>(fm));
	t->SendAppearancePacket(AT_Levitate, fm);
	// Confirmation text per mode (same wording as before).
	static const char *mode_names[] = {
		"Grounded", "Flying", "Levitating", "In Water", "Floating(Boat)",
		"Levitating While Running"
	};
	c->Message(Chat::White, "Setting %s to %s", t->GetName(), mode_names[fm]);
}
void command_showskills(Client *c, const Seperator *sep)
{
	// #showskills: list every skill (current and raw value) of the targeted
	// client, or of the caller when there is no client target.
	Client *subject = c;
	if (c->GetTarget() && c->GetTarget()->IsClient()) {
		subject = c->GetTarget()->CastToClient();
	}
	c->Message(Chat::White, "Skills for %s", subject->GetName());
	for (EQEmu::skills::SkillType i = EQEmu::skills::Skill1HBlunt; i <= EQEmu::skills::HIGHEST_SKILL; i = (EQEmu::skills::SkillType)(i + 1)) {
		c->Message(Chat::White, "Skill [%d] is at [%d] - %u", i, subject->GetSkill(i), subject->GetRawSkill(i));
	}
}
void command_findspell(Client *c, const Seperator *sep)
{
	// #findspell: look a spell up by id (numeric argument) or list spells
	// whose names contain the given substring (case-insensitive), capped at
	// roughly 20 results.
	if (sep->arg[1][0] == 0)
		c->Message(Chat::White, "Usage: #FindSpell [spellname]");
	else if (SPDAT_RECORDS <= 0)
		c->Message(Chat::White, "Spells not loaded");
	else if (Seperator::IsNumber(sep->argplus[1])) {
		// Numeric argument: direct spell-id lookup.
		int spellid = atoi(sep->argplus[1]);
		if (spellid <= 0 || spellid >= SPDAT_RECORDS) {
			c->Message(Chat::White, "Error: Number out of range");
		}
		else {
			c->Message(Chat::White, " %i: %s", spellid, spells[spellid].name);
		}
	}
	else {
		// Substring search: uppercase both the criteria and each spell name,
		// then scan the whole spell table.
		int count=0;
		//int iSearchLen = strlen(sep->argplus[1])+1;
		char sName[64];
		char sCriteria[65];
		strn0cpy(sCriteria, sep->argplus[1], 64);
		strupr(sCriteria);
		for (int i=0; i<SPDAT_RECORDS; i++) {
			if (spells[i].name[0] != 0) {
				// NOTE(review): strcpy into sName[64] assumes spell names
				// are shorter than 64 characters — confirm.
				strcpy(sName, spells[i].name);
				strupr(sName);
				char* pdest = strstr(sName, sCriteria);
				// Print matches while count <= 20; once the counter passes
				// 20 the scan stops early on the next match.
				if ((pdest != nullptr) && (count <=20)) {
					c->Message(Chat::White, " %i: %s", i, spells[i].name);
					count++;
				}
				else if (count > 20)
					break;
			}
		}
		if (count > 20)
			c->Message(Chat::White, "20 spells found... max reached.");
		else
			c->Message(Chat::White, "%i spells found.", count);
	}
}
void command_castspell(Client *c, const Seperator *sep)
{
	// #castspell: cast an arbitrary spell by id on the current target (or
	// self/no-target when nothing is targeted). Admins at or above
	// commandInstacast skip the cast time by calling SpellFinished()
	// directly instead of CastSpell().
	if (!sep->IsNumber(1))
		c->Message(Chat::White, "Usage: #CastSpell spellid");
	else {
		uint16 spellid = atoi(sep->arg[1]);
		/*
			Spell restrictions.
		*/
		// Hard-coded list of spell ids reserved for admins at or above
		// commandCastSpecials.
		if (((spellid == 2859) || (spellid == 841) || (spellid == 300) || (spellid == 2314) ||
			(spellid == 3716) || (spellid == 911) || (spellid == 3014) || (spellid == 982) ||
			(spellid == 905) || (spellid == 2079) || (spellid == 1218) || (spellid == 819) ||
			((spellid >= 780) && (spellid <= 785)) || ((spellid >= 1200) && (spellid <= 1205)) ||
			((spellid >= 1342) && (spellid <= 1348)) || (spellid == 1923) || (spellid == 1924) ||
			(spellid == 3355)) &&
			c->Admin() < commandCastSpecials)
			c->Message(Chat::Red, "Unable to cast spell.");
		else if (spellid >= SPDAT_RECORDS)
			c->Message(Chat::White, "Error: #CastSpell: Argument out of range");
		else
			// No target: cast with target id 0 (self/none).
			if (c->GetTarget() == 0)
				if(c->Admin() >= commandInstacast)
					c->SpellFinished(spellid, 0, EQEmu::spells::CastingSlot::Item, 0, -1, spells[spellid].ResistDiff);
				else
					c->CastSpell(spellid, 0, EQEmu::spells::CastingSlot::Item, 0);
			else
				if(c->Admin() >= commandInstacast)
					c->SpellFinished(spellid, c->GetTarget(), EQEmu::spells::CastingSlot::Item, 0, -1, spells[spellid].ResistDiff);
				else
					c->CastSpell(spellid, c->GetTarget()->GetID(), EQEmu::spells::CastingSlot::Item, 0);
	}
}
void command_setlanguage(Client *c, const Seperator *sep)
{
	// #setlanguage: set a language skill value on a targeted player, or
	// list the known language IDs with "#setlanguage list".
	if (strcasecmp(sep->arg[1], "list") == 0) {
		static const char *language_names[] = {
			"Common Tongue", "Barbarian", "Erudian", "Elvish", "Dark Elvish",
			"Dwarvish", "Troll", "Ogre", "Gnomish", "Halfling",
			"Thieves Cant", "Old Erudian", "Elder Elvish", "Froglok", "Goblin",
			"Gnoll", "Combine Tongue", "Elder Teir`Dal", "Lizardman", "Orcish",
			"Faerie", "Dragon", "Elder Dragon", "Dark Speech", "Vah Shir",
			"Alaran", "Hadal", "Unknown1"
		};
		c->Message(Chat::White, "Languages:");
		for (int i = 0; i <= 27; ++i) {
			c->Message(Chat::White, "(%d) %s", i, language_names[i]);
		}
		return;
	}
	if (c->GetTarget() == 0) {
		c->Message(Chat::White, "Error: #setlanguage: No target.");
		return;
	}
	if (!c->GetTarget()->IsClient()) {
		c->Message(Chat::White, "Error: Target must be a player.");
		return;
	}
	const bool valid_language = sep->IsNumber(1) && atoi(sep->arg[1]) >= 0 && atoi(sep->arg[1]) <= 27;
	const bool valid_value    = sep->IsNumber(2) && atoi(sep->arg[2]) >= 0 && atoi(sep->arg[2]) <= 100;
	if (!valid_language || !valid_value) {
		c->Message(Chat::White, "Usage: #setlanguage [language ID] [value] (0-27, 0-100)");
		c->Message(Chat::White, "Try #setlanguage list for a list of language IDs");
		return;
	}
	LogInfo("Set language request from [{}], target:[{}] lang_id:[{}] value:[{}]", c->GetName(), c->GetTarget()->GetName(), atoi(sep->arg[1]), atoi(sep->arg[2]));
	uint8 langid = (uint8)atoi(sep->arg[1]);
	uint8 value = (uint8)atoi(sep->arg[2]);
	c->GetTarget()->CastToClient()->SetLanguageSkill(langid, value);
}
void command_setskill(Client *c, const Seperator *sep)
{
	// #setskill: set a single skill value on the targeted client.
	Mob *target = c->GetTarget();
	if (target == nullptr) {
		c->Message(Chat::White, "Error: #setskill: No target.");
		return;
	}
	if (!target->IsClient()) {
		c->Message(Chat::White, "Error: #setskill: Target must be a client.");
		return;
	}
	const bool valid_skill = sep->IsNumber(1) && atoi(sep->arg[1]) >= 0 && atoi(sep->arg[1]) <= EQEmu::skills::HIGHEST_SKILL;
	const bool valid_value = sep->IsNumber(2) && atoi(sep->arg[2]) >= 0 && atoi(sep->arg[2]) <= HIGHEST_CAN_SET_SKILL;
	if (!valid_skill || !valid_value) {
		c->Message(Chat::White, "Usage: #setskill skill x ");
		c->Message(Chat::White, " skill = 0 to %d", EQEmu::skills::HIGHEST_SKILL);
		c->Message(Chat::White, " x = 0 to %d", HIGHEST_CAN_SET_SKILL);
		return;
	}
	LogInfo("Set skill request from [{}], target:[{}] skill_id:[{}] value:[{}]", c->GetName(), target->GetName(), atoi(sep->arg[1]), atoi(sep->arg[2]));
	const int skill_num = atoi(sep->arg[1]);
	const uint16 skill_value = atoi(sep->arg[2]);
	if (skill_num <= EQEmu::skills::HIGHEST_SKILL) {
		target->CastToClient()->SetSkill((EQEmu::skills::SkillType)skill_num, skill_value);
	}
}
void command_setskillall(Client *c, const Seperator *sep)
{
	// #setskillall: set every skill on the targeted client to one value.
	if (c->GetTarget() == 0)
		c->Message(Chat::White, "Error: #setallskill: No target.");
	else if (!c->GetTarget()->IsClient())
		c->Message(Chat::White, "Error: #setskill: Target must be a client.");
	else if (!sep->IsNumber(1) || atoi(sep->arg[1]) < 0 || atoi(sep->arg[1]) > HIGHEST_CAN_SET_SKILL) {
		c->Message(Chat::White, "Usage: #setskillall value ");
		c->Message(Chat::White, " value = 0 to %d", HIGHEST_CAN_SET_SKILL);
	}
	else {
		// Fix: the old condition also tested `c->GetTarget()==0`, but the
		// first branch above already guarantees a non-null target — the
		// dead check is removed.
		if (c->Admin() >= commandSetSkillsOther || c->GetTarget() == c) {
			LogInfo("Set ALL skill request from [{}], target:[{}]", c->GetName(), c->GetTarget()->GetName());
			uint16 level = atoi(sep->arg[1]);
			for (EQEmu::skills::SkillType skill_num = EQEmu::skills::Skill1HBlunt; skill_num <= EQEmu::skills::HIGHEST_SKILL; skill_num = (EQEmu::skills::SkillType)(skill_num + 1)) {
				c->GetTarget()->CastToClient()->SetSkill(skill_num, level);
			}
		}
		else
			c->Message(Chat::White, "Error: Your status is not high enough to set anothers skills");
	}
}
void command_race(Client *c, const Seperator *sep)
{
	// #race: change own (or, with sufficient status, the target's) race via
	// an illusion packet. 0 restores the normal model.
	if (sep->IsNumber(1)) {
		const int race = atoi(sep->arg[1]);
		if ((race >= 0 && race <= 732) || (race >= 2253 && race <= 2259)) {
			Mob *target = c->CastToMob();
			if (c->GetTarget() && c->Admin() >= commandRaceOthers) {
				target = c->GetTarget();
			}
			target->SendIllusionPacket(race);
			return;
		}
	}
	// Single usage message for both failure paths (was duplicated).
	c->Message(Chat::White, "Usage: #race [0-732, 2253-2259] (0 for back to normal)");
}
void command_gender(Client *c, const Seperator *sep)
{
	// #gender: change the gender (via illusion) of self, or of the target
	// when the caller's status allows changing others.
	const bool valid = sep->IsNumber(1) && atoi(sep->arg[1]) >= 0 && atoi(sep->arg[1]) <= 500;
	if (!valid) {
		c->Message(Chat::White, "Usage: #gender [0/1/2]");
		return;
	}
	Mob *subject = c->CastToMob();
	if (c->GetTarget() && c->Admin() >= commandGenderOthers) {
		subject = c->GetTarget();
	}
	subject->SendIllusionPacket(subject->GetRace(), atoi(sep->arg[1]));
}
void command_makepet(Client *c, const Seperator *sep)
{
	// #makepet: create a temporary pet of the named pet type for the caller.
	if (sep->arg[1][0] == '\0') {
		c->Message(Chat::White, "Usage: #makepet pet_type_name (will not survive across zones)");
		return;
	}
	c->MakePet(0, sep->arg[1]);
}
void command_level(Client *c, const Seperator *sep)
{
	// #level: set the level of the target (or of the caller). Level caps are
	// relaxed for sufficiently privileged admins.
	uint16 level = atoi(sep->arg[1]);
	if ((level <= 0) || ((level > RuleI(Character, MaxLevel)) && (c->Admin() < commandLevelAboveCap))) {
		c->Message(Chat::White, "Error: #Level: Invalid Level");
	}
	else if (c->Admin() < RuleI(GM, MinStatusToLevelTarget)) {
		// Caller lacks the status to level a target, so level themselves.
		// NOTE(review): the `<` comparison reads inverted at first glance —
		// confirm MinStatusToLevelTarget semantics against the rules table.
		c->SetLevel(level, true);
#ifdef BOTS
		if(RuleB(Bots, BotLevelsWithOwner))
			Bot::LevelBotWithClient(c, level, true);
#endif
	}
	else if (!c->GetTarget()) {
		c->Message(Chat::White, "Error: #Level: No target");
	}
	else {
		// Leveling a client target above the cap needs extra status; NPC
		// targets are exempt from the cap check.
		if (!c->GetTarget()->IsNPC() && ((c->Admin() < commandLevelNPCAboveCap) && (level > RuleI(Character, MaxLevel)))) {
			c->Message(Chat::White, "Error: #Level: Invalid Level");
		}
		else {
			c->GetTarget()->SetLevel(level, true);
			if(c->GetTarget()->IsClient()) {
				c->GetTarget()->CastToClient()->SendLevelAppearance();
#ifdef BOTS
				if(RuleB(Bots, BotLevelsWithOwner))
					Bot::LevelBotWithClient(c->GetTarget()->CastToClient(), level, true);
#endif
			}
		}
	}
}
void command_spawneditmass(Client *c, const Seperator *sep)
{
	// #spawneditmass <search> <option> <value> [apply]
	// Bulk-edits spawn2 rows in this zone/version whose NPC name contains
	// <search>. Without "apply" it only previews the matches (and prints a
	// clickable say-link that re-runs the command with "apply"); with
	// "apply" it writes the change. The only supported option here is
	// "respawn_time".
	std::string query = fmt::format(
		SQL(
			SELECT
			npc_types.id,
			npc_types.name,
			spawn2.respawntime,
			spawn2.id
			FROM
			npc_types
			JOIN spawnentry ON spawnentry.npcID = npc_types.id
			JOIN spawn2 ON spawn2.spawngroupID = spawnentry.spawngroupID
			WHERE
			spawn2.zone = '{0}' and spawn2.version = {1}
			GROUP BY npc_types.id
		),
		zone->GetShortName(),
		zone->GetInstanceVersion()
	);
	std::string status = "(Searching)";
	if (strcasecmp(sep->arg[4], "apply") == 0) {
		status = "(Applying)";
	}
	std::string search_value;
	std::string edit_option;
	std::string edit_value;
	std::string apply_set;
	// NOTE(review): sep->arg[n] is a char array, so these null checks are
	// always true; missing arguments simply yield empty strings, which the
	// empty() checks below handle.
	if (sep->arg[1]) {
		search_value = sep->arg[1];
	}
	if (sep->arg[2]) {
		edit_option = sep->arg[2];
	}
	if (sep->arg[3]) {
		edit_value = sep->arg[3];
	}
	if (sep->arg[4]) {
		apply_set = sep->arg[4];
	}
	if (!edit_option.empty() && edit_value.empty()) {
		c->Message(Chat::Yellow, "Please specify an edit option value | #npceditmass <search> <option> <value>");
		return;
	}
	std::vector<std::string> npc_ids;
	std::vector<std::string> spawn2_ids;
	int found_count = 0;
	auto results = database.QueryDatabase(query);
	for (auto row = results.begin(); row != results.end(); ++row) {
		std::string npc_id = row[0];
		std::string npc_name = row[1];
		std::string respawn_time = row[2];
		std::string spawn2_id = row[3];
		// Substring match against the NPC name; an empty search matches all.
		if (npc_name.find(search_value) == std::string::npos) {
			continue;
		}
		c->Message(
			Chat::Yellow,
			fmt::format(
				"NPC ({0}) [{1}] respawn_time [{2}] {3}",
				npc_id,
				npc_name,
				respawn_time,
				status
			).c_str()
		);
		npc_ids.push_back(npc_id);
		spawn2_ids.push_back(spawn2_id);
		found_count++;
	}
	c->Message(Chat::Yellow, "Found [%i] NPC Spawn2 entries that match this criteria in this zone", found_count);
	if (edit_option.empty()) {
		c->Message(Chat::Yellow, "Please specify an edit option | #npceditmass <search> <option>");
		c->Message(Chat::Yellow, "Options [respawn_time]");
		return;
	}
	// Say-link that re-issues this exact command with "apply" appended.
	std::string saylink = fmt::format(
		"#spawneditmass {} {} {} apply",
		search_value,
		edit_option,
		edit_value
	);
	if (found_count > 0) {
		c->Message(
			Chat::Yellow, "To apply these changes, click <%s> or type [%s]",
			EQEmu::SayLinkEngine::GenerateQuestSaylink(saylink, false, "Apply").c_str(),
			saylink.c_str()
		);
	}
	if (edit_option == "respawn_time" && apply_set == "apply") {
		std::string spawn2_ids_string = implode(",", spawn2_ids);
		if (spawn2_ids_string.empty()) {
			c->Message(Chat::Red, "Error: Ran into an unknown error compiling Spawn2 IDs");
			return;
		}
		// std::stoi validates the value is numeric before it reaches SQL.
		database.QueryDatabase(
			fmt::format(
				SQL(
					UPDATE spawn2 SET respawntime = {} WHERE id IN({})
				),
				std::stoi(edit_value),
				spawn2_ids_string
			)
		);
		c->Message(Chat::Yellow, "Updated [%i] spawns", found_count);
	}
}
void command_spawn(Client *c, const Seperator *sep)
{
	// #spawn: create an ad-hoc NPC described entirely by the arguments.
	if (sep->arg[1][0] != 0) {
		// Refuse names that collide with an online character.
		if (entity_list.GetClientByName(sep->arg[1])) {
			c->Message(Chat::White, "You cannot spawn a mob with the same name as a character!");
			return;
		}
	}
	if (!NPC::SpawnNPC(sep->argplus[1], c->GetPosition(), c)) {
		c->Message(Chat::White, "Format: #spawn name race level material hp gender class priweapon secweapon merchantid bodytype - spawns a npc those parameters.");
		c->Message(Chat::White, "Name Format: NPCFirstname_NPCLastname - All numbers in a name are stripped and \"_\" characters become a space.");
		c->Message(Chat::White, "Note: Using \"-\" for gender will autoselect the gender for the race. Using \"-\" for HP will use the calculated maximum HP.");
	}
}
void command_test(Client *c, const Seperator *sep)
{
	// #test: dev scratch command — currently sets weapon ornamentation.
	// Fix: sep->arg[n] is a char array and never null, so the old
	// `if (sep->arg[1])` checks were always true and atoi("") forced the
	// ornamentation to 0 even when no argument was given. Only apply a
	// value when an argument is actually present.
	c->Message(Chat::Yellow, "Triggering test command");
	if (sep->arg[1][0] != '\0') {
		c->SetPrimaryWeaponOrnamentation(atoi(sep->arg[1]));
	}
	if (sep->arg[2][0] != '\0') {
		c->SetSecondaryWeaponOrnamentation(atoi(sep->arg[2]));
	}
}
void command_texture(Client *c, const Seperator *sep)
{
	// #texture: change the equipment texture (and optionally helm texture)
	// shown for the target, or for the caller with no target.
	uint16 texture;
	if (sep->IsNumber(1) && atoi(sep->arg[1]) >= 0 && atoi(sep->arg[1]) <= 255) {
		texture = atoi(sep->arg[1]);
		uint8 helm = 0xFF;
		// Player Races Wear Armor, so Wearchange is sent instead
		int i;
		if (!c->GetTarget())
			// No target: send a wear-change for every tintable slot on self.
			for (i = EQEmu::textures::textureBegin; i <= EQEmu::textures::LastTintableTexture; i++)
			{
				c->SendTextureWC(i, texture);
			}
		// NOTE(review): model ids 1-12/128/130/330/522 are treated as
		// armor-wearing (playable-style) models — confirm against race list.
		else if ((c->GetTarget()->GetModel() > 0 && c->GetTarget()->GetModel() <= 12) ||
			c->GetTarget()->GetModel() == 128 || c->GetTarget()->GetModel() == 130 ||
			c->GetTarget()->GetModel() == 330 || c->GetTarget()->GetModel() == 522) {
			for (i = EQEmu::textures::textureBegin; i <= EQEmu::textures::LastTintableTexture; i++)
			{
				c->GetTarget()->SendTextureWC(i, texture);
			}
		}
		else // Non-Player Races only need Illusion Packets to be sent for texture
		{
			if (sep->IsNumber(2) && atoi(sep->arg[2]) >= 0 && atoi(sep->arg[2]) <= 255)
				helm = atoi(sep->arg[2]);
			else
				helm = texture;
			// 255 means "show equipment": translated to the 0xFFFF sentinel.
			if (texture == 255) {
				texture = 0xFFFF; // Should be pulling these from the database instead
				helm = 0xFF;
			}
			if ((c->GetTarget()) && (c->Admin() >= commandTextureOthers))
				c->GetTarget()->SendIllusionPacket(c->GetTarget()->GetModel(), 0xFF, texture, helm);
			else
				c->SendIllusionPacket(c->GetRace(), 0xFF, texture, helm);
		}
	}
	else
		c->Message(Chat::White, "Usage: #texture [texture] [helmtexture] (0-255, 255 for show equipment)");
}
void command_npctypespawn(Client *c, const Seperator *sep)
{
	// #npctypespawn: spawn a fresh NPC of the given npc_types id at the
	// caller's location, optionally overriding its faction.
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White, "Usage: #npctypespawn npctypeid factionid");
		return;
	}
	const NPCType* npc_data = database.LoadNPCTypesData(atoi(sep->arg[1]));
	if (!npc_data) {
		c->Message(Chat::White, "NPC Type %i not found", atoi(sep->arg[1]));
		return;
	}
	//npc_data->fixedZ = 1;
	auto npc = new NPC(npc_data, 0, c->GetPosition(), GravityBehavior::Water);
	if (npc && sep->IsNumber(2)) {
		npc->SetNPCFactionID(atoi(sep->arg[2]));
	}
	npc->AddLootTable();
	if (npc->DropsGlobalLoot()) {
		npc->CheckGlobalLootTables();
	}
	entity_list.AddNPC(npc);
}
void command_nudge(Client* c, const Seperator* sep)
{
	// #nudge: move a stationary target by a relative offset given as
	// key=value pairs, e.g. "#nudge x=5 z=-2 h=90".
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #nudge [x=f] [y=f] [z=f] [h=f] (partial/mixed arguments allowed)");
		return;
	}
	auto target = c->GetTarget();
	if (!target) {
		c->Message(Chat::Yellow, "This command requires a target.");
		return;
	}
	if (target->IsMoving()) {
		c->Message(Chat::Yellow, "This command requires a stationary target.");
		return;
	}
	glm::vec4 offset(0.0f, 0.0f, 0.0f, 0.0f);
	for (auto arg_index = 1; arg_index <= 4; ++arg_index) {
		if (!sep->arg[arg_index]) {
			continue;
		}
		// Split each argument at '=' into an axis letter and a value.
		Seperator pair(sep->arg[arg_index], '=');
		if (!pair.arg[1][0]) {
			continue;
		}
		const float value = atof(pair.arg[1]);
		const char axis = pair.arg[0][0];
		if (axis == 'x') {
			offset.x = value;
		}
		else if (axis == 'y') {
			offset.y = value;
		}
		else if (axis == 'z') {
			offset.z = value;
		}
		else if (axis == 'h') {
			offset.w = value;
		}
	}
	const auto& old_position = target->GetPosition();
	glm::vec4 destination(
		old_position.x + offset.x,
		old_position.y + offset.y,
		old_position.z + offset.z,
		old_position.w + offset.w
	);
	target->GMMove(destination.x, destination.y, destination.z, destination.w);
	c->Message(
		Chat::White,
		"Nudging '%s' to {%1.3f, %1.3f, %1.3f, %1.2f} (adjustment: {%1.3f, %1.3f, %1.3f, %1.2f})",
		target->GetName(),
		destination.x,
		destination.y,
		destination.z,
		destination.w,
		offset.x,
		offset.y,
		offset.z,
		offset.w
	);
}
void command_heal(Client *c, const Seperator *sep)
{
	// #heal: fully heal the current target.
	Mob *target = c->GetTarget();
	if (target == 0) {
		c->Message(Chat::White, "Error: #Heal: No Target.");
		return;
	}
	target->Heal();
}
void command_appearance(Client *c, const Seperator *sep)
{
	// #appearance: dev debug command — sends an arbitrary appearance packet
	// (type, value) for the target, or the caller when nothing is targeted.
	if (sep->arg[2][0] == 0) {
		c->Message(Chat::White, "Usage: #appearance type value");
		return;
	}
	Mob *subject = c->CastToMob();
	if (c->GetTarget()) {
		subject = c->GetTarget();
	}
	subject->SendAppearancePacket(atoi(sep->arg[1]), atoi(sep->arg[2]));
	c->Message(Chat::White, "Sending appearance packet: target=%s, type=%s, value=%s", subject->GetName(), sep->arg[1], sep->arg[2]);
}
void command_nukeitem(Client *c, const Seperator *sep)
{
	// Deletes every instance of the given item id from the targeted player's
	// inventory and reports how many were removed. The item id may be given
	// in decimal or hexadecimal.
	if (c->GetTarget() && c->GetTarget()->IsClient() && (sep->IsNumber(1) || sep->IsHexNumber(1))) {
		// scope the locals to the branch that uses them
		int itemid = sep->IsNumber(1) ? atoi(sep->arg[1]) : hextoi(sep->arg[1]);
		int numitems = c->GetTarget()->CastToClient()->NukeItem(itemid);
		c->Message(Chat::White, " %u items deleted", numitems);
	}
	else {
		// fixed typo in user-facing message: "targted" -> "targeted"
		c->Message(Chat::White, "Usage: (targeted) #nukeitem itemnum - removes the item from the player's inventory");
	}
}
void command_peekinv(Client *c, const Seperator *sep)
{
	// Dumps a client's inventory to the issuing GM, one storage scope at a
	// time (equipment, general, cursor, cursor-limbo, tribute, bank, shared
	// bank, trade, or the open world tradeskill container). Bag contents and
	// augment sockets are reported at increasing "dot" depth. With no target,
	// the issuer's own inventory is shown.
	//
	// Fix: augment rows previously printed the charges of `inst_sub` (the
	// enclosing bag-slot item, or a stale value in the top-level loops)
	// instead of the augment's own charges (`inst_aug`).
	// this can be cleaned up once inventory is cleaned up
	enum {
		peekNone = 0x0000,
		peekEquip = 0x0001,
		peekGen = 0x0002,
		peekCursor = 0x0004,
		peekLimbo = 0x0008,
		peekTrib = 0x0010,
		peekBank = 0x0020,
		peekShBank = 0x0040,
		peekTrade = 0x0080,
		peekWorld = 0x0100,
		peekOutOfScope = (peekWorld * 2) // less than
	};

	// Per-scope display prefix, slot-id range, and whether bags may occur.
	static const char* scope_prefix[] = { "equip", "gen", "cursor", "limbo", "trib", "bank", "shbank", "trade", "world" };

	static const int16 scope_range[][2] = {
		{ EQEmu::invslot::EQUIPMENT_BEGIN, EQEmu::invslot::EQUIPMENT_END },
		{ EQEmu::invslot::GENERAL_BEGIN, EQEmu::invslot::GENERAL_END },
		{ EQEmu::invslot::slotCursor, EQEmu::invslot::slotCursor },
		{ EQEmu::invslot::SLOT_INVALID, EQEmu::invslot::SLOT_INVALID }, // limbo has no slot range; handled separately below
		{ EQEmu::invslot::TRIBUTE_BEGIN, EQEmu::invslot::TRIBUTE_END },
		{ EQEmu::invslot::BANK_BEGIN, EQEmu::invslot::BANK_END },
		{ EQEmu::invslot::SHARED_BANK_BEGIN, EQEmu::invslot::SHARED_BANK_END },
		{ EQEmu::invslot::TRADE_BEGIN, EQEmu::invslot::TRADE_END },
		{ EQEmu::invslot::SLOT_BEGIN, (EQEmu::invtype::WORLD_SIZE - 1) }
	};

	static const bool scope_bag[] = { false, true, true, true, false, true, true, true, true };

	if (!c)
		return;

	if (c->GetTarget() && !c->GetTarget()->IsClient()) {
		c->Message(Chat::White, "You must target a PC for this command.");
		return;
	}

	// Translate the argument into a bitmask of scopes to display.
	int scopeMask = peekNone;
	if (strcasecmp(sep->arg[1], "all") == 0) { scopeMask = (peekOutOfScope - 1); }
	else if (strcasecmp(sep->arg[1], "equip") == 0) { scopeMask |= peekEquip; }
	else if (strcasecmp(sep->arg[1], "gen") == 0) { scopeMask |= peekGen; }
	else if (strcasecmp(sep->arg[1], "cursor") == 0) { scopeMask |= peekCursor; }
	else if (strcasecmp(sep->arg[1], "poss") == 0) { scopeMask |= (peekEquip | peekGen | peekCursor); }
	else if (strcasecmp(sep->arg[1], "limbo") == 0) { scopeMask |= peekLimbo; }
	else if (strcasecmp(sep->arg[1], "curlim") == 0) { scopeMask |= (peekCursor | peekLimbo); }
	else if (strcasecmp(sep->arg[1], "trib") == 0) { scopeMask |= peekTrib; }
	else if (strcasecmp(sep->arg[1], "bank") == 0) { scopeMask |= peekBank; }
	else if (strcasecmp(sep->arg[1], "shbank") == 0) { scopeMask |= peekShBank; }
	else if (strcasecmp(sep->arg[1], "allbank") == 0) { scopeMask |= (peekBank | peekShBank); }
	else if (strcasecmp(sep->arg[1], "trade") == 0) { scopeMask |= peekTrade; }
	else if (strcasecmp(sep->arg[1], "world") == 0) { scopeMask |= peekWorld; }

	if (!scopeMask) {
		c->Message(Chat::White, "Usage: #peekinv [equip|gen|cursor|poss|limbo|curlim|trib|bank|shbank|allbank|trade|world|all]");
		c->Message(Chat::White, "- Displays a portion of the targeted user's inventory");
		c->Message(Chat::White, "- Caution: 'all' is a lot of information!");
		return;
	}

	Client* targetClient = c;
	if (c->GetTarget())
		targetClient = c->GetTarget()->CastToClient();

	const EQEmu::ItemInstance* inst_main = nullptr;
	const EQEmu::ItemInstance* inst_sub = nullptr;
	const EQEmu::ItemInstance* inst_aug = nullptr;
	const EQEmu::ItemData* item_data = nullptr;

	EQEmu::SayLinkEngine linker;
	linker.SetLinkType(EQEmu::saylink::SayLinkItemInst);

	c->Message(Chat::White, "Displaying inventory for %s...", targetClient->GetName());

	Object* objectTradeskill = targetClient->GetTradeskillObject();

	bool itemsFound = false;

	for (int scopeIndex = 0, scopeBit = peekEquip; scopeBit < peekOutOfScope; ++scopeIndex, scopeBit <<= 1) {
		if (scopeBit & ~scopeMask)
			continue;

		if (scopeBit & peekWorld) {
			// world scope reads from the open tradeskill container, not the inventory
			if (objectTradeskill == nullptr) {
				c->Message(Chat::Default, "No world tradeskill object selected...");
				continue;
			}
			else {
				c->Message(Chat::White, "[WorldObject DBID: %i (entityid: %i)]", objectTradeskill->GetDBID(), objectTradeskill->GetID());
			}
		}

		for (int16 indexMain = scope_range[scopeIndex][0]; indexMain <= scope_range[scopeIndex][1]; ++indexMain) {
			if (indexMain == EQEmu::invslot::SLOT_INVALID)
				continue;

			inst_main = ((scopeBit & peekWorld) ? objectTradeskill->GetItem(indexMain) : targetClient->GetInv().GetItem(indexMain));
			if (inst_main) {
				itemsFound = true;
				item_data = inst_main->GetItem();
			}
			else {
				item_data = nullptr;
			}

			linker.SetItemInst(inst_main);

			// channel doubles as an error flag: non-zero (red) when item data is missing
			c->Message(
				(item_data == nullptr),
				"%sSlot: %i, Item: %i (%s), Charges: %i",
				scope_prefix[scopeIndex],
				((scopeBit & peekWorld) ? (EQEmu::invslot::WORLD_BEGIN + indexMain) : indexMain),
				((item_data == nullptr) ? 0 : item_data->ID),
				linker.GenerateLink().c_str(),
				((inst_main == nullptr) ? 0 : inst_main->GetCharges())
			);

			if (inst_main && inst_main->IsClassCommon()) {
				for (uint8 indexAug = EQEmu::invaug::SOCKET_BEGIN; indexAug <= EQEmu::invaug::SOCKET_END; ++indexAug) {
					inst_aug = inst_main->GetItem(indexAug);
					if (!inst_aug) // extant only
						continue;

					item_data = inst_aug->GetItem();
					linker.SetItemInst(inst_aug);

					c->Message(
						(item_data == nullptr),
						".%sAugSlot: %i (Slot #%i, Aug idx #%i), Item: %i (%s), Charges: %i",
						scope_prefix[scopeIndex],
						INVALID_INDEX,
						((scopeBit & peekWorld) ? (EQEmu::invslot::WORLD_BEGIN + indexMain) : indexMain),
						indexAug,
						((item_data == nullptr) ? 0 : item_data->ID),
						linker.GenerateLink().c_str(),
						((inst_aug == nullptr) ? 0 : inst_aug->GetCharges()) // was inst_sub: reported the wrong item's charges
					);
				}
			}

			if (!scope_bag[scopeIndex] || !(inst_main && inst_main->IsClassBag()))
				continue;

			for (uint8 indexSub = EQEmu::invbag::SLOT_BEGIN; indexSub <= EQEmu::invbag::SLOT_END; ++indexSub) {
				inst_sub = inst_main->GetItem(indexSub);
				if (!inst_sub) // extant only
					continue;

				item_data = inst_sub->GetItem();
				linker.SetItemInst(inst_sub);

				c->Message(
					(item_data == nullptr),
					"..%sBagSlot: %i (Slot #%i, Bag idx #%i), Item: %i (%s), Charges: %i",
					scope_prefix[scopeIndex],
					((scopeBit & peekWorld) ? INVALID_INDEX : EQEmu::InventoryProfile::CalcSlotId(indexMain, indexSub)),
					((scopeBit & peekWorld) ? (EQEmu::invslot::WORLD_BEGIN + indexMain) : indexMain),
					indexSub,
					((item_data == nullptr) ? 0 : item_data->ID),
					linker.GenerateLink().c_str(),
					((inst_sub == nullptr) ? 0 : inst_sub->GetCharges())
				);

				if (inst_sub->IsClassCommon()) {
					for (uint8 indexAug = EQEmu::invaug::SOCKET_BEGIN; indexAug <= EQEmu::invaug::SOCKET_END; ++indexAug) {
						inst_aug = inst_sub->GetItem(indexAug);
						if (!inst_aug) // extant only
							continue;

						item_data = inst_aug->GetItem();
						linker.SetItemInst(inst_aug);

						c->Message(
							(item_data == nullptr),
							"...%sAugSlot: %i (Slot #%i, Sub idx #%i, Aug idx #%i), Item: %i (%s), Charges: %i",
							scope_prefix[scopeIndex],
							INVALID_INDEX,
							((scopeBit & peekWorld) ? INVALID_INDEX : EQEmu::InventoryProfile::CalcSlotId(indexMain, indexSub)),
							indexSub,
							indexAug,
							((item_data == nullptr) ? 0 : item_data->ID),
							linker.GenerateLink().c_str(),
							((inst_aug == nullptr) ? 0 : inst_aug->GetCharges()) // was inst_sub: bag item's charges, not the aug's
						);
					}
				}
			}
		}

		if (scopeBit & peekLimbo) {
			// limbo = queued cursor items past the first; the first cursor
			// entry is the live cursor slot and is shown by the cursor scope
			int limboIndex = 0;
			for (auto it = targetClient->GetInv().cursor_cbegin(); (it != targetClient->GetInv().cursor_cend()); ++it, ++limboIndex) {
				if (it == targetClient->GetInv().cursor_cbegin())
					continue;

				inst_main = *it;
				if (inst_main) {
					itemsFound = true;
					item_data = inst_main->GetItem();
				}
				else {
					item_data = nullptr;
				}

				linker.SetItemInst(inst_main);

				c->Message(
					(item_data == nullptr),
					"%sSlot: %i, Item: %i (%s), Charges: %i",
					scope_prefix[scopeIndex],
					(8000 + limboIndex),
					((item_data == nullptr) ? 0 : item_data->ID),
					linker.GenerateLink().c_str(),
					((inst_main == nullptr) ? 0 : inst_main->GetCharges())
				);

				if (inst_main && inst_main->IsClassCommon()) {
					for (uint8 indexAug = EQEmu::invaug::SOCKET_BEGIN; indexAug <= EQEmu::invaug::SOCKET_END; ++indexAug) {
						inst_aug = inst_main->GetItem(indexAug);
						if (!inst_aug) // extant only
							continue;

						item_data = inst_aug->GetItem();
						linker.SetItemInst(inst_aug);

						c->Message(
							(item_data == nullptr),
							".%sAugSlot: %i (Slot #%i, Aug idx #%i), Item: %i (%s), Charges: %i",
							scope_prefix[scopeIndex],
							INVALID_INDEX,
							(8000 + limboIndex),
							indexAug,
							((item_data == nullptr) ? 0 : item_data->ID),
							linker.GenerateLink().c_str(),
							((inst_aug == nullptr) ? 0 : inst_aug->GetCharges()) // was inst_sub: stale value from a prior loop
						);
					}
				}

				if (!scope_bag[scopeIndex] || !(inst_main && inst_main->IsClassBag()))
					continue;

				for (uint8 indexSub = EQEmu::invbag::SLOT_BEGIN; indexSub <= EQEmu::invbag::SLOT_END; ++indexSub) {
					inst_sub = inst_main->GetItem(indexSub);
					if (!inst_sub)
						continue;

					item_data = inst_sub->GetItem(); // inst_sub is non-null here; redundant guard removed
					linker.SetItemInst(inst_sub);

					c->Message(
						(item_data == nullptr),
						"..%sBagSlot: %i (Slot #%i, Bag idx #%i), Item: %i (%s), Charges: %i",
						scope_prefix[scopeIndex],
						INVALID_INDEX,
						(8000 + limboIndex),
						indexSub,
						((item_data == nullptr) ? 0 : item_data->ID),
						linker.GenerateLink().c_str(),
						((inst_sub == nullptr) ? 0 : inst_sub->GetCharges())
					);

					if (inst_sub->IsClassCommon()) {
						for (uint8 indexAug = EQEmu::invaug::SOCKET_BEGIN; indexAug <= EQEmu::invaug::SOCKET_END; ++indexAug) {
							inst_aug = inst_sub->GetItem(indexAug);
							if (!inst_aug) // extant only
								continue;

							item_data = inst_aug->GetItem();
							linker.SetItemInst(inst_aug);

							c->Message(
								(item_data == nullptr),
								"...%sAugSlot: %i (Slot #%i, Sub idx #%i, Aug idx #%i), Item: %i (%s), Charges: %i",
								scope_prefix[scopeIndex],
								INVALID_INDEX,
								(8000 + limboIndex),
								indexSub,
								indexAug,
								((item_data == nullptr) ? 0 : item_data->ID),
								linker.GenerateLink().c_str(),
								((inst_aug == nullptr) ? 0 : inst_aug->GetCharges()) // was inst_sub: bag item's charges, not the aug's
							);
						}
					}
				}
			}
		}
	}

	if (!itemsFound)
		c->Message(Chat::White, "No items found.");
}
void command_interrogateinv(Client *c, const Seperator *sep)
{
	// In-memory-only interrogation of a Client's nested inventory references.
	//
	// Nothing here is validated against the database; the report only shows
	// what the in-memory Client-Inventory object currently holds, which is
	// enough to spot corruption. Any reported error condition means the item
	// in question will be lost when the player logs out, zones, or otherwise
	// consumes the inventory instance -- and every item instance nested
	// deeper than a reported error should itself be treated as suspect.
	if (strcasecmp(sep->arg[1], "help") == 0) {
		if (c->Admin() < commandInterrogateInv) {
			c->Message(Chat::White, "Usage: #interrogateinv");
			c->Message(Chat::White, " Displays your inventory's current in-memory nested storage references");
		}
		else {
			c->Message(Chat::White, "Usage: #interrogateinv [log] [silent]");
			c->Message(Chat::White, " Displays your or your Player target inventory's current in-memory nested storage references");
			c->Message(Chat::White, " [log] - Logs interrogation to file");
			c->Message(Chat::White, " [silent] - Omits the in-game message portion of the interrogation");
		}
		return;
	}

	Client* subject = nullptr;
	bool write_log = false;
	bool quiet = false;
	bool error = false;
	bool allow_trip = false;

	if (c->Admin() < commandInterrogateInv) {
		// Non-privileged use always interrogates the issuer's own inventory
		// and may trip the "known bad" latch; refuse if already tripped.
		if (c->GetInterrogateInvState()) {
			c->Message(Chat::Red, "The last use of #interrogateinv on this inventory instance discovered an error...");
			c->Message(Chat::Red, "Logging out, zoning or re-arranging items at this point will result in item loss!");
			return;
		}
		subject = c;
		allow_trip = true;
	}
	else {
		// Privileged use may interrogate a targeted player (or self when
		// untargeted) and accepts the optional [log] [silent] arguments.
		auto target = c->GetTarget();
		if (target == nullptr) {
			subject = c;
		}
		else if (target->IsClient()) {
			subject = target->CastToClient();
		}
		else {
			c->Message(Chat::Default, "Use of this command is limited to Client entities");
			return;
		}

		if (strcasecmp(sep->arg[1], "log") == 0)
			write_log = true;
		if (strcasecmp(sep->arg[2], "silent") == 0)
			quiet = true;
	}

	if (!subject->InterrogateInventory(c, write_log, quiet, allow_trip, error))
		c->Message(Chat::Red, "An unknown error occurred while processing Client::InterrogateInventory()");
}
void command_invsnapshot(Client *c, const Seperator *sep)
{
	// Inventory snapshot management: capture point-in-time copies of a
	// character's inventory, then list, parse, compare against, or restore
	// them. "capture" is available at base command access; every other
	// sub-command (and the global gcount/gclear) requires the
	// commandInvSnapshot admin level.
	if (!c)
		return;

	// No arguments (or "help"): show the argument help popup. Privileged
	// users get the extended option table appended.
	if (sep->argnum == 0 || strcmp(sep->arg[1], "help") == 0) {
		std::string window_title = "Inventory Snapshot Argument Help Menu";

		std::string window_text =
			"<table>"
			"<tr>"
			"<td><c \"#FFFFFF\">Usage:</td>"
			"<td></td>"
			"<td>#invsnapshot arguments<br>(<c \"#00FF00\">required <c \"#FFFF00\">optional<c \"#FFFFFF\">)</td>"
			"</tr>"
			"<tr>"
			"<td><c \"#FFFF00\">help</td>"
			"<td></td>"
			"<td><c \"#AAAAAA\">this menu</td>"
			"</tr>"
			"<tr>"
			"<td><c \"#00FF00\">capture</td>"
			"<td></td>"
			"<td><c \"#AAAAAA\">takes snapshot of character inventory</td>"
			"</tr>";

		if (c->Admin() >= commandInvSnapshot)
			window_text.append(
				"<tr>"
				"<td><c \"#00FF00\">gcount</td>"
				"<td></td>"
				"<td><c \"#AAAAAA\">returns global snapshot count</td>"
				"</tr>"
				"<tr>"
				"<td><c \"#00FF00\">gclear</td>"
				"<td><c \"#FFFF00\"><br>now</td>"
				"<td><c \"#AAAAAA\">delete all snapshots - rule<br>delete all snapshots - now</td>"
				"</tr>"
				"<tr>"
				"<td><c \"#00FF00\">count</td>"
				"<td></td>"
				"<td><c \"#AAAAAA\">returns character snapshot count</td>"
				"</tr>"
				"<tr>"
				"<td><c \"#00FF00\">clear</td>"
				"<td><c \"#FFFF00\"><br>now</td>"
				"<td><c \"#AAAAAA\">delete character snapshots - rule<br>delete character snapshots - now</td>"
				"</tr>"
				"<tr>"
				"<td><c \"#00FF00\">list</td>"
				"<td><br><c \"#FFFF00\">count</td>"
				"<td><c \"#AAAAAA\">lists entry ids for current character<br>limits to count</td>"
				"</tr>"
				"<tr>"
				"<td><c \"#00FF00\">parse</td>"
				"<td><c \"#00FF00\">tstmp</td>"
				"<td><c \"#AAAAAA\">displays slots and items in snapshot</td>"
				"</tr>"
				"<tr>"
				"<td><c \"#00FF00\">compare</td>"
				"<td><c \"#00FF00\">tstmp</td>"
				"<td><c \"#AAAAAA\">compares inventory against snapshot</td>"
				"</tr>"
				"<tr>"
				"<td><c \"#00FF00\">restore</td>"
				"<td><c \"#00FF00\">tstmp</td>"
				"<td><c \"#AAAAAA\">restores slots and items in snapshot</td>"
				"</tr>"
			);

		window_text.append(
			"</table>"
		);

		c->SendPopupToClient(window_title.c_str(), window_text.c_str());

		return;
	}

	// Global (target-independent) maintenance sub-commands.
	if (c->Admin() >= commandInvSnapshot) { // global arguments
		if (strcmp(sep->arg[1], "gcount") == 0) {
			// server-wide snapshot count
			auto is_count = database.CountInvSnapshots();
			c->Message(Chat::White, "There %s %i inventory snapshot%s.", (is_count == 1 ? "is" : "are"), is_count, (is_count == 1 ? "" : "s"));

			return;
		}

		if (strcmp(sep->arg[1], "gclear") == 0) {
			// "now" deletes everything; otherwise only snapshots older than
			// the InvSnapshotHistoryD rule are removed
			if (strcmp(sep->arg[2], "now") == 0) {
				database.ClearInvSnapshots(true);
				c->Message(Chat::White, "Inventory snapshots cleared using current time.");
			}
			else {
				database.ClearInvSnapshots();
				c->Message(Chat::White, "Inventory snapshots cleared using RuleI(Character, InvSnapshotHistoryD) (%i day%s).",
					RuleI(Character, InvSnapshotHistoryD), (RuleI(Character, InvSnapshotHistoryD) == 1 ? "" : "s"));
			}

			return;
		}
	}

	// Everything below operates on a targeted client.
	if (!c->GetTarget() || !c->GetTarget()->IsClient()) {
		c->Message(Chat::White, "Target must be a client.");
		return;
	}

	auto tc = (Client*)c->GetTarget();

	if (strcmp(sep->arg[1], "capture") == 0) {
		// Take a snapshot and schedule the next automatic one; on failure,
		// schedule an earlier retry instead.
		if (database.SaveCharacterInvSnapshot(tc->CharacterID())) {
			tc->SetNextInvSnapshot(RuleI(Character, InvSnapshotMinIntervalM));
			c->Message(Chat::White, "Successful inventory snapshot taken of %s - setting next interval for %i minute%s.",
				tc->GetName(), RuleI(Character, InvSnapshotMinIntervalM), (RuleI(Character, InvSnapshotMinIntervalM) == 1 ? "" : "s"));
		}
		else {
			tc->SetNextInvSnapshot(RuleI(Character, InvSnapshotMinRetryM));
			c->Message(Chat::White, "Failed to take inventory snapshot of %s - retrying in %i minute%s.",
				tc->GetName(), RuleI(Character, InvSnapshotMinRetryM), (RuleI(Character, InvSnapshotMinRetryM) == 1 ? "" : "s"));
		}

		return;
	}

	// Privileged, per-character sub-commands.
	if (c->Admin() >= commandInvSnapshot) {
		if (strcmp(sep->arg[1], "count") == 0) {
			// snapshot count for the targeted character
			auto is_count = database.CountCharacterInvSnapshots(tc->CharacterID());
			c->Message(Chat::White, "%s (id: %u) has %i inventory snapshot%s.", tc->GetName(), tc->CharacterID(), is_count, (is_count == 1 ? "" : "s"));

			return;
		}

		if (strcmp(sep->arg[1], "clear") == 0) {
			// same "now" vs. history-rule semantics as gclear, but per character
			if (strcmp(sep->arg[2], "now") == 0) {
				database.ClearCharacterInvSnapshots(tc->CharacterID(), true);
				c->Message(Chat::White, "%s\'s (id: %u) inventory snapshots cleared using current time.", tc->GetName(), tc->CharacterID());
			}
			else {
				database.ClearCharacterInvSnapshots(tc->CharacterID());
				c->Message(Chat::White, "%s\'s (id: %u) inventory snapshots cleared using RuleI(Character, InvSnapshotHistoryD) (%i day%s).",
					tc->GetName(), tc->CharacterID(), RuleI(Character, InvSnapshotHistoryD), (RuleI(Character, InvSnapshotHistoryD) == 1 ? "" : "s"));
			}

			return;
		}

		if (strcmp(sep->arg[1], "list") == 0) {
			// Popup table of snapshot timestamps and entry counts, optionally
			// limited to the first [count] entries.
			std::list<std::pair<uint32, int>> is_list;
			database.ListCharacterInvSnapshots(tc->CharacterID(), is_list);

			if (is_list.empty()) {
				c->Message(Chat::White, "No inventory snapshots for %s (id: %u)", tc->GetName(), tc->CharacterID());
				return;
			}

			auto list_count = 0;
			if (sep->IsNumber(2))
				list_count = atoi(sep->arg[2]);
			if (list_count < 1 || list_count > is_list.size())
				list_count = is_list.size();

			std::string window_title = StringFormat("Snapshots for %s", tc->GetName());

			std::string window_text =
				"<table>"
				"<tr>"
				"<td>Timestamp</td>"
				"<td>Entry Count</td>"
				"</tr>";

			for (auto iter : is_list) {
				if (!list_count)
					break;

				window_text.append(StringFormat(
					"<tr>"
					"<td>%u</td>"
					"<td>%i</td>"
					"</tr>",
					iter.first,
					iter.second
				));

				--list_count;
			}

			window_text.append(
				"</table>"
			);

			c->SendPopupToClient(window_title.c_str(), window_text.c_str());

			return;
		}

		if (strcmp(sep->arg[1], "parse") == 0) {
			// Popup listing of slot/item pairs stored in the given snapshot.
			if (!sep->IsNumber(2)) {
				c->Message(Chat::White, "A timestamp is required to use this option.");
				return;
			}

			uint32 timestamp = atoul(sep->arg[2]);

			if (!database.ValidateCharacterInvSnapshotTimestamp(tc->CharacterID(), timestamp)) {
				c->Message(Chat::White, "No inventory snapshots for %s (id: %u) exist at %u.", tc->GetName(), tc->CharacterID(), timestamp);
				return;
			}

			std::list<std::pair<int16, uint32>> parse_list;
			database.ParseCharacterInvSnapshot(tc->CharacterID(), timestamp, parse_list);

			std::string window_title = StringFormat("Snapshot Parse for %s @ %u", tc->GetName(), timestamp);

			std::string window_text = "Slot: ItemID - Description<br>";

			for (auto iter : parse_list) {
				auto item_data = database.GetItem(iter.second);

				std::string window_line = StringFormat("%i: %u - %s<br>", iter.first, iter.second, (item_data ? item_data->Name : "[error]"));

				// popup payload is capped; stop before overflowing the window
				if (window_text.length() + window_line.length() < 4095) {
					window_text.append(window_line);
				}
				else {
					c->Message(Chat::White, "Too many snapshot entries to list...");
					break;
				}
			}

			c->SendPopupToClient(window_title.c_str(), window_text.c_str());

			return;
		}

		if (strcmp(sep->arg[1], "compare") == 0) {
			// Diff the live inventory against the given snapshot. Two sorted
			// divergence lists are merged: entries only in the inventory are
			// "delete", only in the snapshot are "insert", in both "replace".
			if (!sep->IsNumber(2)) {
				c->Message(Chat::White, "A timestamp is required to use this option.");
				return;
			}

			uint32 timestamp = atoul(sep->arg[2]);

			if (!database.ValidateCharacterInvSnapshotTimestamp(tc->CharacterID(), timestamp)) {
				c->Message(Chat::White, "No inventory snapshots for %s (id: %u) exist at %u.", tc->GetName(), tc->CharacterID(), timestamp);
				return;
			}

			std::list<std::pair<int16, uint32>> inv_compare_list;
			database.DivergeCharacterInventoryFromInvSnapshot(tc->CharacterID(), timestamp, inv_compare_list);

			std::list<std::pair<int16, uint32>> iss_compare_list;
			database.DivergeCharacterInvSnapshotFromInventory(tc->CharacterID(), timestamp, iss_compare_list);

			std::string window_title = StringFormat("Snapshot Comparison for %s @ %u", tc->GetName(), timestamp);

			std::string window_text = "Slot: (action) Snapshot -> Inventory<br>";

			// two-pointer merge over the slot-ordered divergence lists
			auto inv_iter = inv_compare_list.begin();
			auto iss_iter = iss_compare_list.begin();
			while (true) {
				std::string window_line;

				if (inv_iter == inv_compare_list.end() && iss_iter == iss_compare_list.end()) {
					break;
				}
				else if (inv_iter != inv_compare_list.end() && iss_iter == iss_compare_list.end()) {
					window_line = StringFormat("%i: (delete) [empty] -> %u<br>", inv_iter->first, inv_iter->second);
					++inv_iter;
				}
				else if (inv_iter == inv_compare_list.end() && iss_iter != iss_compare_list.end()) {
					window_line = StringFormat("%i: (insert) %u -> [empty]<br>", iss_iter->first, iss_iter->second);
					++iss_iter;
				}
				else {
					if (inv_iter->first < iss_iter->first) {
						window_line = StringFormat("%i: (delete) [empty] -> %u<br>", inv_iter->first, inv_iter->second);
						++inv_iter;
					}
					else if (inv_iter->first > iss_iter->first) {
						window_line = StringFormat("%i: (insert) %u -> [empty]<br>", iss_iter->first, iss_iter->second);
						++iss_iter;
					}
					else {
						window_line = StringFormat("%i: (replace) %u -> %u<br>", iss_iter->first, iss_iter->second, inv_iter->second);
						++inv_iter;
						++iss_iter;
					}
				}

				// popup payload is capped; stop before overflowing the window
				if (window_text.length() + window_line.length() < 4095) {
					window_text.append(window_line);
				}
				else {
					c->Message(Chat::White, "Too many comparison entries to list...");
					break;
				}
			}

			c->SendPopupToClient(window_title.c_str(), window_text.c_str());

			return;
		}

		if (strcmp(sep->arg[1], "restore") == 0) {
			// Overwrite the live inventory with the given snapshot. A fresh
			// pre-restore snapshot is taken first so the action is reversible.
			if (!sep->IsNumber(2)) {
				c->Message(Chat::White, "A timestamp is required to use this option.");
				return;
			}

			uint32 timestamp = atoul(sep->arg[2]);

			if (!database.ValidateCharacterInvSnapshotTimestamp(tc->CharacterID(), timestamp)) {
				c->Message(Chat::White, "No inventory snapshots for %s (id: %u) exist at %u.", tc->GetName(), tc->CharacterID(), timestamp);
				return;
			}

			if (database.SaveCharacterInvSnapshot(tc->CharacterID())) {
				tc->SetNextInvSnapshot(RuleI(Character, InvSnapshotMinIntervalM));
			}
			else {
				c->Message(Chat::Red, "Failed to take pre-restore inventory snapshot of %s (id: %u).",
					tc->GetName(), tc->CharacterID());
				return;
			}

			if (database.RestoreCharacterInvSnapshot(tc->CharacterID(), timestamp)) {
				// cannot delete all valid item slots from client..so, we worldkick
				tc->WorldKick(); // self restores update before the 'kick' is processed

				c->Message(Chat::White, "Successfully applied snapshot %u to %s's (id: %u) inventory.",
					timestamp, tc->GetName(), tc->CharacterID());
			}
			else {
				c->Message(Chat::Red, "Failed to apply snapshot %u to %s's (id: %u) inventory.",
					timestamp, tc->GetName(), tc->CharacterID());
			}

			return;
		}
	}
}
void command_findnpctype(Client *c, const Seperator *sep)
{
	// Looks up npc_types by numeric id, or by partial name match when the
	// argument is non-numeric, and lists up to `maxrows` results.
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #findnpctype [search criteria]");
		return;
	}

	std::string query;

	int id = atoi((const char *)sep->arg[1]);
	if (id == 0) // If id evaluates to 0, then search as if user entered a string.
		// NOTE(review): user text is interpolated into LIKE unescaped — GM-only
		// command, but an EscapeString() here would be safer.
		query = StringFormat("SELECT id, name FROM npc_types WHERE name LIKE '%%%s%%'", sep->arg[1]);
	else // Otherwise, look for just that npc id.
		query = StringFormat("SELECT id, name FROM npc_types WHERE id = %i", id);

	auto results = database.QueryDatabase(query);
	if (!results.Success()) {
		c->Message(Chat::White, "Error querying database.");
		c->Message(Chat::White, query.c_str());
		return; // was falling through and iterating the failed result set
	}

	if (results.RowCount() == 0) { // No matches found.
		c->Message(Chat::White, "No matches found for %s.", sep->arg[1]);
		return; // was falling through and also printing "Query complete. 0 rows shown."
	}

	int count = 0;
	const int maxrows = 20;

	// Process each row returned.
	for (auto row = results.begin(); row != results.end(); ++row) {
		// Limit to returning maxrows rows.
		if (++count > maxrows) {
			c->Message(Chat::White, "%i npc types shown. Too many results.", maxrows);
			break;
		}

		c->Message(Chat::White, " %s: %s", row[0], row[1]);
	}

	// If we did not hit the maxrows limit.
	if (count <= maxrows)
		c->Message(Chat::White, "Query complete. %i rows shown.", count);
}
void command_faction(Client *c, const Seperator *sep)
{
	// Faction inspection and maintenance:
	//   (no args)  - usage, plus the targeted NPC's primary faction if any
	//   find       - list factions matching a name filter (or "all")
	//   review     - list the targeted player's non-base faction values
	//   reset [id] - reset one faction of the targeted player to base
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #faction -- Displays Target NPC's Primary faction");
		c->Message(Chat::White, "Usage: #faction Find [criteria | all] -- Displays factions name & id");
		c->Message(Chat::White, "Usage: #faction Review [criteria | all] -- Review Targeted Players faction hits");
		c->Message(Chat::White, "Usage: #faction Reset [id] -- Reset Targeted Players specified faction to base");
		uint32 npcfac;
		std::string npcname;
		if (c->GetTarget() && c->GetTarget()->IsNPC()) {
			npcfac = c->GetTarget()->CastToNPC()->GetPrimaryFaction();
			npcname = c->GetTarget()->CastToNPC()->GetCleanName();
			std::string blurb = fmt::format("( Target Npc: {} : has primary faction id: {} )", npcname, npcfac);
			c->Message(Chat::Yellow, blurb.c_str());
			c->Message(Chat::White, "Use: #setfaction [id] - to alter an NPC's faction");
		}
		return;
	}
	// Second argument doubles as a lower-cased name filter (find/review) or a
	// numeric faction id (reset).
	std::string faction_filter;
	if (sep->arg[2]) {
		faction_filter = str_tolower(sep->arg[2]);
	}
	if (strcasecmp(sep->arg[1], "find") == 0) {
		std::string query;
		if (strcasecmp(sep->arg[2], "all") == 0) {
			query = "SELECT `id`,`name` FROM `faction_list`";
		}
		else {
			// NOTE(review): filter is interpolated into LIKE unescaped
			query = fmt::format("SELECT `id`,`name` FROM `faction_list` WHERE `name` LIKE '%{}%'", faction_filter.c_str());
		}
		auto results = database.QueryDatabase(query);
		if (!results.Success())
			return;
		if (results.RowCount() == 0) {
			c->Message(Chat::Yellow, "No factions found with specified criteria");
			return;
		}
		int _ctr = 0;
		for (auto row = results.begin(); row != results.end(); ++row) {
			auto id = static_cast<uint32>(atoi(row[0]));
			std::string name = row[1];
			_ctr++;
			c->Message(Chat::Yellow, "%s : id: %s", name.c_str(), std::to_string(id).c_str());
		}
		// mixed std::string / const char* ternary is safe here: the temporary
		// from fmt::format lives until the end of the full expression
		std::string response = _ctr > 0 ? fmt::format("Found {} matching factions", _ctr).c_str() : "No factions found.";
		c->Message(Chat::Yellow, response.c_str());
	}
	if (strcasecmp(sep->arg[1], "review") == 0) {
		if (!(c->GetTarget() && c->GetTarget()->IsClient())) {
			c->Message(Chat::Red, "Player Target Required for faction review");
			return;
		}
		uint32 charid = c->GetTarget()->CastToClient()->CharacterID();
		std::string revquery;
		if (strcasecmp(sep->arg[2], "all") == 0) {
			revquery = fmt::format(
				"SELECT id,`name`, current_value FROM faction_list INNER JOIN faction_values ON faction_list.id = faction_values.faction_id WHERE char_id = {}", charid);
		}
		else
		{
			revquery = fmt::format(
				"SELECT id,`name`, current_value FROM faction_list INNER JOIN faction_values ON faction_list.id = faction_values.faction_id WHERE `name` like '%{}%' and char_id = {}", faction_filter.c_str(), charid);
		}
		auto revresults = database.QueryDatabase(revquery);
		if (!revresults.Success())
			return;
		if (revresults.RowCount() == 0) {
			c->Message(Chat::Yellow, "No faction hits found. All are at base level");
			return;
		}
		int _ctr2 = 0;
		for (auto rrow = revresults.begin(); rrow != revresults.end(); ++rrow) {
			auto f_id = static_cast<uint32>(atoi(rrow[0]));
			std::string cname = rrow[1];
			std::string fvalue = rrow[2];
			_ctr2++;
			// each row carries a clickable saylink that issues "#faction reset <id>"
			std::string resetlink = fmt::format("#faction reset {}", f_id);
			c->Message(Chat::Yellow, "Reset: %s id: %s (%s)", EQEmu::SayLinkEngine::GenerateQuestSaylink(resetlink, false, cname.c_str()).c_str(), std::to_string(f_id).c_str(), fvalue.c_str());
		}
		std::string response = _ctr2 > 0 ? fmt::format("Found {} matching factions", _ctr2).c_str() : "No faction hits found.";
		c->Message(Chat::Yellow, response.c_str());
	}
	else if (strcasecmp(sep->arg[1], "reset") == 0)
	{
		if (!(faction_filter == "")) {
			if (c->GetTarget() && c->GetTarget()->IsClient())
			{
				// NOTE(review): this checks the ISSUER's feign/aggro state
				// (c->CastToClient()), while the messages talk about the
				// target -- confirm whether the target was intended here.
				if (!c->CastToClient()->GetFeigned() && c->CastToClient()->GetAggroCount() == 0)
				{
					uint32 charid = c->GetTarget()->CastToClient()->CharacterID();
					uint32 factionid = atoi(faction_filter.c_str());
					if (c->GetTarget()->CastToClient()->ReloadCharacterFaction(c->GetTarget()->CastToClient(), factionid, charid))
						c->Message(Chat::Yellow, "faction %u was cleared.", factionid);
					else
						c->Message(Chat::Red, "An error occurred clearing faction %u", factionid);
				}
				else
				{
					c->Message(Chat::Red, "Cannot be in Combat");
					return;
				}
			}
			else {
				c->Message(Chat::Red, "Player Target Required (whose not feigning death)");
				return;
			}
		}
		else
			c->Message(Chat::Red, "No faction id entered");
	}
}
void command_findzone(Client *c, const Seperator *sep)
{
	// Searches the zone table by expansion number, zone id, or partial long
	// name, and lists matches with clickable #zone / #gmzone saylinks.
	//
	// Fix: the "No matches found" branch was unreachable -- it was an
	// `else if (count == 0)` behind `count <= maxrows`, which is true for 0.
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #findzone [search criteria]");
		c->Message(Chat::White, "Usage: #findzone expansion [expansion number]");
		return;
	}

	std::string query;
	int id = atoi((const char *) sep->arg[1]);
	std::string arg1 = sep->arg[1];

	if (arg1 == "expansion") {
		query = fmt::format(
			"SELECT zoneidnumber, short_name, long_name, version FROM zone WHERE expansion = {}",
			sep->arg[2]
		);
	}
	else {
		/**
		 * If id evaluates to 0, then search as if user entered a string
		 */
		if (id == 0) {
			query = fmt::format(
				"SELECT zoneidnumber, short_name, long_name, version FROM zone WHERE long_name LIKE '%{}%'",
				EscapeString(sep->arg[1])
			);
		}
		else {
			query = fmt::format(
				"SELECT zoneidnumber, short_name, long_name, version FROM zone WHERE zoneidnumber = {}",
				id
			);
		}
	}

	auto results = database.QueryDatabase(query);
	if (!results.Success()) {
		c->Message(Chat::White, "Error querying database.");
		c->Message(Chat::White, query.c_str());
		return;
	}

	int count = 0;
	const int maxrows = 100;

	for (auto row = results.begin(); row != results.end(); ++row) {
		std::string zone_id = row[0];
		std::string short_name = row[1];
		std::string long_name = row[2];
		int version = atoi(row[3]);

		if (++count > maxrows) {
			c->Message(Chat::White, "%i zones shown. Too many results.", maxrows);
			break;
		}

		// base version gets a direct #zone link; other versions only #gmzone
		std::string command_zone = EQEmu::SayLinkEngine::GenerateQuestSaylink("#zone " + short_name, false, "zone");
		std::string command_gmzone = EQEmu::SayLinkEngine::GenerateQuestSaylink(
			fmt::format("#gmzone {} {}", short_name, version),
			false,
			"gmzone"
		);

		c->Message(
			Chat::White,
			fmt::format(
				"[{}] [{}] [{}] Version ({}) [{}]",
				(version == 0 ? command_zone : "zone"),
				command_gmzone,
				short_name,
				version,
				long_name
			).c_str()
		);
	}

	if (count == 0) {
		// check the empty case first; previously shadowed by `count <= maxrows`
		c->Message(Chat::White, "No matches found for %s.", sep->arg[1]);
	}
	else if (count <= maxrows) {
		c->Message(
			Chat::White,
			"Query complete. %i rows shown. %s",
			count,
			(arg1 == "expansion" ? "(expansion search)" : ""));
	}
}
void command_viewnpctype(Client *c, const Seperator *sep)
{
	// Prints a summary of the requested npc_types entry.
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White, "Usage: #viewnpctype [npctype id]");
		return;
	}

	uint32 npctypeid = atoi(sep->arg[1]);
	const NPCType* npct = database.LoadNPCTypesData(npctypeid);
	if (!npct) {
		c->Message(Chat::White, "NPC #%d not found", npctypeid);
		return;
	}

	c->Message(Chat::White, " NPCType Info, ");
	c->Message(Chat::White, " NPCTypeID: %u", npct->npc_id);
	c->Message(Chat::White, " Name: %s", npct->name);
	c->Message(Chat::White, " Level: %i", npct->level);
	c->Message(Chat::White, " Race: %i", npct->race);
	c->Message(Chat::White, " Class: %i", npct->class_);
	c->Message(Chat::White, " MinDmg: %i", npct->min_dmg);
	c->Message(Chat::White, " MaxDmg: %i", npct->max_dmg);
	c->Message(Chat::White, " Special Abilities: %s", npct->special_abilities);
	c->Message(Chat::White, " Spells: %i", npct->npc_spells_id);
	c->Message(Chat::White, " Loot Table: %i", npct->loottable_id);
	c->Message(Chat::White, " NPCFactionID: %i", npct->npc_faction_id);
}
void command_reloadqst(Client *c, const Seperator *sep)
{
	// Clears the quest memory cache; any argument additionally stops timers.
	const bool stop_timers = (sep->arg[1][0] != 0);

	if (stop_timers) {
		c->Message(Chat::White, "Clearing quest memory cache and stopping timers.");
		entity_list.ClearAreas();
		parse->ReloadQuests(true);
	}
	else {
		c->Message(Chat::White, "Clearing quest memory cache.");
		entity_list.ClearAreas();
		parse->ReloadQuests();
	}
}
// Runs the zone-wide corpse fix-up routine for the issuing client.
void command_corpsefix(Client *c, const Seperator *sep)
{
	entity_list.CorpseFix(c);
}
void command_reloadworld(Client *c, const Seperator *sep)
{
	// Asks the world server to reload the quest cache everywhere; an
	// argument of 1 additionally requests a repop (Option flag).
	c->Message(Chat::White, "Reloading quest cache and repopping zones worldwide.");

	auto pack = new ServerPacket(ServerOP_ReloadWorld, sizeof(ReloadWorld_Struct));
	auto reload = (ReloadWorld_Struct*) pack->pBuffer;
	reload->Option = ((atoi(sep->arg[1]) == 1) ? 1 : 0);
	worldserver.SendPacket(pack);
	safe_delete(pack);
}
// Reloads all merchant inventories in the zone, then confirms to the issuer.
void command_reloadmerchants(Client *c, const Seperator *sep) {
	entity_list.ReloadMerchants();
	c->Message(Chat::Yellow, "Reloading merchants.");
}
void command_reloadlevelmods(Client *c, const Seperator *sep)
{
	// Reloads the zone's level-based EXP modifiers. Only acts when invoked
	// with no arguments (matching the original behavior), and only when the
	// LevelBasedEXPMods rule is enabled.
	if (sep->arg[1][0] != 0)
		return;

	if (!RuleB(Zone, LevelBasedEXPMods)) {
		c->Message(Chat::Yellow, "Level based EXP Mods are disabled in rules!");
		return;
	}

	zone->LoadLevelEXPMods();
	c->Message(Chat::Yellow, "Level based EXP Mods have been reloaded zonewide");
}
// Reloads the static zone_points table for the current zone/instance version.
void command_reloadzps(Client *c, const Seperator *sep)
{
	database.LoadStaticZonePoints(&zone->zone_point_list, zone->GetShortName(), zone->GetInstanceVersion());
	c->Message(Chat::White, "Reloading server zone_points.");
}
// Asks world to shut a zone down; the argument is either a numeric
// zone-server id or a zone short name.
void command_zoneshutdown(Client *c, const Seperator *sep)
{
	if (!worldserver.Connected()) {
		c->Message(Chat::White, "Error: World server disconnected");
		return;
	}
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #zoneshutdown zoneshortname");
		return;
	}
	auto pack = new ServerPacket(ServerOP_ZoneShutdown, sizeof(ServerZoneStateChange_struct));
	auto change = (ServerZoneStateChange_struct *) pack->pBuffer;
	strcpy(change->adminname, c->GetName());
	// A leading digit selects by zone-server id; otherwise look up the short name.
	if (sep->arg[1][0] >= '0' && sep->arg[1][0] <= '9')
		change->ZoneServerID = atoi(sep->arg[1]);
	else
		change->zoneid = database.GetZoneID(sep->arg[1]);
	worldserver.SendPacket(pack);
	safe_delete(pack);
}
// Asks world to boot a zone on a specific zone server; an optional third
// argument of "static" marks the zone static.
void command_zonebootup(Client *c, const Seperator *sep)
{
	if (!worldserver.Connected()) {
		c->Message(Chat::White, "Error: World server disconnected");
		return;
	}
	if (sep->arg[2][0] == 0) {
		c->Message(Chat::White, "Usage: #zonebootup ZoneServerID# zoneshortname");
		return;
	}
	auto pack = new ServerPacket(ServerOP_ZoneBootup, sizeof(ServerZoneStateChange_struct));
	auto change = (ServerZoneStateChange_struct *) pack->pBuffer;
	change->ZoneServerID = atoi(sep->arg[1]);
	strcpy(change->adminname, c->GetName());
	change->zoneid = database.GetZoneID(sep->arg[2]);
	change->makestatic = (strcasecmp(sep->arg[3], "static") == 0);
	worldserver.SendPacket(pack);
	safe_delete(pack);
}
// Kicks a player by name: handled locally when they are in this zone,
// otherwise forwarded to the world server.
void command_kick(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #kick [charname]");
		return;
	}
	Client *victim = entity_list.GetClientByName(sep->arg[1]);
	if (victim != 0) {
		// Original behavior: silently do nothing if the victim outranks us.
		if (victim->Admin() <= c->Admin()) {
			victim->Message(Chat::White, "You have been kicked by %s", c->GetName());
			auto outapp = new EQApplicationPacket(OP_GMKick, 0);
			victim->QueuePacket(outapp);
			victim->Kick("Ordered kicked by command");
			c->Message(Chat::White, "Kick: local: kicking %s", sep->arg[1]);
		}
		return;
	}
	if (!worldserver.Connected()) {
		c->Message(Chat::White, "Error: World server disconnected");
		return;
	}
	// Not in this zone: relay the kick through world.
	auto pack = new ServerPacket(ServerOP_KickPlayer, sizeof(ServerKickPlayer_Struct));
	auto skp = (ServerKickPlayer_Struct *) pack->pBuffer;
	strcpy(skp->adminname, c->GetName());
	strcpy(skp->name, sep->arg[1]);
	skp->adminrank = c->Admin();
	worldserver.SendPacket(pack);
	safe_delete(pack);
}
// Orders the targeted NPC to attack the named mob by adding it to the
// NPC's hate list.
void command_attack(Client *c, const Seperator *sep)
{
	// Bug fix: the old check `sep->arg[1] != 0` compared the argument array's
	// address (always non-null) instead of its contents, so the usage branch
	// was unreachable when an NPC was targeted. Test the first character.
	if (c->GetTarget() && c->GetTarget()->IsNPC() && sep->arg[1][0] != 0) {
		Mob* sictar = entity_list.GetMob(sep->argplus[1]);
		if (sictar)
			c->GetTarget()->CastToNPC()->AddToHateList(sictar, 1, 0);
		else
			c->Message(Chat::White, "Error: %s not found", sep->arg[1]);
	}
	else
		c->Message(Chat::White, "Usage: (needs NPC targeted) #attack targetname");
}
// Asks the world server to lock the server (staff-only logins).
void command_lock(Client *c, const Seperator *sep)
{
	auto pack = new ServerPacket(ServerOP_Lock, sizeof(ServerLock_Struct));
	auto lock = (ServerLock_Struct *) pack->pBuffer;
	strcpy(lock->myname, c->GetName());
	lock->mode = 1; // 1 = lock
	worldserver.SendPacket(pack);
	safe_delete(pack);
}
// Asks the world server to unlock the server for normal logins.
void command_unlock(Client *c, const Seperator *sep)
{
	auto pack = new ServerPacket(ServerOP_Lock, sizeof(ServerLock_Struct));
	auto lock = (ServerLock_Struct *) pack->pBuffer;
	strcpy(lock->myname, c->GetName());
	lock->mode = 0; // 0 = unlock
	worldserver.SendPacket(pack);
	safe_delete(pack);
}
// Sets the server message of the day through the world server.
void command_motd(Client *c, const Seperator *sep)
{
	auto pack = new ServerPacket(ServerOP_Motd, sizeof(ServerMotd_Struct));
	auto motd = (ServerMotd_Struct *) pack->pBuffer;
	// Field sizes match the ServerMotd_Struct layout.
	strn0cpy(motd->myname, c->GetName(), 64);
	strn0cpy(motd->motd, sep->argplus[1], 512);
	worldserver.SendPacket(pack);
	safe_delete(pack);
}
// Lists all open petitions (id, character, account) from the database.
void command_listpetition(Client *c, const Seperator *sep)
{
	const std::string query = "SELECT petid, charname, accountname FROM petitions ORDER BY petid";
	auto results = database.QueryDatabase(query);
	if (!results.Success())
		return;
	LogInfo("Petition list requested by [{}]", c->GetName());
	if (results.RowCount() == 0)
		return;
	c->Message(Chat::Red," ID : Character Name , Account Name");
	for (auto row = results.begin(); row != results.end(); ++row)
		c->Message(Chat::Yellow, " %s: %s , %s ", row[0], row[1], row[2]);
}
// Equips the item on the client's cursor into the given equipment slot.
// Handles the special case of merging a stackable cursor item into an
// existing partial stack (currently rejected when the move would overflow
// the destination stack — see the partialmove note below).
void command_equipitem(Client *c, const Seperator *sep)
{
	uint32 slot_id = atoi(sep->arg[1]);
	if (sep->IsNumber(1) && (slot_id >= EQEmu::invslot::EQUIPMENT_BEGIN && slot_id <= EQEmu::invslot::EQUIPMENT_END)) {
		const EQEmu::ItemInstance* from_inst = c->GetInv().GetItem(EQEmu::invslot::slotCursor);
		const EQEmu::ItemInstance* to_inst = c->GetInv().GetItem(slot_id); // added (desync issue when forcing stack to stack)
		bool partialmove = false;
		int16 movecount;
		if (from_inst && from_inst->IsClassCommon()) {
			auto outapp = new EQApplicationPacket(OP_MoveItem, sizeof(MoveItem_Struct));
			MoveItem_Struct* mi = (MoveItem_Struct*)outapp->pBuffer;
			mi->from_slot = EQEmu::invslot::slotCursor;
			mi->to_slot = slot_id;
			// mi->number_in_stack = from_inst->GetCharges(); // replaced with con check for stacking
			// crude stackable check to only 'move' the difference count on client instead of entire stack when applicable
			if (to_inst && to_inst->IsStackable() &&
				(to_inst->GetItem()->ID == from_inst->GetItem()->ID) &&
				(to_inst->GetCharges() < to_inst->GetItem()->StackSize) &&
				(from_inst->GetCharges() > to_inst->GetItem()->StackSize - to_inst->GetCharges())) {
				// Only the free capacity of the destination stack can be moved.
				movecount = to_inst->GetItem()->StackSize - to_inst->GetCharges();
				mi->number_in_stack = (uint32)movecount;
				partialmove = true;
			}
			else
				mi->number_in_stack = from_inst->GetCharges();
			// Save move changes
			// Added conditional check to packet send..would have sent change even on a swap failure..whoops!
			if (partialmove) { // remove this con check if someone can figure out removing charges from cursor stack issue below
				// mi->number_in_stack is always from_inst->GetCharges() when partialmove is false
				// NOTE(review): partial stack merges are deliberately rejected here; the
				// commented-out block below is an abandoned attempt to support them.
				c->Message(Chat::Red, "Error: Partial stack added to existing stack exceeds allowable stacksize");
				safe_delete(outapp);
				return;
			}
			else if(c->SwapItem(mi)) {
				c->FastQueuePacket(&outapp);
				// if the below code is still needed..just send an an item trade packet to each slot..it should overwrite the client instance
				// below code has proper logic, but client does not like to have cursor charges changed
				// (we could delete the cursor item and resend, but issues would arise if there are queued items)
				//if (partialmove) {
				//	EQApplicationPacket* outapp2 = new EQApplicationPacket(OP_DeleteItem, sizeof(DeleteItem_Struct));
				//	DeleteItem_Struct* di = (DeleteItem_Struct*)outapp2->pBuffer;
				//	di->from_slot = SLOT_CURSOR;
				//	di->to_slot = 0xFFFFFFFF;
				//	di->number_in_stack = 0xFFFFFFFF;
				//	c->Message(Chat::White, "Deleting %i charges from stack", movecount); // debug line..delete
				//	for (int16 deletecount=0; deletecount < movecount; deletecount++)
				//	have to use 'movecount' because mi->number_in_stack is 'ENCODED' at this point (i.e., 99 charges returns 22...)
				//	c->QueuePacket(outapp2);
				//	safe_delete(outapp2);
				//}
			}
			else {
				c->Message(Chat::Red, "Error: Unable to equip current item");
			}
			safe_delete(outapp);
			// also send out a wear change packet?
		}
		else if (from_inst == nullptr)
			c->Message(Chat::Red, "Error: There is no item on your cursor");
		else
			c->Message(Chat::Red, "Error: Item on your cursor cannot be equipped");
	}
	else
		c->Message(Chat::White, "Usage: #equipitem slotid[0-21] - equips the item on your cursor to the position");
}
// Lists, locks, or unlocks zones through the world server.
// lock/unlock require commandLockZones admin status.
void command_zonelock(Client *c, const Seperator *sep)
{
	auto pack = new ServerPacket(ServerOP_LockZone, sizeof(ServerLockZone_Struct));
	auto lock = (ServerLockZone_Struct *) pack->pBuffer;
	strn0cpy(lock->adminname, c->GetName(), sizeof(lock->adminname));

	if (strcasecmp(sep->arg[1], "list") == 0) {
		lock->op = 0; // 0 = list locked zones
		worldserver.SendPacket(pack);
	}
	else if (strcasecmp(sep->arg[1], "lock") == 0 && c->Admin() >= commandLockZones) {
		uint16 zone_id = database.GetZoneID(sep->arg[2]);
		if (!zone_id) {
			c->Message(Chat::White, "Usage: #zonelock lock [zonename]");
		}
		else {
			lock->op = 1; // 1 = lock
			lock->zoneID = zone_id;
			worldserver.SendPacket(pack);
		}
	}
	else if (strcasecmp(sep->arg[1], "unlock") == 0 && c->Admin() >= commandLockZones) {
		uint16 zone_id = database.GetZoneID(sep->arg[2]);
		if (!zone_id) {
			c->Message(Chat::White, "Usage: #zonelock unlock [zonename]");
		}
		else {
			lock->op = 2; // 2 = unlock
			lock->zoneID = zone_id;
			worldserver.SendPacket(pack);
		}
	}
	else {
		c->Message(Chat::White, "#zonelock sub-commands");
		c->Message(Chat::White, " list");
		if (c->Admin() >= commandLockZones) {
			c->Message(Chat::White, " lock [zonename]");
			c->Message(Chat::White, " unlock [zonename]");
		}
	}
	safe_delete(pack);
}
// Corpse management command. Sub-commands cover deleting, listing, locking,
// cash removal, loot inspection, owner reassignment, and depopping of NPC
// and player corpses. Player-corpse modification requires
// commandEditPlayerCorpses admin status.
void command_corpse(Client *c, const Seperator *sep)
{
	Mob *target=c->GetTarget();
	if (strcasecmp(sep->arg[1], "DeletePlayerCorpses") == 0 && c->Admin() >= commandEditPlayerCorpses) {
		int32 tmp = entity_list.DeletePlayerCorpses();
		if (tmp >= 0)
			c->Message(Chat::White, "%i corpses deleted.", tmp);
		else
			c->Message(Chat::White, "DeletePlayerCorpses Error #%i", tmp);
	}
	else if (strcasecmp(sep->arg[1], "delete") == 0) {
		if (target == 0 || !target->IsCorpse())
			c->Message(Chat::White, "Error: Target the corpse you wish to delete");
		else if (target->IsNPCCorpse()) {
			c->Message(Chat::White, "Depoping %s.", target->GetName());
			target->CastToCorpse()->Delete();
		}
		else if (c->Admin() >= commandEditPlayerCorpses) {
			c->Message(Chat::White, "Deleting %s.", target->GetName());
			target->CastToCorpse()->Delete();
		}
		else
			c->Message(Chat::White, "Insufficient status to delete player corpse.");
	}
	else if (strcasecmp(sep->arg[1], "ListNPC") == 0) {
		entity_list.ListNPCCorpses(c);
	}
	else if (strcasecmp(sep->arg[1], "ListPlayer") == 0) {
		entity_list.ListPlayerCorpses(c);
	}
	else if (strcasecmp(sep->arg[1], "DeleteNPCCorpses") == 0) {
		int32 tmp = entity_list.DeleteNPCCorpses();
		if (tmp >= 0)
			c->Message(Chat::White, "%d corpses deleted.", tmp);
		else
			// Bug fix: error message said "DeletePlayerCorpses" (copy-paste
			// from the branch above) although this is the NPC-corpse path.
			c->Message(Chat::White, "DeleteNPCCorpses Error #%d", tmp);
	}
	else if (strcasecmp(sep->arg[1], "charid") == 0 && c->Admin() >= commandEditPlayerCorpses) {
		if (target == 0 || !target->IsPlayerCorpse())
			c->Message(Chat::White, "Error: Target must be a player corpse.");
		else if (!sep->IsNumber(2))
			c->Message(Chat::White, "Error: charid must be a number.");
		else
			c->Message(Chat::White, "Setting CharID=%u on PlayerCorpse '%s'", target->CastToCorpse()->SetCharID(atoi(sep->arg[2])), target->GetName());
	}
	else if (strcasecmp(sep->arg[1], "ResetLooter") == 0) {
		if (target == 0 || !target->IsCorpse())
			c->Message(Chat::White, "Error: Target the corpse you wish to reset");
		else
			target->CastToCorpse()->ResetLooter();
	}
	else if (strcasecmp(sep->arg[1], "RemoveCash") == 0) {
		if (target == 0 || !target->IsCorpse())
			c->Message(Chat::White, "Error: Target the corpse you wish to remove the cash from");
		else if (!target->IsPlayerCorpse() || c->Admin() >= commandEditPlayerCorpses) {
			c->Message(Chat::White, "Removing Cash from %s.", target->GetName());
			target->CastToCorpse()->RemoveCash();
		}
		else
			c->Message(Chat::White, "Insufficient status to modify player corpse.");
	}
	else if (strcasecmp(sep->arg[1], "InspectLoot") == 0) {
		if (target == 0 || !target->IsCorpse())
			c->Message(Chat::White, "Error: Target must be a corpse.");
		else
			target->CastToCorpse()->QueryLoot(c);
	}
	else if (strcasecmp(sep->arg[1], "lock") == 0) {
		if (target == 0 || !target->IsCorpse())
			c->Message(Chat::White, "Error: Target must be a corpse.");
		else {
			target->CastToCorpse()->Lock();
			c->Message(Chat::White, "Locking %s...", target->GetName());
		}
	}
	else if (strcasecmp(sep->arg[1], "unlock") == 0) {
		if (target == 0 || !target->IsCorpse())
			c->Message(Chat::White, "Error: Target must be a corpse.");
		else {
			target->CastToCorpse()->UnLock();
			c->Message(Chat::White, "Unlocking %s...", target->GetName());
		}
	}
	else if (strcasecmp(sep->arg[1], "depop") == 0) {
		if (target == 0 || !target->IsPlayerCorpse())
			c->Message(Chat::White, "Error: Target must be a player corpse.");
		else if (c->Admin() >= commandEditPlayerCorpses && target->IsPlayerCorpse()) {
			c->Message(Chat::White, "Depoping %s.", target->GetName());
			target->CastToCorpse()->DepopPlayerCorpse();
			// An omitted or non-zero second argument also buries the corpse.
			if(!sep->arg[2][0] || atoi(sep->arg[2]) != 0)
				target->CastToCorpse()->Bury();
		}
		else
			c->Message(Chat::White, "Insufficient status to depop player corpse.");
	}
	else if (strcasecmp(sep->arg[1], "depopall") == 0) {
		if (target == 0 || !target->IsClient())
			c->Message(Chat::White, "Error: Target must be a player.");
		else if (c->Admin() >= commandEditPlayerCorpses && target->IsClient()) {
			c->Message(Chat::White, "Depoping %s\'s corpses.", target->GetName());
			target->CastToClient()->DepopAllCorpses();
			// An omitted or non-zero second argument also buries the corpses.
			if(!sep->arg[2][0] || atoi(sep->arg[2]) != 0)
				target->CastToClient()->BuryPlayerCorpses();
		}
		else
			c->Message(Chat::White, "Insufficient status to depop player corpse.");
	}
	else if (sep->arg[1][0] == 0 || strcasecmp(sep->arg[1], "help") == 0) {
		c->Message(Chat::White, "#Corpse Sub-Commands:");
		c->Message(Chat::White, " DeleteNPCCorpses");
		c->Message(Chat::White, " Delete - Delete targetted corpse");
		c->Message(Chat::White, " ListNPC");
		c->Message(Chat::White, " ListPlayer");
		c->Message(Chat::White, " Lock - GM locks the corpse - cannot be looted by non-GM");
		c->Message(Chat::White, " UnLock");
		c->Message(Chat::White, " RemoveCash");
		c->Message(Chat::White, " InspectLoot");
		c->Message(Chat::White, " [to remove items from corpses, loot them]");
		c->Message(Chat::White, "Lead-GM status required to delete/modify player corpses");
		c->Message(Chat::White, " DeletePlayerCorpses");
		c->Message(Chat::White, " CharID [charid] - change player corpse's owner");
		c->Message(Chat::White, " Depop [bury] - Depops single target corpse.");
		c->Message(Chat::White, " Depopall [bury] - Depops all target player's corpses.");
		c->Message(Chat::White, "Set bury to 0 to skip burying the corpses.");
	}
	else
		c->Message(Chat::White, "Error, #corpse sub-command not found");
}
// Cycles an appearance feature of the targeted mob forward or backward
// (race, gender, texture, helm, face, hair, colors, Drakkin features) and
// re-sends the illusion packet so the change is visible.
void command_fixmob(Client *c, const Seperator *sep)
{
	Mob *target=c->GetTarget();
	const char* Usage = "Usage: #fixmob [race|gender|texture|helm|face|hair|haircolor|beard|beardcolor|heritage|tattoo|detail] [next|prev]";
	// Bug fix: the old checks `!sep->arg[1]` / `sep->arg[2]` tested the
	// argument array's address (always non-null) rather than its contents,
	// so the usage branch was dead code. Test the first character instead.
	if (sep->arg[1][0] == 0)
		c->Message(Chat::White,Usage);
	else if (!target)
		c->Message(Chat::White,"Error: this command requires a target");
	else
	{
		// Adjustment is unsigned; -1 relies on modular wrap-around when
		// added to the (also unsigned) feature values below.
		uint32 Adjustment = 1;	// Previous or Next
		char codeMove = 0;
		if (sep->arg[2][0])
		{
			char* command2 = sep->arg[2];
			codeMove = (command2[0] | 0x20); // First character, lower-cased
			if (codeMove == 'n')
				Adjustment = 1;
			else if (codeMove == 'p')
				Adjustment = -1;
		}
		uint16 Race = target->GetRace();
		uint8 Gender = target->GetGender();
		uint8 Texture = 0xFF;
		uint8 HelmTexture = 0xFF;
		uint8 HairColor = target->GetHairColor();
		uint8 BeardColor = target->GetBeardColor();
		uint8 EyeColor1 = target->GetEyeColor1();
		uint8 EyeColor2 = target->GetEyeColor2();
		uint8 HairStyle = target->GetHairStyle();
		uint8 LuclinFace = target->GetLuclinFace();
		uint8 Beard = target->GetBeard();
		uint32 DrakkinHeritage = target->GetDrakkinHeritage();
		uint32 DrakkinTattoo = target->GetDrakkinTattoo();
		uint32 DrakkinDetails = target->GetDrakkinDetails();
		const char* ChangeType = nullptr; // If it's still nullptr after processing, they didn't send a valid command
		uint32 ChangeSetting;
		char* command = sep->arg[1];
		// Each feature wraps around its maximum: 'p' (prev) from 0 jumps to
		// the max; anything else past the max wraps back to 0.
		if (strcasecmp(command, "race") == 0)
		{
			if (Race == 1 && codeMove == 'p')
				Race = 724;
			else if (Race >= 724 && codeMove != 'p')
				Race = 1;
			else
				Race += Adjustment;
			ChangeType = "Race";
			ChangeSetting = Race;
		}
		else if (strcasecmp(command, "gender") == 0)
		{
			if (Gender == 0 && codeMove == 'p')
				Gender = 2;
			else if (Gender >= 2 && codeMove != 'p')
				Gender = 0;
			else
				Gender += Adjustment;
			ChangeType = "Gender";
			ChangeSetting = Gender;
		}
		else if (strcasecmp(command, "texture") == 0)
		{
			Texture = target->GetTexture();
			if (Texture == 0 && codeMove == 'p')
				Texture = 25;
			else if (Texture >= 25 && codeMove != 'p')
				Texture = 0;
			else
				Texture += Adjustment;
			ChangeType = "Texture";
			ChangeSetting = Texture;
		}
		else if (strcasecmp(command, "helm") == 0)
		{
			HelmTexture = target->GetHelmTexture();
			if (HelmTexture == 0 && codeMove == 'p')
				HelmTexture = 25;
			else if (HelmTexture >= 25 && codeMove != 'p')
				HelmTexture = 0;
			else
				HelmTexture += Adjustment;
			ChangeType = "HelmTexture";
			ChangeSetting = HelmTexture;
		}
		else if (strcasecmp(command, "face") == 0)
		{
			if (LuclinFace == 0 && codeMove == 'p')
				LuclinFace = 87;
			else if (LuclinFace >= 87 && codeMove != 'p')
				LuclinFace = 0;
			else
				LuclinFace += Adjustment;
			ChangeType = "LuclinFace";
			ChangeSetting = LuclinFace;
		}
		else if (strcasecmp(command, "hair") == 0)
		{
			if (HairStyle == 0 && codeMove == 'p')
				HairStyle = 8;
			else if (HairStyle >= 8 && codeMove != 'p')
				HairStyle = 0;
			else
				HairStyle += Adjustment;
			ChangeType = "HairStyle";
			ChangeSetting = HairStyle;
		}
		else if (strcasecmp(command, "haircolor") == 0)
		{
			if (HairColor == 0 && codeMove == 'p')
				HairColor = 24;
			else if (HairColor >= 24 && codeMove != 'p')
				HairColor = 0;
			else
				HairColor += Adjustment;
			ChangeType = "HairColor";
			ChangeSetting = HairColor;
		}
		else if (strcasecmp(command, "beard") == 0)
		{
			if (Beard == 0 && codeMove == 'p')
				Beard = 11;
			else if (Beard >= 11 && codeMove != 'p')
				Beard = 0;
			else
				Beard += Adjustment;
			ChangeType = "Beard";
			ChangeSetting = Beard;
		}
		else if (strcasecmp(command, "beardcolor") == 0)
		{
			if (BeardColor == 0 && codeMove == 'p')
				BeardColor = 24;
			else if (BeardColor >= 24 && codeMove != 'p')
				BeardColor = 0;
			else
				BeardColor += Adjustment;
			ChangeType = "BeardColor";
			ChangeSetting = BeardColor;
		}
		else if (strcasecmp(command, "heritage") == 0)
		{
			if (DrakkinHeritage == 0 && codeMove == 'p')
				DrakkinHeritage = 6;
			else if (DrakkinHeritage >= 6 && codeMove != 'p')
				DrakkinHeritage = 0;
			else
				DrakkinHeritage += Adjustment;
			ChangeType = "DrakkinHeritage";
			ChangeSetting = DrakkinHeritage;
		}
		else if (strcasecmp(command, "tattoo") == 0)
		{
			if (DrakkinTattoo == 0 && codeMove == 'p')
				DrakkinTattoo = 8;
			else if (DrakkinTattoo >= 8 && codeMove != 'p')
				DrakkinTattoo = 0;
			else
				DrakkinTattoo += Adjustment;
			ChangeType = "DrakkinTattoo";
			ChangeSetting = DrakkinTattoo;
		}
		else if (strcasecmp(command, "detail") == 0)
		{
			if (DrakkinDetails == 0 && codeMove == 'p')
				DrakkinDetails = 7;
			else if (DrakkinDetails >= 7 && codeMove != 'p')
				DrakkinDetails = 0;
			else
				DrakkinDetails += Adjustment;
			ChangeType = "DrakkinDetails";
			ChangeSetting = DrakkinDetails;
		}
		// Hack to fix some races that base features from face
		switch (Race)
		{
			case 2:	// Barbarian
				if (LuclinFace > 10) {
					LuclinFace -= ((DrakkinTattoo - 1) * 10);
				}
				LuclinFace += (DrakkinTattoo * 10);
				break;
			case 3: // Erudite
				if (LuclinFace > 10) {
					LuclinFace -= ((HairStyle - 1) * 10);
				}
				LuclinFace += (HairStyle * 10);
				break;
			case 5: // HighElf
			case 6: // DarkElf
			case 7: // HalfElf
				if (LuclinFace > 10) {
					LuclinFace -= ((Beard - 1) * 10);
				}
				LuclinFace += (Beard * 10);
				break;
			default:
				break;
		}
		if (ChangeType == nullptr)
		{
			c->Message(Chat::White,Usage);
		}
		else
		{
			target->SendIllusionPacket(Race, Gender, Texture, HelmTexture, HairColor, BeardColor,
									EyeColor1, EyeColor2, HairStyle, LuclinFace, Beard, 0xFF,
									DrakkinHeritage, DrakkinTattoo, DrakkinDetails);
			c->Message(Chat::White, "%s=%i", ChangeType, ChangeSetting);
		}
	}
}
// Toggles GM speed for the targeted client (or self when no client is
// targeted); the flag takes effect on the next zone.
void command_gmspeed(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #gmspeed [on/off]");
		return;
	}
	Client *t = c;
	if (c->GetTarget() && c->GetTarget()->IsClient())
		t = c->GetTarget()->CastToClient();
	const bool state = atobool(sep->arg[1]);
	database.SetGMSpeed(t->AccountID(), state ? 1 : 0);
	c->Message(Chat::White, "Turning GMSpeed %s for %s (zone to take effect)", state ? "On" : "Off", t->GetName());
}
// Zones the GM into a private instanced copy of the named zone, creating
// (and caching in a data bucket) the instance on first use.
void command_gmzone(Client *c, const Seperator *sep)
{
	// Bug fix: `!sep->arg[1]` tested the argument array's address (always
	// non-null), so the usage text was unreachable; test the first character.
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage");
		c->Message(Chat::White, "-------");
		c->Message(Chat::White, "#gmzone [zone_short_name] [zone_version=0]");
		return;
	}
	std::string zone_short_name_string = sep->arg[1];
	const char *zone_short_name = sep->arg[1];
	// atoi("") is 0, so a missing version argument defaults to version 0.
	auto zone_version = static_cast<uint32>(atoi(sep->arg[2]));
	std::string identifier = "gmzone";
	uint32 zone_id = database.GetZoneID(zone_short_name);
	uint32 duration = 100000000;
	uint16 instance_id = 0;
	if (zone_id == 0) {
		c->Message(Chat::Red, "Invalid zone specified");
		return;
	}
	// Optional third argument overrides the bucket identifier, allowing
	// several distinct private instances of the same zone.
	if (sep->arg[3][0]) {
		identifier = sep->arg[3];
	}
	std::string bucket_key = StringFormat("%s-%s-%u-instance", zone_short_name, identifier.c_str(), zone_version);
	std::string existing_zone_instance = DataBucket::GetData(bucket_key);
	if (existing_zone_instance.length() > 0) {
		instance_id = std::stoi(existing_zone_instance);
		c->Message(Chat::Yellow, "Found already created instance (%s) (%u)", zone_short_name, instance_id);
	}
	if (instance_id == 0) {
		if (!database.GetUnusedInstanceID(instance_id)) {
			c->Message(Chat::Red, "Server was unable to find a free instance id.");
			return;
		}
		if (!database.CreateInstance(instance_id, zone_id, zone_version, duration)) {
			c->Message(Chat::Red, "Server was unable to create a new instance.");
			return;
		}
		c->Message(Chat::Yellow, "New private GM instance %s was created with id %lu.", zone_short_name, (unsigned long) instance_id);
		DataBucket::SetData(bucket_key, std::to_string(instance_id));
	}
	if (instance_id > 0) {
		float target_x = -1, target_y = -1, target_z = -1;
		int16 min_status = 0;
		uint8 min_level = 0;
		if (!database.GetSafePoints(
			zone_short_name,
			zone_version,
			&target_x,
			&target_y,
			&target_z,
			&min_status,
			&min_level
		)) {
			// NOTE(review): original behavior is to warn but still zone the
			// client to the (-1,-1,-1) fallback coordinates; preserved.
			c->Message(Chat::Red, "Failed to find safe coordinates for specified zone");
		}
		c->Message(Chat::Yellow, "Zoning to private GM instance (%s) (%u)", zone_short_name, instance_id);
		c->AssignToInstance(instance_id);
		c->MovePC(zone_id, instance_id, target_x, target_y, target_z, 0, 1);
	}
}
// Sets or removes a player's AA title. Underscores in the argument become
// spaces; a second argument of 1 also creates a row in the title table.
void command_title(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0]==0) {
		c->Message(Chat::White, "Usage: #title [remove|text] [1 = Create row in title table] - remove or set title to 'text'");
		return;
	}
	bool Save = (atoi(sep->arg[2]) == 1);
	Mob *target_mob = c->GetTarget();
	if(!target_mob)
		target_mob = c;
	if(!target_mob->IsClient()) {
		c->Message(Chat::Red, "#title only works on players.");
		return;
	}
	Client *t = target_mob->CastToClient();
	if(strlen(sep->arg[1]) > 31) {
		c->Message(Chat::Red, "Title must be 31 characters or less.");
		return;
	}
	bool removed = false;
	if(!strcasecmp(sep->arg[1], "remove")) {
		t->SetAATitle("");
		removed = true;
	} else {
		// Underscores let the user embed spaces in the title text.
		for(unsigned int i=0; i<strlen(sep->arg[1]); i++)
			if(sep->arg[1][i]=='_')
				sep->arg[1][i] = ' ';
		if(!Save)
			t->SetAATitle(sep->arg[1]);
		else
			title_manager.CreateNewPlayerTitle(t, sep->arg[1]);
	}
	t->Save();
	// Bug fix: the "removed" messages were passed a spurious extra vararg
	// (sep->arg[1]) with no matching format specifier; dropped.
	if(removed) {
		c->Message(Chat::Red, "%s's title has been removed.", t->GetName());
		if(t != c)
			t->Message(Chat::Red, "Your title has been removed.");
	} else {
		c->Message(Chat::Red, "%s's title has been changed to '%s'.", t->GetName(), sep->arg[1]);
		if(t != c)
			t->Message(Chat::Red, "Your title has been changed to '%s'.", sep->arg[1]);
	}
}
// Sets or removes a player's title suffix. Underscores become spaces; a
// second argument of 1 also creates a row in the title table.
void command_titlesuffix(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0]==0) {
		c->Message(Chat::White, "Usage: #titlesuffix [remove|text] [1 = create row in title table] - remove or set title suffix to 'text'");
		return;
	}
	bool Save = (atoi(sep->arg[2]) == 1);
	Mob *target_mob = c->GetTarget();
	if(!target_mob)
		target_mob = c;
	if(!target_mob->IsClient()) {
		c->Message(Chat::Red, "#titlesuffix only works on players.");
		return;
	}
	Client *t = target_mob->CastToClient();
	if(strlen(sep->arg[1]) > 31) {
		c->Message(Chat::Red, "Title suffix must be 31 characters or less.");
		return;
	}
	bool removed = false;
	if(!strcasecmp(sep->arg[1], "remove")) {
		t->SetTitleSuffix("");
		removed = true;
	} else {
		// Underscores let the user embed spaces in the suffix text.
		for(unsigned int i=0; i<strlen(sep->arg[1]); i++)
			if(sep->arg[1][i]=='_')
				sep->arg[1][i] = ' ';
		if(!Save)
			t->SetTitleSuffix(sep->arg[1]);
		else
			title_manager.CreateNewPlayerSuffix(t, sep->arg[1]);
	}
	t->Save();
	// Bug fix: the "removed" messages were passed a spurious extra vararg
	// (sep->arg[1]) with no matching format specifier; dropped.
	if(removed) {
		c->Message(Chat::Red, "%s's title suffix has been removed.", t->GetName());
		if(t != c)
			t->Message(Chat::Red, "Your title suffix has been removed.");
	} else {
		c->Message(Chat::Red, "%s's title suffix has been changed to '%s'.", t->GetName(), sep->arg[1]);
		if(t != c)
			t->Message(Chat::Red, "Your title suffix has been changed to '%s'.", sep->arg[1]);
	}
}
// Dumps the raw spells[] table entry for a spell id to the client.
void command_spellinfo(Client *c, const Seperator *sep)
{
	if(sep->arg[1][0]==0) {
		c->Message(Chat::White, "Usage: #spellinfo [spell_id]");
		return;
	}
	// Bug fix: the id was stored in a `short int` (overflow on large ids) and
	// never range-checked, so an invalid id indexed past the end of the
	// spells[] array. Validate against SPDAT_RECORDS before dereferencing.
	int spell_id = atoi(sep->arg[1]);
	if (spell_id < 0 || spell_id >= SPDAT_RECORDS) {
		c->Message(Chat::White, "Error: #spellinfo: spell_id out of range");
		return;
	}
	const struct SPDat_Spell_Struct *s=&spells[spell_id];
	c->Message(Chat::White, "Spell info for spell #%d:", spell_id);
	c->Message(Chat::White, " name: %s", s->name);
	c->Message(Chat::White, " player_1: %s", s->player_1);
	c->Message(Chat::White, " teleport_zone: %s", s->teleport_zone);
	c->Message(Chat::White, " you_cast: %s", s->you_cast);
	c->Message(Chat::White, " other_casts: %s", s->other_casts);
	c->Message(Chat::White, " cast_on_you: %s", s->cast_on_you);
	c->Message(Chat::White, " spell_fades: %s", s->spell_fades);
	c->Message(Chat::White, " range: %f", s->range);
	c->Message(Chat::White, " aoerange: %f", s->aoerange);
	c->Message(Chat::White, " pushback: %f", s->pushback);
	c->Message(Chat::White, " pushup: %f", s->pushup);
	c->Message(Chat::White, " cast_time: %d", s->cast_time);
	c->Message(Chat::White, " recovery_time: %d", s->recovery_time);
	c->Message(Chat::White, " recast_time: %d", s->recast_time);
	c->Message(Chat::White, " buffdurationformula: %d", s->buffdurationformula);
	c->Message(Chat::White, " buffduration: %d", s->buffduration);
	c->Message(Chat::White, " AEDuration: %d", s->AEDuration);
	c->Message(Chat::White, " mana: %d", s->mana);
	c->Message(Chat::White, " base[12]: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", s->base[0], s->base[1], s->base[2], s->base[3], s->base[4], s->base[5], s->base[6], s->base[7], s->base[8], s->base[9], s->base[10], s->base[11]);
	// Bug fix: label said "base22" although the field printed is base2.
	c->Message(Chat::White, " base2[12]: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", s->base2[0], s->base2[1], s->base2[2], s->base2[3], s->base2[4], s->base2[5], s->base2[6], s->base2[7], s->base2[8], s->base2[9], s->base2[10], s->base2[11]);
	c->Message(Chat::White, " max[12]: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", s->max[0], s->max[1], s->max[2], s->max[3], s->max[4], s->max[5], s->max[6], s->max[7], s->max[8], s->max[9], s->max[10], s->max[11]);
	c->Message(Chat::White, " components[4]: %d, %d, %d, %d", s->components[0], s->components[1], s->components[2], s->components[3]);
	c->Message(Chat::White, " component_counts[4]: %d, %d, %d, %d", s->component_counts[0], s->component_counts[1], s->component_counts[2], s->component_counts[3]);
	c->Message(Chat::White, " NoexpendReagent[4]: %d, %d, %d, %d", s->NoexpendReagent[0], s->NoexpendReagent[1], s->NoexpendReagent[2], s->NoexpendReagent[3]);
	c->Message(Chat::White, " formula[12]: 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x", s->formula[0], s->formula[1], s->formula[2], s->formula[3], s->formula[4], s->formula[5], s->formula[6], s->formula[7], s->formula[8], s->formula[9], s->formula[10], s->formula[11]);
	c->Message(Chat::White, " goodEffect: %d", s->goodEffect);
	c->Message(Chat::White, " Activated: %d", s->Activated);
	c->Message(Chat::White, " resisttype: %d", s->resisttype);
	c->Message(Chat::White, " effectid[12]: 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x", s->effectid[0], s->effectid[1], s->effectid[2], s->effectid[3], s->effectid[4], s->effectid[5], s->effectid[6], s->effectid[7], s->effectid[8], s->effectid[9], s->effectid[10], s->effectid[11]);
	c->Message(Chat::White, " targettype: %d", s->targettype);
	c->Message(Chat::White, " basediff: %d", s->basediff);
	c->Message(Chat::White, " skill: %d", s->skill);
	c->Message(Chat::White, " zonetype: %d", s->zonetype);
	c->Message(Chat::White, " EnvironmentType: %d", s->EnvironmentType);
	c->Message(Chat::White, " TimeOfDay: %d", s->TimeOfDay);
	c->Message(Chat::White, " classes[15]: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
		s->classes[0], s->classes[1], s->classes[2], s->classes[3], s->classes[4],
		s->classes[5], s->classes[6], s->classes[7], s->classes[8], s->classes[9],
		s->classes[10], s->classes[11], s->classes[12], s->classes[13], s->classes[14]);
	c->Message(Chat::White, " CastingAnim: %d", s->CastingAnim);
	c->Message(Chat::White, " SpellAffectIndex: %d", s->SpellAffectIndex);
	c->Message(Chat::White, " RecourseLink: %d", s->RecourseLink);
}
// Changes the last name of the targeted client, or of the requester when
// no client is targeted.
void command_lastname(Client *c, const Seperator *sep)
{
	Client *t = c;
	if (c->GetTarget() && c->GetTarget()->IsClient())
		t = c->GetTarget()->CastToClient();
	LogInfo("#lastname request from [{}] for [{}]", c->GetName(), t->GetName());
	if (strlen(sep->arg[1]) > 70) {
		c->Message(Chat::White, "Usage: #lastname <lastname> where <lastname> is less than 70 chars long");
		return;
	}
	t->ChangeLastName(sep->arg[1]);
}
// Memorizes a spell into one of the client's spell gems.
void command_memspell(Client *c, const Seperator *sep)
{
	if (!(sep->IsNumber(1) && sep->IsNumber(2)))
	{
		c->Message(Chat::White, "Usage: #MemSpell slotid spellid");
		return;
	}
	// Slots are 1-based for the user, 0-based internally; a user value of 0
	// wraps the unsigned slot to a huge number and is rejected below.
	uint32 slot = atoi(sep->arg[1]) - 1;
	uint16 spell_id = atoi(sep->arg[2]);
	// Bug fix: the old check used `slot > SPELL_GEM_COUNT`, an off-by-one
	// that admitted the out-of-range slot value SPELL_GEM_COUNT itself
	// (valid slots are 0 .. SPELL_GEM_COUNT-1). Also fixed the
	// "Arguement" typo in the error message.
	if (slot >= EQEmu::spells::SPELL_GEM_COUNT || spell_id >= SPDAT_RECORDS)
	{
		c->Message(Chat::White, "Error: #MemSpell: Argument out of range");
		return;
	}
	c->MemSpell(spell_id, slot);
	c->Message(Chat::White, "Spell slot changed, have fun!");
}
// Forces a manual database save of the targeted client or player corpse.
void command_save(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (target == 0) {
		c->Message(Chat::White, "Error: no target");
	}
	else if (target->IsClient()) {
		if (target->CastToClient()->Save(2))
			c->Message(Chat::White, "%s successfully saved.", target->GetName());
		else
			c->Message(Chat::White, "Manual save for %s failed.", target->GetName());
	}
	else if (target->IsPlayerCorpse()) {
		if (target->CastToMob()->Save())
			c->Message(Chat::White, "%s successfully saved. (dbid=%u)", target->GetName(), target->CastToCorpse()->GetCorpseDBID());
		else
			c->Message(Chat::White, "Manual save for %s failed.", target->GetName());
	}
	else {
		c->Message(Chat::White, "Error: target not a Client/PlayerCorpse");
	}
}
// Shows stats for the current target, or for the requester when untargeted.
void command_showstats(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (target != 0)
		target->ShowStats(c);
	else
		c->ShowStats(c);
}
// Prints the current zone's global loot configuration.
void command_showzonegloballoot(Client *c, const Seperator *sep)
{
	c->Message(Chat::White, "GlobalLoot for %s (%d:%d)", zone->GetShortName(), zone->GetZoneID(), zone->GetInstanceVersion());
	zone->ShowZoneGlobalLoot(c);
}
// Shows the requester's own stats, or the pet's stats when the requester's
// own pet is targeted.
void command_mystats(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (target && c->GetPet() && target->IsPet() && target == c->GetPet())
		target->ShowStats(c);
	else
		c->ShowStats(c);
}
// Opens the requester's skills window.
void command_myskills(Client *c, const Seperator *sep)
{
	c->ShowSkillsWindow();
}
// Sets the bind point of the targeted player, or of the requester when
// nothing is targeted.
void command_bind(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (!target) {
		c->SetBindPoint();
		return;
	}
	if (target->IsClient())
		target->CastToClient()->SetBindPoint();
	else
		c->Message(Chat::White, "Error: target not a Player");
}
// Depops the targeted NPC or NPC corpse.
void command_depop(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (!target || !(target->IsNPC() || target->IsNPCCorpse())) {
		c->Message(Chat::White, "You must have a NPC target for this command. (maybe you meant #depopzone?)");
		return;
	}
	c->Message(Chat::White, "Depoping '%s'.", target->GetName());
	target->Depop();
}
// Depops the entire current zone.
void command_depopzone(Client *c, const Seperator *sep)
{
	zone->Depop();
	c->Message(Chat::White, "Zone depoped.");
}
// Shows the GM developer-tools menu and toggles the devtools window.
// Usage: #devtools [enable_window|disable_window]
// The "window disabled" state is persisted per account via a
// DataBucket key ("<account_id>-dev-tools-window-disabled").
void command_devtools(Client *c, const Seperator *sep)
{
	std::string menu_commands_search;
	std::string window_toggle_command;
	/**
	 * Build the saylink menu of entity search commands
	 */
	menu_commands_search += "[" + EQEmu::SayLinkEngine::GenerateQuestSaylink("#list npcs", false, "NPC") + "] ";
	menu_commands_search += "[" + EQEmu::SayLinkEngine::GenerateQuestSaylink("#list players", false, "Players") + "] ";
	menu_commands_search += "[" + EQEmu::SayLinkEngine::GenerateQuestSaylink("#list corpses", false, "Corpses") + "] ";
	menu_commands_search += "[" + EQEmu::SayLinkEngine::GenerateQuestSaylink("#list doors", false, "Doors") + "] ";
	menu_commands_search += "[" + EQEmu::SayLinkEngine::GenerateQuestSaylink("#list objects", false, "Objects") + "] ";
	std::string dev_tools_window_key = StringFormat("%i-dev-tools-window-disabled", c->AccountID());
	/**
	 * Handle window toggle: presence of the bucket key means disabled
	 */
	if (strcasecmp(sep->arg[1], "disable_window") == 0) {
		DataBucket::SetData(dev_tools_window_key, "true");
		c->SetDevToolsWindowEnabled(false);
	}
	if (strcasecmp(sep->arg[1], "enable_window") == 0) {
		DataBucket::DeleteData(dev_tools_window_key);
		c->SetDevToolsWindowEnabled(true);
	}
	/**
	 * Show window status with a saylink to flip it
	 */
	window_toggle_command = "Disabled [" + EQEmu::SayLinkEngine::GenerateQuestSaylink("#devtools enable_window", false, "Enable") + "] ";
	if (c->IsDevToolsWindowEnabled()) {
		window_toggle_command = "Enabled [" + EQEmu::SayLinkEngine::GenerateQuestSaylink("#devtools disable_window", false, "Disable") + "] ";
	}
	/**
	 * Print menu
	 */
	c->Message(Chat::White, "| [Devtools] Window %s", window_toggle_command.c_str());
	c->Message(Chat::White, "| [Devtools] Search %s", menu_commands_search.c_str());
}
// Repops the zone's NPCs.
// Usage: #repop [force] [delay_seconds]
// "force" first deletes the persisted respawn timers for every spawn2
// entry of this instance so everything respawns immediately. With no
// numeric delay argument the repop happens right away.
void command_repop(Client *c, const Seperator *sep)
{
	int timearg = 1;
	int delay = 0;  // NOTE(review): assigned but never read in this function
	if (sep->arg[1] && strcasecmp(sep->arg[1], "force") == 0) {
		// "force" shifts the delay argument one position to the right
		timearg++;
		// Wipe persisted respawn timers for all spawn points in this instance
		LinkedListIterator<Spawn2*> iterator(zone->spawn2_list);
		iterator.Reset();
		while (iterator.MoreElements()) {
			std::string query = StringFormat(
				"DELETE FROM respawn_times WHERE id = %lu AND instance_id = %lu",
				(unsigned long)iterator.GetData()->GetID(),
				(unsigned long)zone->GetInstanceID()
			);
			auto results = database.QueryDatabase(query);
			iterator.Advance();
		}
		c->Message(Chat::White, "Zone depop: Force resetting spawn timers.");
	}
	if (!sep->IsNumber(timearg)) {
		// No delay given: repop immediately
		c->Message(Chat::White, "Zone depopped - repopping now.");
		zone->Repop();
		/* Force a spawn2 timer trigger so we don't delay actually spawning the NPC's */
		zone->spawn2_timer.Trigger();
		return;
	}
	// Delayed repop: argument is in seconds, Repop() takes milliseconds
	c->Message(Chat::White, "Zone depoped. Repop in %i seconds", atoi(sep->arg[timearg]));
	zone->Repop(atoi(sep->arg[timearg]) * 1000);
	zone->spawn2_timer.Trigger();
}
// Repops only the NPCs near the GM.
// Usage: #repopclose [force|distance]
// "force" wipes the persisted respawn timers for this instance first.
// A numeric first argument overrides the default 500-unit radius.
// NOTE(review): "force" and a custom distance cannot be combined —
// arg[1] is either the keyword or the number.
void command_repopclose(Client *c, const Seperator *sep)
{
	int repop_distance = 500;
	if (sep->arg[1] && strcasecmp(sep->arg[1], "force") == 0) {
		// Wipe persisted respawn timers for all spawn points in this instance
		LinkedListIterator<Spawn2*> iterator(zone->spawn2_list);
		iterator.Reset();
		while (iterator.MoreElements()) {
			std::string query = StringFormat(
				"DELETE FROM respawn_times WHERE id = %lu AND instance_id = %lu",
				(unsigned long)iterator.GetData()->GetID(),
				(unsigned long)zone->GetInstanceID()
			);
			auto results = database.QueryDatabase(query);
			iterator.Advance();
		}
		c->Message(Chat::White, "Zone depop: Force resetting spawn timers.");
	}
	if (sep->IsNumber(1)) {
		repop_distance = atoi(sep->arg[1]);
	}
	c->Message(Chat::White, "Zone depoped. Repopping NPC's within %i distance units", repop_distance);
	zone->RepopClose(c->GetPosition(), repop_distance);
}
// Reports spawn point status for the current zone.
// Usage: #spawnstatus <[a]ll | [d]isabled | [e]nabled | {Spawn2 ID}>
// With no (or an unrecognized) argument, all spawn points are listed.
//
// Fix: the original used bitwise '|' between the two case tests;
// logical '||' is the correct, short-circuiting operator here.
void command_spawnstatus(Client *c, const Seperator *sep)
{
	if ((sep->arg[1][0] == 'e') || (sep->arg[1][0] == 'E'))
	{
		// show only enabled spawns
		zone->ShowEnabledSpawnStatus(c);
	}
	else if ((sep->arg[1][0] == 'd') || (sep->arg[1][0] == 'D'))
	{
		// show only disabled spawns
		zone->ShowDisabledSpawnStatus(c);
	}
	else if ((sep->arg[1][0] == 'a') || (sep->arg[1][0] == 'A'))
	{
		// show all spawn status with no filters
		zone->SpawnStatus(c);
	}
	else if (sep->IsNumber(1))
	{
		// show spawn status by spawn2 id
		zone->ShowSpawnStatusByID(c, atoi(sep->arg[1]));
	}
	else if (strcmp(sep->arg[1], "help") == 0)
	{
		c->Message(Chat::White, "Usage: #spawnstatus <[a]ll | [d]isabled | [e]nabled | {Spawn2 ID}>");
	}
	else {
		// default: full unfiltered listing
		zone->SpawnStatus(c);
	}
}
// Strips every buff from the target, or from the GM when untargeted.
void command_nukebuffs(Client *c, const Seperator *sep)
{
	Mob *subject = c->GetTarget() ? c->GetTarget() : c;
	subject->BuffFadeAll();
}
// Sets the zone's underworld Z coordinate in the cached zone header
// and re-broadcasts the modified header (OP_NewZone) to clients.
// Usage: #zuwcoords <under world coords>
void command_zuwcoords(Client *c, const Seperator *sep)
{
	// modifys and resends zhdr packet
	if(sep->arg[1][0]==0)
		c->Message(Chat::White, "Usage: #zuwcoords <under world coords>");
	else {
		zone->newzone_data.underworld = atof(sep->arg[1]);
		//float newdata = atof(sep->arg[1]);
		//memcpy(&zone->zone_header_data[130], &newdata, sizeof(float));
		// Rebuild and queue the NewZone packet so connected clients
		// pick up the changed header immediately.
		auto outapp = new EQApplicationPacket(OP_NewZone, sizeof(NewZone_Struct));
		memcpy(outapp->pBuffer, &zone->newzone_data, outapp->size);
		entity_list.QueueClients(c, outapp);
		safe_delete(outapp);
	}
}
// Overrides the zone's underworld Z coordinate in the cached zone
// header. Unlike #zuwcoords this does not re-broadcast the header.
void command_zunderworld(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #zunderworld <zcoord>");
		return;
	}
	zone->newzone_data.underworld = atof(sep->arg[1]);
}
// Sets the zone's safe-point coordinates in the cached zone header and
// re-broadcasts the modified header (OP_NewZone) to clients.
// Usage: #zsafecoords <safe x> <safe y> <safe z>
void command_zsafecoords(Client *c, const Seperator *sep)
{
	// modifys and resends zhdr packet
	if(sep->arg[3][0]==0)
		c->Message(Chat::White, "Usage: #zsafecoords <safe x> <safe y> <safe z>");
	else {
		zone->newzone_data.safe_x = atof(sep->arg[1]);
		zone->newzone_data.safe_y = atof(sep->arg[2]);
		zone->newzone_data.safe_z = atof(sep->arg[3]);
		//float newdatax = atof(sep->arg[1]);
		//float newdatay = atof(sep->arg[2]);
		//float newdataz = atof(sep->arg[3]);
		//memcpy(&zone->zone_header_data[114], &newdatax, sizeof(float));
		//memcpy(&zone->zone_header_data[118], &newdatay, sizeof(float));
		//memcpy(&zone->zone_header_data[122], &newdataz, sizeof(float));
		//zone->SetSafeCoords();
		// Rebuild and queue the NewZone packet so connected clients
		// pick up the changed header immediately.
		auto outapp = new EQApplicationPacket(OP_NewZone, sizeof(NewZone_Struct));
		memcpy(outapp->pBuffer, &zone->newzone_data, outapp->size);
		entity_list.QueueClients(c, outapp);
		safe_delete(outapp);
	}
}
// Plays the freeze animation on the current target.
void command_freeze(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (!target) {
		c->Message(Chat::White, "ERROR: Freeze requires a target.");
		return;
	}
	target->SendAppearancePacket(AT_Anim, ANIM_FREEZE);
}
// Returns a frozen target to the standing animation.
void command_unfreeze(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (!target) {
		c->Message(Chat::White, "ERROR: Unfreeze requires a target.");
		return;
	}
	target->SendAppearancePacket(AT_Anim, ANIM_STAND);
}
// Applies knockback to the target (or the GM themselves when
// untargeted), pushed away along the GM's heading.
// arg1 = push-back magnitude (required), arg2 = vertical push (optional).
// Only NPC targets are actually moved; the client-push packet is not
// yet implemented.
void command_push(Client *c, const Seperator *sep)
{
	Mob *t = c;
	if (c->GetTarget() != nullptr)
		t = c->GetTarget();
	if (!sep->arg[1] || !sep->IsNumber(1)) {
		c->Message(Chat::White, "ERROR: Must provide at least a push back.");
		return;
	}
	float back = atof(sep->arg[1]);
	float up = 0.0f;
	if (sep->arg[2] && sep->IsNumber(2))
		up = atof(sep->arg[2]);
	if (t->IsNPC()) {
		// Decompose the push along the GM's facing and hand it to the
		// forced-movement system.
		t->IncDeltaX(back * g_Math.FastSin(c->GetHeading()));
		t->IncDeltaY(back * g_Math.FastCos(c->GetHeading()));
		t->IncDeltaZ(up);
		t->SetForcedMovement(6);
	} else if (t->IsClient()) {
		// TODO: send packet to push
	}
}
// Visualizes the proximity box of the targeted NPC: spawns a marker
// NPC on each corner and (for RoF+ clients) draws a closed path
// around the box. Re-running the command first removes markers left
// over from a previous invocation.
//
// Fix: the original guard used '&&', which dereferenced a null
// target ("!c->GetTarget()->IsNPC()" with no target) and also let
// non-NPC targets fall through to CastToNPC(). It must be '||'.
// Also removed a duplicated pair of min-X/min-Y assignments.
void command_proximity(Client *c, const Seperator *sep)
{
	if (!c->GetTarget() || !c->GetTarget()->IsNPC()) {
		c->Message(Chat::White, "You must target an NPC");
		return;
	}
	// Depop marker NPCs from any previous #proximity call.
	for (auto &iter : entity_list.GetNPCList()) {
		auto npc = iter.second;
		std::string name = npc->GetName();
		if (name.find("Proximity") != std::string::npos) {
			npc->Depop();
		}
	}
	NPC *npc = c->GetTarget()->CastToNPC();
	std::vector<FindPerson_Point> points;
	FindPerson_Point p{};
	if (npc->IsProximitySet()) {
		// One marker NPC per corner of the proximity box, all at the
		// NPC's Z and heading.
		glm::vec4 position;
		position.w = npc->GetHeading();
		position.z = npc->GetZ();
		position.x = npc->GetProximityMinX();
		position.y = npc->GetProximityMinY();
		NPC::SpawnNodeNPC("Proximity", "", position);
		position.x = npc->GetProximityMinX();
		position.y = npc->GetProximityMaxY();
		NPC::SpawnNodeNPC("Proximity", "", position);
		position.x = npc->GetProximityMaxX();
		position.y = npc->GetProximityMinY();
		NPC::SpawnNodeNPC("Proximity", "", position);
		position.x = npc->GetProximityMaxX();
		position.y = npc->GetProximityMaxY();
		NPC::SpawnNodeNPC("Proximity", "", position);
		// Closed path around the box (first corner repeated last).
		p.z = npc->GetZ();
		p.x = npc->GetProximityMinX();
		p.y = npc->GetProximityMinY();
		points.push_back(p);
		p.x = npc->GetProximityMinX();
		p.y = npc->GetProximityMaxY();
		points.push_back(p);
		p.x = npc->GetProximityMaxX();
		p.y = npc->GetProximityMaxY();
		points.push_back(p);
		p.x = npc->GetProximityMaxX();
		p.y = npc->GetProximityMinY();
		points.push_back(p);
		p.x = npc->GetProximityMinX();
		p.y = npc->GetProximityMinY();
		points.push_back(p);
	}
	if (c->ClientVersion() >= EQEmu::versions::ClientVersion::RoF) {
		c->SendPathPacket(points);
	}
}
// Toggles the PVP flag on the targeted player, or on the GM when no
// player is targeted. Usage: #pvp [on/off]
void command_pvp(Client *c, const Seperator *sep)
{
	Client *subject = c;
	if (c->GetTarget() && c->GetTarget()->IsClient()) {
		subject = c->GetTarget()->CastToClient();
	}
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #pvp [on/off]");
		return;
	}
	bool enable = atobool(sep->arg[1]);
	subject->SetPVP(enable);
	c->Message(Chat::White, "%s now follows the ways of %s.", subject->GetName(), enable ? "discord" : "order");
}
// Grants experience to the targeted player, or to the GM when no
// player is targeted. Usage: #setxp number (capped at 9,999,999)
void command_setxp(Client *c, const Seperator *sep)
{
	Client *subject = c;
	if (c->GetTarget() && c->GetTarget()->IsClient()) {
		subject = c->GetTarget()->CastToClient();
	}
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White, "Usage: #setxp number");
		return;
	}
	int amount = atoi(sep->arg[1]);
	if (amount > 9999999) {
		c->Message(Chat::White, "Error: Value too high.");
		return;
	}
	subject->AddEXP(amount);
}
// Sets the PVP point total of the targeted player, or of the GM when
// no player is targeted, then saves and refreshes their PVP stats.
// Usage: #setpvppoints number (capped at 9,999,999)
void command_setpvppoints(Client *c, const Seperator *sep)
{
	Client *subject = c;
	if (c->GetTarget() && c->GetTarget()->IsClient()) {
		subject = c->GetTarget()->CastToClient();
	}
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White, "Usage: #setpvppoints number");
		return;
	}
	int points = atoi(sep->arg[1]);
	if (points > 9999999) {
		c->Message(Chat::White, "Error: Value too high.");
		return;
	}
	subject->SetPVPPoints(points);
	subject->Save();
	subject->SendPVPStats();
}
// Permanently renames the targeted player.
// Usage: #name newname (requires player target)
// On success the target is kicked to character select so the new
// name takes effect client-side.
//
// Fix: the failure message printed sep->arg[2], which is always
// empty for this one-argument command; the requested name is arg[1].
void command_name(Client *c, const Seperator *sep)
{
	Client *target;
	if( (strlen(sep->arg[1]) == 0) || (!(c->GetTarget() && c->GetTarget()->IsClient())) )
		c->Message(Chat::White, "Usage: #name newname (requires player target)");
	else
	{
		target = c->GetTarget()->CastToClient();
		// Copy the old name before it is overwritten by the rename.
		char *oldname = strdup(target->GetName());
		if(target->ChangeFirstName(sep->arg[1], c->GetName()))
		{
			c->Message(Chat::White, "Successfully renamed %s to %s", oldname, sep->arg[1]);
			// until we get the name packet working right this will work
			c->Message(Chat::White, "Sending player to char select.");
			target->Kick("Name was changed");
		}
		else
			c->Message(Chat::Red, "ERROR: Unable to rename %s. Check that the new name '%s' isn't already taken.", oldname, sep->arg[1]);
		free(oldname);
	}
}
// Temporarily renames the target; with no argument the original name
// is restored. Usage: #tempname newname (requires a target)
void command_tempname(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (!target) {
		c->Message(Chat::White, "Usage: #tempname newname (requires a target)");
		return;
	}
	if (strlen(sep->arg[1]) == 0) {
		// No argument: revert to the original name.
		target->TempName();
		c->Message(Chat::White, "Restored the original name");
		return;
	}
	char *oldname = strdup(target->GetName());
	target->TempName(sep->arg[1]);
	c->Message(Chat::White, "Renamed %s to %s", oldname, sep->arg[1]);
	free(oldname);
}
// Temporarily renames the caller's own pet; with no argument (or when
// the rename conditions fail) the target's original name is restored.
// Usage: #petname newname (requires a target)
// NOTE(review): the else branch also runs when the target is NOT the
// caller's pet (e.g. someone else's pet or any mob), restoring that
// target's original name — confirm this fall-through is intended.
void command_petname(Client *c, const Seperator *sep)
{
	Mob *target;
	target = c->GetTarget();
	if(!target)
		c->Message(Chat::White, "Usage: #petname newname (requires a target)");
	else if(target->IsPet() && (target->GetOwnerID() == c->GetID()) && strlen(sep->arg[1]) > 0)
	{
		// Copy the old name before TempName overwrites it.
		char *oldname = strdup(target->GetName());
		target->TempName(sep->arg[1]);
		c->Message(Chat::White, "Renamed %s to %s", oldname, sep->arg[1]);
		free(oldname);
	}
	else {
		target->TempName();
		c->Message(Chat::White, "Restored the original name");
	}
}
// Sets special attack flags (enrage/flurry/rampage/summon) on the
// targeted NPC. Requires an NPC target, a flag string and a perm tag.
void command_npcspecialattk(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	bool bad_usage =
		target == 0 ||
		target->IsClient() ||
		strlen(sep->arg[1]) <= 0 ||
		strlen(sep->arg[2]) <= 0;
	if (bad_usage) {
		c->Message(Chat::White, "Usage: #npcspecialattk *flagchar* *permtag* (Flags are E(nrage) F(lurry) R(ampage) S(ummon), permtag is 1 = True, 0 = False).");
		return;
	}
	target->CastToNPC()->NPCSpecialAttacks(sep->arg[1], atoi(sep->arg[2]));
	c->Message(Chat::White, "NPC Special Attack set.");
}
// Instantly kills the target. Player targets with admin status above
// the GM's are protected (silently ignored).
void command_kill(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (!target) {
		c->Message(Chat::White, "Error: #Kill: No target.");
		return;
	}
	bool protected_client = target->IsClient() && target->CastToClient()->Admin() > c->Admin();
	if (!protected_client) {
		target->Kill();
	}
}
// Mass-kills NPCs in the zone, optionally filtered by a name
// substring (arg1). Invisible NPCs, NPCs the GM cannot attack, and
// races 127/240 are skipped.
// NOTE(review): races 127/240 appear to be special/untargetable
// model races — confirm against the race table.
void command_killallnpcs(Client *c, const Seperator *sep)
{
	std::string search_string;
	if (sep->arg[1]) {
		search_string = sep->arg[1];
	}
	int count = 0;
	for (auto &itr : entity_list.GetMobList()) {
		Mob *entity = itr.second;
		if (!entity->IsNPC()) {
			continue;
		}
		std::string entity_name = entity->GetName();
		/**
		 * Filter by name substring when a search string was given
		 */
		if (search_string.length() > 0 && entity_name.find(search_string) == std::string::npos) {
			continue;
		}
		bool is_not_attackable =
			(
				entity->IsInvisible() ||
				!entity->IsAttackAllowed(c) ||
				entity->GetRace() == 127 ||
				entity->GetRace() == 240
			);
		if (is_not_attackable) {
			continue;
		}
		// Massive flat damage ensures the kill regardless of NPC HP.
		entity->Damage(c, 1000000000, 0, EQEmu::skills::SkillDragonPunch);
		count++;
	}
	c->Message(Chat::Yellow, "Killed (%i) npc(s)", count);
}
// Sets the GM's bonus attack-speed percentage, capped at 85%.
// Usage: #haste [percentage]
void command_haste(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #haste [percentage]");
		return;
	}
	uint16 haste_pct = atoi(sep->arg[1]);
	if (haste_pct > 85) {
		haste_pct = 85;
	}
	c->SetExtraHaste(haste_pct);
	// SetAttackTimer must be called to make this take effect, so the
	// player needs to change the primary weapon.
	c->Message(Chat::White, "Haste set to %d%% - Need to re-equip primary weapon before it takes effect", haste_pct);
}
// Deals a flat amount of non-spell damage from the GM to the target.
// Usage: #damage x   (x must be below 2,100,000,000 to stay within int32)
//
// Fix: corrected the user-facing typo "less then" -> "less than".
void command_damage(Client *c, const Seperator *sep)
{
	if (c->GetTarget()==0)
		c->Message(Chat::White, "Error: #Damage: No Target.");
	else if (!sep->IsNumber(1)) {
		c->Message(Chat::White, "Usage: #damage x");
	}
	else {
		int32 nkdmg = atoi(sep->arg[1]);
		if (nkdmg > 2100000000)
			c->Message(Chat::White, "Enter a value less than 2,100,000,000.");
		else
			c->GetTarget()->Damage(c, nkdmg, SPELL_UNKNOWN, EQEmu::skills::SkillHandtoHand, false);
	}
}
// Unimplemented stub. The commented-out body below is the historical
// client.cpp implementation, kept verbatim for reference.
void command_zonespawn(Client *c, const Seperator *sep)
{
	c->Message(Chat::White, "This command is not yet implemented.");
	return;
	/* this was kept from client.cpp verbatim (it was commented out) */
	// if (target && target->IsNPC()) {
	// Message(0, "Inside main if.");
	// if (strcasecmp(sep->arg[1], "add")==0) {
	// Message(0, "Inside add if.");
	// database.DBSpawn(1, StaticGetZoneName(this->GetPP().current_zone), target->CastToNPC());
	// }
	// else if (strcasecmp(sep->arg[1], "update")==0) {
	// database.DBSpawn(2, StaticGetZoneName(this->GetPP().current_zone), target->CastToNPC());
	// }
	// else if (strcasecmp(sep->arg[1], "remove")==0) {
	// if (strcasecmp(sep->arg[2], "all")==0) {
	// database.DBSpawn(4, StaticGetZoneName(this->GetPP().current_zone));
	// }
	// else {
	// if (database.DBSpawn(3, StaticGetZoneName(this->GetPP().current_zone), target->CastToNPC())) {
	// Message(0, "#zonespawn: %s removed successfully!", target->GetName());
	// target->CastToNPC()->Death(target, target->GetHP());
	// }
	// }
	// }
	// else
	// Message(0, "Error: #dbspawn: Invalid command. (Note: EDIT and REMOVE are NOT in yet.)");
	// if (target->CastToNPC()->GetNPCTypeID() > 0) {
	// Message(0, "Spawn is type %i", target->CastToNPC()->GetNPCTypeID());
	// }
	// }
	// else if(!target || !target->IsNPC())
	// Message(0, "Error: #zonespawn: You must have a NPC targeted!");
	// else
	// Message(0, "Usage: #zonespawn [add|edit|remove|remove all]");
}
// Manages the database spawn entry of the targeted NPC.
// Usage: #npcspawn [create|add|update|remove|delete]
//   create [1] - create the npc_type row; passing a truthy arg2 tries
//                to allocate the id within the zone's range (zone_id*1000)
//   add [secs] - add a spawn entry (arg2 = respawn timer, default 1200)
//   update     - update the existing spawn entry
//   remove     - remove the spawn entry and depop the NPC
//   delete     - delete the spawn entry and depop the NPC
void command_npcspawn(Client *c, const Seperator *sep)
{
	Mob *target=c->GetTarget();
	uint32 extra = 0;
	if (target && target->IsNPC()) {
		if (strcasecmp(sep->arg[1], "create") == 0) {
			if (atoi(sep->arg[2]))
			{
				// Option to try to create the npc_type ID within the range for the current zone (zone_id * 1000)
				extra = 1;
			}
			database.NPCSpawnDB(0, zone->GetShortName(), zone->GetInstanceVersion(), c, target->CastToNPC(), extra);
			c->Message(Chat::White, "%s created successfully!", target->GetName());
		}
		else if (strcasecmp(sep->arg[1], "add") == 0) {
			if (atoi(sep->arg[2]))
			{
				extra = atoi(sep->arg[2]);
			}
			else
			{
				// Respawn Timer default if not set
				extra = 1200;
			}
			database.NPCSpawnDB(1, zone->GetShortName(), zone->GetInstanceVersion(), c, target->CastToNPC(), extra);
			c->Message(Chat::White, "%s added successfully!", target->GetName());
		}
		else if (strcasecmp(sep->arg[1], "update") == 0) {
			database.NPCSpawnDB(2, zone->GetShortName(), zone->GetInstanceVersion(), c, target->CastToNPC());
			c->Message(Chat::White, "%s updated!", target->GetName());
		}
		else if (strcasecmp(sep->arg[1], "remove") == 0) {
			database.NPCSpawnDB(3, zone->GetShortName(), zone->GetInstanceVersion(), c, target->CastToNPC());
			c->Message(Chat::White, "%s removed successfully from database!", target->GetName());
			target->Depop(false);
		}
		else if (strcasecmp(sep->arg[1], "delete") == 0) {
			database.NPCSpawnDB(4, zone->GetShortName(), zone->GetInstanceVersion(), c, target->CastToNPC());
			c->Message(Chat::White, "%s deleted from database!", target->GetName());
			target->Depop(false);
		}
		else {
			c->Message(Chat::White, "Error: #npcspawn: Invalid command.");
			c->Message(Chat::White, "Usage: #npcspawn [create|add|update|remove|delete]");
		}
	}
	else
		c->Message(Chat::White, "Error: #npcspawn: You must have a NPC targeted!");
}
// Moves the targeted NPC's spawn2 database entry to the GM's current
// position and heading, then depops the NPC so it respawns at the
// corrected location. Requires an NPC spawned from a spawn2 entry.
void command_spawnfix(Client *c, const Seperator *sep) {
	Mob *targetMob = c->GetTarget();
	if (!targetMob || !targetMob->IsNPC()) {
		c->Message(Chat::White, "Error: #spawnfix: Need an NPC target.");
		return;
	}
	// Dynamically spawned NPCs have no spawn2 entry and cannot be fixed.
	Spawn2* s2 = targetMob->CastToNPC()->respawn2;
	if(!s2) {
		c->Message(Chat::White, "#spawnfix FAILED -- cannot determine which spawn entry in the database this mob came from.");
		return;
	}
	std::string query = StringFormat("UPDATE spawn2 SET x = '%f', y = '%f', z = '%f', heading = '%f' WHERE id = '%i'",
		c->GetX(), c->GetY(), c->GetZ(), c->GetHeading(),s2->GetID());
	auto results = database.QueryDatabase(query);
	if (!results.Success()) {
		c->Message(Chat::Red, "Update failed! MySQL gave the following error:");
		c->Message(Chat::Red, results.ErrorMessage().c_str());
		return;
	}
	c->Message(Chat::White, "Updating coordinates successful.");
	targetMob->Depop(false);
}
// Reports the XYZ location and heading of the target, or of the GM
// themselves when nothing is targeted.
void command_loc(Client *c, const Seperator *sep)
{
	Mob *subject = c->GetTarget() ? c->GetTarget() : c->CastToMob();
	c->Message(
		Chat::White,
		"%s's Location (XYZ): %1.2f, %1.2f, %1.2f; heading=%1.1f",
		subject->GetName(),
		subject->GetX(),
		subject->GetY(),
		subject->GetZ(),
		subject->GetHeading()
	);
}
// Teleports the GM.
// Usage: #goto [x y z] [h]     - to explicit coordinates
//        #goto [player_name]   - to a player (same or different zone)
//        #goto (with a target) - to the current target
void command_goto(Client *c, const Seperator *sep)
{
	std::string arg1 = sep->arg[1];
	// Decide which of the three modes applies, in priority order.
	bool goto_via_target_no_args = sep->arg[1][0] == '\0' && c->GetTarget();
	bool goto_via_player_name    = !sep->IsNumber(1) && !arg1.empty();
	bool goto_via_x_y_z          = sep->IsNumber(1) && sep->IsNumber(2) && sep->IsNumber(3);
	if (goto_via_target_no_args) {
		c->MovePC(
			zone->GetZoneID(),
			zone->GetInstanceID(),
			c->GetTarget()->GetX(),
			c->GetTarget()->GetY(),
			c->GetTarget()->GetZ(),
			c->GetTarget()->GetHeading()
		);
	}
	else if (goto_via_player_name) {
		/**
		 * Find them in zone first; fall back to a cross-zone goto.
		 */
		const char *player_name         = sep->arg[1];
		std::string player_name_string  = sep->arg[1];
		Client      *client             = entity_list.GetClientByName(player_name);
		if (client) {
			c->MovePC(
				zone->GetZoneID(),
				zone->GetInstanceID(),
				client->GetX(),
				client->GetY(),
				client->GetZ(),
				client->GetHeading()
			);
			c->Message(Chat::Yellow, "Goto player '%s' same zone", player_name_string.c_str());
		}
		else if (c->GotoPlayer(player_name_string)) {
			c->Message(Chat::Yellow, "Goto player '%s' different zone", player_name_string.c_str());
		}
		else {
			c->Message(Chat::Yellow, "Player '%s' not found", player_name_string.c_str());
		}
	}
	else if (goto_via_x_y_z) {
		c->MovePC(
			zone->GetZoneID(),
			zone->GetInstanceID(),
			atof(sep->arg[1]),
			atof(sep->arg[2]),
			atof(sep->arg[3]),
			// Heading is optional; keep the GM's current heading if omitted.
			(sep->arg[4] ? atof(sep->arg[4]) : c->GetHeading())
		);
	}
	else {
		c->Message(Chat::White, "Usage: #goto [x y z] [h]");
		c->Message(Chat::White, "Usage: #goto [player_name]");
	}
}
// Dumps detailed item data for the item currently on the GM's cursor:
// identity, flags, and (for common items) stats, class/race masks and
// click-effect details. MinStatus is shown only to admin >= 200.
void command_iteminfo(Client *c, const Seperator *sep)
{
	auto inst = c->GetInv()[EQEmu::invslot::slotCursor];
	if (!inst) {
		c->Message(Chat::Red, "Error: You need an item on your cursor for this command");
		return;
	}
	auto item = inst->GetItem();
	if (!item) {
		LogInventory("([{}]) Command #iteminfo processed an item with no data pointer");
		c->Message(Chat::Red, "Error: This item has no data reference");
		return;
	}
	// Build a clickable item link for the header line.
	EQEmu::SayLinkEngine linker;
	linker.SetLinkType(EQEmu::saylink::SayLinkItemInst);
	linker.SetItemInst(inst);
	c->Message(Chat::White, "*** Item Info for [%s] ***", linker.GenerateLink().c_str());
	c->Message(Chat::White, ">> ID: %u, ItemUseType: %u, ItemClassType: %u", item->ID, item->ItemType, item->ItemClass);
	c->Message(Chat::White, ">> IDFile: '%s', IconID: %u", item->IDFile, item->Icon);
	c->Message(Chat::White, ">> Size: %u, Weight: %u, Price: %u, LDoNPrice: %u", item->Size, item->Weight, item->Price, item->LDoNPrice);
	c->Message(Chat::White, ">> Material: 0x%02X, Color: 0x%08X, Tint: 0x%08X, Light: 0x%02X", item->Material, item->Color, inst->GetColor(), item->Light);
	c->Message(Chat::White, ">> IsLore: %s, LoreGroup: %u, Lore: '%s'", (item->LoreFlag ? "TRUE" : "FALSE"), item->LoreGroup, item->Lore);
	c->Message(Chat::White, ">> NoDrop: %u, NoRent: %u, NoPet: %u, NoTransfer: %u, FVNoDrop: %u",
		item->NoDrop, item->NoRent, (uint8)item->NoPet, (uint8)item->NoTransfer, item->FVNoDrop);
	// Class-specific details: book, container, or common item.
	if (item->IsClassBook()) {
		c->Message(Chat::White, "*** This item is a Book (filename:'%s') ***", item->Filename);
	}
	else if (item->IsClassBag()) {
		c->Message(Chat::White, "*** This item is a Container (%u slots) ***", item->BagSlots);
	}
	else {
		c->Message(Chat::White, "*** This item is Common ***");
		c->Message(Chat::White, ">> Classes: %u, Races: %u, Slots: %u", item->Classes, item->Races, item->Slots);
		c->Message(Chat::White, ">> ReqSkill: %u, ReqLevel: %u, RecLevel: %u", item->RecSkill, item->ReqLevel, item->RecLevel);
		c->Message(Chat::White, ">> SkillModType: %u, SkillModValue: %i", item->SkillModType, item->SkillModValue);
		c->Message(Chat::White, ">> BaneRaceType: %u, BaneRaceDamage: %u, BaneBodyType: %u, BaneBodyDamage: %i",
			item->BaneDmgRace, item->BaneDmgRaceAmt, item->BaneDmgBody, item->BaneDmgAmt);
		c->Message(Chat::White, ">> Magic: %s, SpellID: %i, ProcLevel: %u, Charges: %u, MaxCharges: %u",
			(item->Magic ? "TRUE" : "FALSE"), item->Click.Effect, item->Click.Level, inst->GetCharges(), item->MaxCharges);
		c->Message(Chat::White, ">> EffectType: 0x%02X, CastTime: %.2f", (uint8)item->Click.Type, ((double)item->CastTime / 1000));
	}
	if (c->Admin() >= 200)
		c->Message(Chat::White, ">> MinStatus: %u", item->MinStatus);
}
// Requests server uptime from the world server; the reply is routed
// back to this GM by name. An optional numeric arg1 selects a
// specific zone server's uptime instead of the world's.
void command_uptime(Client *c, const Seperator *sep)
{
	if (!worldserver.Connected())
		c->Message(Chat::White, "Error: World server disconnected");
	else
	{
		auto pack = new ServerPacket(ServerOP_Uptime, sizeof(ServerUptime_Struct));
		ServerUptime_Struct* sus = (ServerUptime_Struct*) pack->pBuffer;
		// The GM's name lets world route the response back to them.
		strcpy(sus->adminname, c->GetName());
		if (sep->IsNumber(1) && atoi(sep->arg[1]) > 0)
			sus->zoneserverid = atoi(sep->arg[1]);
		worldserver.SendPacket(pack);
		safe_delete(pack);
	}
}
// Refreshes or sets account admin (GM) status.
// Usage: #flag                     - refresh own (or targeted player's) flag from DB
//        #flag [status] [acctname] - set an account's status (-2..255);
//                                    requires commandChangeFlags, and
//                                    commandBanPlayers for negative values.
void command_flag(Client *c, const Seperator *sep)
{
	if(sep->arg[2][0] == 0) {
		// No account name given: refresh mode.
		if (!c->GetTarget() || (c->GetTarget() && c->GetTarget() == c)) {
			c->UpdateAdmin();
			c->Message(Chat::White, "Refreshed your admin flag from DB.");
		} else if (c->GetTarget() && c->GetTarget() != c && c->GetTarget()->IsClient()) {
			c->GetTarget()->CastToClient()->UpdateAdmin();
			c->Message(Chat::White, "%s's admin flag has been refreshed.", c->GetTarget()->GetName());
			c->GetTarget()->Message(Chat::White, "%s refreshed your admin flag.", c->GetName());
		}
	}
	else if (!sep->IsNumber(1) || atoi(sep->arg[1]) < -2 || atoi(sep->arg[1]) > 255 || strlen(sep->arg[2]) == 0)
		c->Message(Chat::White, "Usage: #flag [status] [acctname]");
	else if (c->Admin() < commandChangeFlags) {
		//this check makes banning players by less than this level
		//impossible, but i'll leave it in anyways
		c->Message(Chat::White, "You may only refresh your own flag, doing so now.");
		c->UpdateAdmin();
	}
	else {
		if (atoi(sep->arg[1]) > c->Admin())
			c->Message(Chat::White, "You cannot set people's status to higher than your own");
		else if (atoi(sep->arg[1]) < 0 && c->Admin() < commandBanPlayers)
			c->Message(Chat::White, "You have too low of status to suspend/ban");
		else if (!database.SetAccountStatus(sep->argplus[2], atoi(sep->arg[1])))
			c->Message(Chat::White, "Unable to set GM Flag.");
		else {
			c->Message(Chat::White, "Set GM Flag on account.");
			// Notify world so any online client of that account gets the
			// new status pushed: packet is [uint32 account_id][int16 status].
			std::string user;
			std::string loginserver;
			ParseAccountString(sep->argplus[2], user, loginserver);
			ServerPacket pack(ServerOP_FlagUpdate, 6);
			*((uint32*) pack.pBuffer) = database.GetAccountIDByName(user.c_str(), loginserver.c_str());
			*((int16*) &pack.pBuffer[4]) = atoi(sep->arg[1]);
			worldserver.SendPacket(&pack);
		}
	}
}
// Sets or reports in-game world time.
// Usage: #time HH [MM] to set; with no arguments the current EQ time
// and zone timezone offset are printed instead.
void command_time(Client *c, const Seperator *sep)
{
	char timeMessage[255];
	int minutes=0;
	if(sep->IsNumber(1)) {
		if(sep->IsNumber(2)) {
			minutes=atoi(sep->arg[2]);
		}
		c->Message(Chat::Red, "Setting world time to %s:%i (Timezone: 0)...", sep->arg[1], minutes);
		// NOTE(review): the hour is offset by +1 here — presumably
		// SetTime uses 1-based hours; confirm against Zone::SetTime.
		zone->SetTime(atoi(sep->arg[1])+1, minutes);
		LogInfo("{} :: Setting world time to {}:{} (Timezone: 0)...", c->GetCleanName(), sep->arg[1], minutes);
	}
	else {
		c->Message(Chat::Red, "To set the Time: #time HH [MM]");
		TimeOfDay_Struct eqTime;
		zone->zone_time.GetCurrentEQTimeOfDay( time(0), &eqTime);
		// Format the 1-based hour as 12-hour am/pm plus the zone's
		// timezone offset.
		sprintf(timeMessage,"%02d:%s%d %s (Timezone: %ih %im)",
			((eqTime.hour - 1) % 12) == 0 ? 12 : ((eqTime.hour - 1) % 12),
			(eqTime.minute < 10) ? "0" : "",
			eqTime.minute,
			(eqTime.hour >= 13) ? "pm" : "am",
			zone->zone_time.getEQTimeZoneHr(),
			zone->zone_time.getEQTimeZoneMin()
		);
		c->Message(Chat::Red, "It is now %s.", timeMessage);
		LogInfo("Current Time is: {}", timeMessage);
	}
}
void command_guild(Client *c, const Seperator *sep)
{
int admin=c->Admin();
Mob *target=c->GetTarget();
if (strcasecmp(sep->arg[1], "help") == 0) {
c->Message(Chat::White, "GM Guild commands:");
c->Message(Chat::White, " #guild list - lists all guilds on the server");
c->Message(Chat::White, " #guild create {guildleader charname or CharID} guildname");
c->Message(Chat::White, " #guild delete guildID");
c->Message(Chat::White, " #guild rename guildID newname");
c->Message(Chat::White, " #guild set charname guildID (0=no guild)");
c->Message(Chat::White, " #guild setrank charname rank");
c->Message(Chat::White, " #guild setleader guildID {guildleader charname or CharID}");
}
else if (strcasecmp(sep->arg[1], "status") == 0 || strcasecmp(sep->arg[1], "stat") == 0) {
Client* client = 0;
if (sep->arg[2][0] != 0)
client = entity_list.GetClientByName(sep->argplus[2]);
else if (target != 0 && target->IsClient())
client = target->CastToClient();
if (client == 0)
c->Message(Chat::White, "You must target someone or specify a character name");
else if ((client->Admin() >= minStatusToEditOtherGuilds && admin < minStatusToEditOtherGuilds) && client->GuildID() != c->GuildID()) // no peeping for GMs, make sure tell message stays the same
c->Message(Chat::White, "You must target someone or specify a character name.");
else {
if (client->IsInAGuild())
c->Message(Chat::White, "%s is not in a guild.", client->GetName());
else if (guild_mgr.IsGuildLeader(client->GuildID(), client->CharacterID()))
c->Message(Chat::White, "%s is the leader of <%s> rank: %s", client->GetName(), guild_mgr.GetGuildName(client->GuildID()), guild_mgr.GetRankName(client->GuildID(), client->GuildRank()));
else
c->Message(Chat::White, "%s is a member of <%s> rank: %s", client->GetName(), guild_mgr.GetGuildName(client->GuildID()), guild_mgr.GetRankName(client->GuildID(), client->GuildRank()));
}
}
else if (strcasecmp(sep->arg[1], "info") == 0) {
if (sep->arg[2][0] == 0 && c->IsInAGuild()) {
if (admin >= minStatusToEditOtherGuilds)
c->Message(Chat::White, "Usage: #guildinfo guild_id");
else
c->Message(Chat::White, "You're not in a guild");
}
else {
uint32 tmp = GUILD_NONE;
if (sep->arg[2][0] == 0)
tmp = c->GuildID();
else if (admin >= minStatusToEditOtherGuilds)
tmp = atoi(sep->arg[2]);
if(tmp != GUILD_NONE)
guild_mgr.DescribeGuild(c, tmp);
}
}
/*
else if (strcasecmp(sep->arg[1], "edit") == 0) {
if (c->GuildDBID() == 0)
c->Message(Chat::White, "You arent in a guild!");
else if (!sep->IsNumber(2))
c->Message(Chat::White, "Error: invalid rank #.");
else if (atoi(sep->arg[2]) < 0 || atoi(sep->arg[2]) > GUILD_MAX_RANK)
c->Message(Chat::White, "Error: invalid rank #.");
else if (!c->GuildRank() == 0)
c->Message(Chat::White, "You must be rank %s to use edit.", guilds[c->GuildEQID()].rank[0].rankname);
else if (!worldserver.Connected())
c->Message(Chat::White, "Error: World server dirconnected");
else {
if (!helper_guild_edit(c, c->GuildDBID(), c->GuildEQID(), atoi(sep->arg[2]), sep->arg[3], sep->argplus[4])) {
c->Message(Chat::White, " #guild edit rank title newtitle");
c->Message(Chat::White, " #guild edit rank permission 0/1");
}
else {
ServerPacket* pack = new ServerPacket(ServerOP_RefreshGuild, 5);
int32 geqid=c->GuildEQID();
memcpy(pack->pBuffer, &geqid, 4);
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
}
else if (strcasecmp(sep->arg[1], "gmedit") == 0 && admin >= 100) {
if (!sep->IsNumber(2))
c->Message(Chat::White, "Error: invalid guilddbid.");
else if (!sep->IsNumber(3))
c->Message(Chat::White, "Error: invalid rank #.");
else if (atoi(sep->arg[3]) < 0 || atoi(sep->arg[3]) > GUILD_MAX_RANK)
c->Message(Chat::White, "Error: invalid rank #.");
else if (!worldserver.Connected())
c->Message(Chat::White, "Error: World server dirconnected");
else {
uint32 eqid = database.GetGuildEQID(atoi(sep->arg[2]));
if (eqid == GUILD_NONE)
c->Message(Chat::White, "Error: Guild not found");
else if (!helper_guild_edit(c, atoi(sep->arg[2]), eqid, atoi(sep->arg[3]), sep->arg[4], sep->argplus[5])) {
c->Message(Chat::White, " #guild gmedit guilddbid rank title newtitle");
c->Message(Chat::White, " #guild gmedit guilddbid rank permission 0/1");
}
else {
ServerPacket* pack = new ServerPacket(ServerOP_RefreshGuild, 5);
memcpy(pack->pBuffer, &eqid, 4);
worldserver.SendPacket(pack);
safe_delete(pack);
}
}
}
*/
else if (strcasecmp(sep->arg[1], "set") == 0) {
if (!sep->IsNumber(3))
c->Message(Chat::White, "Usage: #guild set charname guildgbid (0 = clear guildtag)");
else {
uint32 guild_id = atoi(sep->arg[3]);
if(guild_id == 0)
guild_id = GUILD_NONE;
else if(!guild_mgr.GuildExists(guild_id)) {
c->Message(Chat::Red, "Guild %d does not exist.", guild_id);
return;
}
uint32 charid = database.GetCharacterID(sep->arg[2]);
if(charid == 0) {
c->Message(Chat::Red, "Unable to find character '%s'", charid);
return;
}
//we could do the checking we need for guild_mgr.CheckGMStatus, but im lazy right now
if(admin < minStatusToEditOtherGuilds) {
c->Message(Chat::Red, "Access denied.");
return;
}
if(guild_id == GUILD_NONE) {
LogGuilds("[{}]: Removing [{}] ([{}]) from guild with GM command", c->GetName(), sep->arg[2], charid);
} else {
LogGuilds("[{}]: Putting [{}] ([{}]) into guild [{}] ([{}]) with GM command", c->GetName(), sep->arg[2], charid, guild_mgr.GetGuildName(guild_id), guild_id);
}
if(!guild_mgr.SetGuild(charid, guild_id, GUILD_MEMBER)) {
c->Message(Chat::Red, "Error putting '%s' into guild %d", sep->arg[2], guild_id);
} else {
c->Message(Chat::White, "%s has been put into guild %d", sep->arg[2], guild_id);
}
}
}
/*else if (strcasecmp(sep->arg[1], "setdoor") == 0 && admin >= minStatusToEditOtherGuilds) {
if (!sep->IsNumber(2))
c->Message(Chat::White, "Usage: #guild setdoor guildEQid (0 = delete guilddoor)");
else {
// guild doors
if((!guilds[atoi(sep->arg[2])].databaseID) && (atoi(sep->arg[2])!=0) )
{
c->Message(Chat::White, "These is no guild with this guildEQid");
}
else {
c->SetIsSettingGuildDoor(true);
c->Message(Chat::White, "Click on a door you want to become a guilddoor");
c->SetSetGuildDoorID(atoi(sep->arg[2]));
}
}
}*/
else if (strcasecmp(sep->arg[1], "setrank") == 0) {
int rank = atoi(sep->arg[3]);
if (!sep->IsNumber(3))
c->Message(Chat::White, "Usage: #guild setrank charname rank");
else if (rank < 0 || rank > GUILD_MAX_RANK)
c->Message(Chat::White, "Error: invalid rank #.");
else {
uint32 charid = database.GetCharacterID(sep->arg[2]);
if(charid == 0) {
c->Message(Chat::Red, "Unable to find character '%s'", charid);
return;
}
//we could do the checking we need for guild_mgr.CheckGMStatus, but im lazy right now
if(admin < minStatusToEditOtherGuilds) {
c->Message(Chat::Red, "Access denied.");
return;
}
LogGuilds("[{}]: Setting [{}] ([{}])'s guild rank to [{}] with GM command", c->GetName(), sep->arg[2], charid, rank);
if(!guild_mgr.SetGuildRank(charid, rank))
c->Message(Chat::Red, "Error while setting rank %d on '%s'.", rank, sep->arg[2]);
else
c->Message(Chat::White, "%s has been set to rank %d", sep->arg[2], rank);
}
}
else if (strcasecmp(sep->arg[1], "create") == 0) {
if (sep->arg[3][0] == 0)
c->Message(Chat::White, "Usage: #guild create {guildleader charname or CharID} guild name");
else if (!worldserver.Connected())
c->Message(Chat::White, "Error: World server dirconnected");
else {
uint32 leader = 0;
if (sep->IsNumber(2)) {
leader = atoi(sep->arg[2]);
} else if((leader=database.GetCharacterID(sep->arg[2])) != 0) {
//got it from the db..
} else {
c->Message(Chat::Red, "Unable to find char '%s'", sep->arg[2]);
return;
}
if (leader == 0) {
c->Message(Chat::White, "Guild leader not found.");
return;
}
uint32 tmp = guild_mgr.FindGuildByLeader(leader);
if (tmp != GUILD_NONE) {
c->Message(Chat::White, "Error: %s already is the leader of DB# %i '%s'.", sep->arg[2], tmp, guild_mgr.GetGuildName(tmp));
}
else {
if(admin < minStatusToEditOtherGuilds) {
c->Message(Chat::Red, "Access denied.");
return;
}
uint32 id = guild_mgr.CreateGuild(sep->argplus[3], leader);
LogGuilds("[{}]: Creating guild [{}] with leader [{}] with GM command. It was given id [{}]", c->GetName(),
sep->argplus[3], leader, (unsigned long)id);
if (id == GUILD_NONE)
c->Message(Chat::White, "Guild creation failed.");
else {
c->Message(Chat::White, "Guild created: Leader: %i, number %i: %s", leader, id, sep->argplus[3]);
if(!guild_mgr.SetGuild(leader, id, GUILD_LEADER))
c->Message(Chat::White, "Unable to set guild leader's guild in the database. Your going to have to run #guild set");
}
}
}
}
else if (strcasecmp(sep->arg[1], "delete") == 0) {
if (!sep->IsNumber(2))
c->Message(Chat::White, "Usage: #guild delete guildID");
else if (!worldserver.Connected())
c->Message(Chat::White, "Error: World server dirconnected");
else {
uint32 id = atoi(sep->arg[2]);
if(!guild_mgr.GuildExists(id)) {
c->Message(Chat::White, "Guild %d does not exist!", id);
return;
}
if(admin < minStatusToEditOtherGuilds) {
//this person is not allowed to just edit any guild, check this guild's min status.
if(c->GuildID() != id) {
c->Message(Chat::Red, "Access denied to edit other people's guilds");
return;
} else if(!guild_mgr.CheckGMStatus(id, admin)) {
c->Message(Chat::Red, "Access denied to edit your guild with GM commands.");
return;
}
}
LogGuilds("[{}]: Deleting guild [{}] ([{}]) with GM command", c->GetName(),
guild_mgr.GetGuildName(id), id);
if (!guild_mgr.DeleteGuild(id))
c->Message(Chat::White, "Guild delete failed.");
else {
c->Message(Chat::White, "Guild %d deleted.", id);
}
}
}
else if (strcasecmp(sep->arg[1], "rename") == 0) {
if ((!sep->IsNumber(2)) || sep->arg[3][0] == 0)
c->Message(Chat::White, "Usage: #guild rename guildID newname");
else if (!worldserver.Connected())
c->Message(Chat::White, "Error: World server dirconnected");
else {
uint32 id = atoi(sep->arg[2]);
if(!guild_mgr.GuildExists(id)) {
c->Message(Chat::White, "Guild %d does not exist!", id);
return;
}
if(admin < minStatusToEditOtherGuilds) {
//this person is not allowed to just edit any guild, check this guild's min status.
if(c->GuildID() != id) {
c->Message(Chat::Red, "Access denied to edit other people's guilds");
return;
} else if(!guild_mgr.CheckGMStatus(id, admin)) {
c->Message(Chat::Red, "Access denied to edit your guild with GM commands.");
return;
}
}
LogGuilds("[{}]: Renaming guild [{}] ([{}]) to [{}] with GM command", c->GetName(),
guild_mgr.GetGuildName(id), id, sep->argplus[3]);
if (!guild_mgr.RenameGuild(id, sep->argplus[3]))
c->Message(Chat::White, "Guild rename failed.");
else {
c->Message(Chat::White, "Guild %d renamed to %s", id, sep->argplus[3]);
}
}
}
else if (strcasecmp(sep->arg[1], "setleader") == 0) {
if (sep->arg[3][0] == 0 || !sep->IsNumber(2))
c->Message(Chat::White, "Usage: #guild setleader guild_id {guildleader charname or CharID}");
else if (!worldserver.Connected())
c->Message(Chat::White, "Error: World server dirconnected");
else {
uint32 leader = 0;
if (sep->IsNumber(2)) {
leader = atoi(sep->arg[2]);
} else if((leader=database.GetCharacterID(sep->arg[2])) != 0) {
//got it from the db..
} else {
c->Message(Chat::Red, "Unable to find char '%s'", sep->arg[2]);
return;
}
uint32 tmpdb = guild_mgr.FindGuildByLeader(leader);
if (leader == 0)
c->Message(Chat::White, "New leader not found.");
else if (tmpdb != 0) {
c->Message(Chat::White, "Error: %s already is the leader of guild # %i", sep->arg[2], tmpdb);
}
else {
uint32 id = atoi(sep->arg[2]);
if(!guild_mgr.GuildExists(id)) {
c->Message(Chat::White, "Guild %d does not exist!", id);
return;
}
if(admin < minStatusToEditOtherGuilds) {
//this person is not allowed to just edit any guild, check this guild's min status.
if(c->GuildID() != id) {
c->Message(Chat::Red, "Access denied to edit other people's guilds");
return;
} else if(!guild_mgr.CheckGMStatus(id, admin)) {
c->Message(Chat::Red, "Access denied to edit your guild with GM commands.");
return;
}
}
LogGuilds("[{}]: Setting leader of guild [{}] ([{}]) to [{}] with GM command", c->GetName(),
guild_mgr.GetGuildName(id), id, leader);
if(!guild_mgr.SetGuildLeader(id, leader))
c->Message(Chat::White, "Guild leader change failed.");
else {
c->Message(Chat::White, "Guild leader changed: guild # %d, Leader: %s", id, sep->argplus[3]);
}
}
}
}
else if (strcasecmp(sep->arg[1], "list") == 0) {
if(admin < minStatusToEditOtherGuilds) {
c->Message(Chat::Red, "Access denied.");
return;
}
guild_mgr.ListGuilds(c);
}
else {
c->Message(Chat::White, "Unknown guild command, try #guild help");
}
}
/*
bool helper_guild_edit(Client *c, uint32 dbid, uint32 eqid, uint8 rank, const char* what, const char* value) {
struct GuildRankLevel_Struct grl;
strcpy(grl.rankname, guild_mgr.GetRankName(eqid, rank));
grl.demote = guilds[eqid].rank[rank].demote;
grl.heargu = guilds[eqid].rank[rank].heargu;
grl.invite = guilds[eqid].rank[rank].invite;
grl.motd = guilds[eqid].rank[rank].motd;
grl.promote = guilds[eqid].rank[rank].promote;
grl.remove = guilds[eqid].rank[rank].remove;
grl.speakgu = guilds[eqid].rank[rank].speakgu;
grl.warpeace = guilds[eqid].rank[rank].warpeace;
if (strcasecmp(what, "title") == 0) {
if (strlen(value) > 100)
c->Message(Chat::White, "Error: Title has a maxium length of 100 characters.");
else
strcpy(grl.rankname, value);
}
else if (rank == 0)
c->Message(Chat::White, "Error: Rank 0's permissions can not be changed.");
else {
if (!(strlen(value) == 1 && (value[0] == '0' || value[0] == '1')))
return false;
if (strcasecmp(what, "demote") == 0)
grl.demote = (value[0] == '1');
else if (strcasecmp(what, "heargu") == 0)
grl.heargu = (value[0] == '1');
else if (strcasecmp(what, "invite") == 0)
grl.invite = (value[0] == '1');
else if (strcasecmp(what, "motd") == 0)
grl.motd = (value[0] == '1');
else if (strcasecmp(what, "promote") == 0)
grl.promote = (value[0] == '1');
else if (strcasecmp(what, "remove") == 0)
grl.remove = (value[0] == '1');
else if (strcasecmp(what, "speakgu") == 0)
grl.speakgu = (value[0] == '1');
else if (strcasecmp(what, "warpeace") == 0)
grl.warpeace = (value[0] == '1');
else
c->Message(Chat::White, "Error: Permission name not recognized.");
}
if (!database.EditGuild(dbid, rank, &grl))
c->Message(Chat::White, "Error: database.EditGuild() failed");
return true;
}*/
void command_zonestatus(Client *c, const Seperator *sep)
{
	// Ask the world server for zone status; world replies based on the
	// requester's admin level, which travels in the packet's first byte.
	if (!worldserver.Connected()) {
		c->Message(Chat::White, "Error: World server disconnected");
		return;
	}
	auto pack = new ServerPacket(ServerOP_ZoneStatus, strlen(c->GetName()) + 2);
	pack->pBuffer[0] = (uint8) c->Admin();            // requester's admin level
	strcpy((char *) &pack->pBuffer[1], c->GetName()); // requester's name, NUL-terminated
	worldserver.SendPacket(pack);
	delete pack;
}
void command_doanim(Client *c, const Seperator *sep)
{
	// Play animation [arg1] (with optional arg2) on the target (high-status
	// GMs) or on the issuing client (everyone else).
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White, "Usage: #DoAnim [number]");
		return;
	}
	if (c->Admin() >= commandDoAnimOthers) {
		// This status tier animates its current target, so one is required.
		if (c->GetTarget() == 0) {
			c->Message(Chat::White, "Error: You need a target.");
		}
		else {
			c->GetTarget()->DoAnim(atoi(sep->arg[1]), atoi(sep->arg[2]));
		}
	}
	else {
		c->DoAnim(atoi(sep->arg[1]), atoi(sep->arg[2]));
	}
}
void command_randomfeatures(Client *c, const Seperator *sep)
{
	// Randomize the targeted mob's facial/appearance features.
	Mob *target = c->GetTarget();
	if (!target) {
		c->Message(Chat::White,"Error: This command requires a target");
		return;
	}
	if (target->RandomizeFeatures())
		c->Message(Chat::White,"Features Randomized");
	else
		c->Message(Chat::White,"This command requires a Playable Race as the target");
}
void command_face(Client *c, const Seperator *sep)
{
	// Re-send the target's current appearance with only the face changed.
	Mob *target = c->GetTarget();
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White,"Usage: #face [number of face]");
		return;
	}
	if (!target) {
		c->Message(Chat::White,"Error: this command requires a target");
		return;
	}
	uint8 new_face = atoi(sep->arg[1]);
	target->SendIllusionPacket(
		target->GetRace(), target->GetGender(),
		0xFF,                   // texture: unchanged
		0xFF,                   // helm texture: unchanged
		target->GetHairColor(), target->GetBeardColor(),
		target->GetEyeColor1(), target->GetEyeColor2(),
		target->GetHairStyle(), new_face, target->GetBeard(),
		0xFF,
		target->GetDrakkinHeritage(), target->GetDrakkinTattoo(),
		target->GetDrakkinDetails());
	c->Message(Chat::White,"Face = %i", atoi(sep->arg[1]));
}
void command_findaliases(Client *c, const Seperator *sep)
{
	// List every registered alias that maps to the same command as arg1,
	// filtered by the caller's admin access.
	if (!sep->arg[1][0]) {
		c->Message(Chat::White, "Usage: #findaliases [alias | command]");
		return;
	}
	auto alias_entry = commandaliases.find(sep->arg[1]);
	if (alias_entry == commandaliases.end()) {
		c->Message(Chat::Yellow, "No commands or aliases match '%s'", sep->arg[1]);
		return;
	}
	auto cmd_entry = commandlist.find(alias_entry->second);
	if (alias_entry->second.empty() || cmd_entry == commandlist.end()) {
		c->Message(Chat::White, "An unknown condition occurred...");
		return;
	}
	c->Message(Chat::White, "Available command aliases for '%s':", cmd_entry->first.c_str());
	int shown = 0;
	for (const auto &alias : commandaliases) {
		// Skip aliases of other commands and commands above the caller's status.
		if (strcasecmp(alias_entry->second.c_str(), alias.second.c_str()) || c->Admin() < cmd_entry->second->access)
			continue;
		c->Message(Chat::White, "%c%s", COMMAND_CHAR, alias.first.c_str());
		++shown;
	}
	c->Message(Chat::White, "%d command alias%s listed.", shown, shown != 1 ? "es" : "");
}
void command_details(Client *c, const Seperator *sep)
{
	// Re-send the target's appearance with only the Drakkin details changed.
	Mob *target = c->GetTarget();
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White,"Usage: #details [number of drakkin detail]");
		return;
	}
	if (!target) {
		c->Message(Chat::White,"Error: this command requires a target");
		return;
	}
	uint32 new_details = atoi(sep->arg[1]);
	target->SendIllusionPacket(
		target->GetRace(), target->GetGender(),
		0xFF,                   // texture: unchanged
		0xFF,                   // helm texture: unchanged
		target->GetHairColor(), target->GetBeardColor(),
		target->GetEyeColor1(), target->GetEyeColor2(),
		target->GetHairStyle(), target->GetLuclinFace(), target->GetBeard(),
		0xFF,
		target->GetDrakkinHeritage(), target->GetDrakkinTattoo(),
		new_details);
	c->Message(Chat::White,"Details = %i", atoi(sep->arg[1]));
}
void command_heritage(Client *c, const Seperator *sep)
{
	// Re-send the target's appearance with only the Drakkin heritage changed.
	Mob *target = c->GetTarget();
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White,"Usage: #heritage [number of Drakkin heritage]");
		return;
	}
	if (!target) {
		c->Message(Chat::White,"Error: this command requires a target");
		return;
	}
	uint32 new_heritage = atoi(sep->arg[1]);
	target->SendIllusionPacket(
		target->GetRace(), target->GetGender(),
		0xFF,                   // texture: unchanged
		0xFF,                   // helm texture: unchanged
		target->GetHairColor(), target->GetBeardColor(),
		target->GetEyeColor1(), target->GetEyeColor2(),
		target->GetHairStyle(), target->GetLuclinFace(), target->GetBeard(),
		0xFF,
		new_heritage, target->GetDrakkinTattoo(),
		target->GetDrakkinDetails());
	c->Message(Chat::White,"Heritage = %i", atoi(sep->arg[1]));
}
void command_tattoo(Client *c, const Seperator *sep)
{
	// Re-send the target's appearance with only the Drakkin tattoo changed.
	Mob *target = c->GetTarget();
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White,"Usage: #tattoo [number of Drakkin tattoo]");
		return;
	}
	if (!target) {
		c->Message(Chat::White,"Error: this command requires a target");
		return;
	}
	uint32 new_tattoo = atoi(sep->arg[1]);
	target->SendIllusionPacket(
		target->GetRace(), target->GetGender(),
		0xFF,                   // texture: unchanged
		0xFF,                   // helm texture: unchanged
		target->GetHairColor(), target->GetBeardColor(),
		target->GetEyeColor1(), target->GetEyeColor2(),
		target->GetHairStyle(), target->GetLuclinFace(), target->GetBeard(),
		0xFF,
		target->GetDrakkinHeritage(), new_tattoo,
		target->GetDrakkinDetails());
	c->Message(Chat::White,"Tattoo = %i", atoi(sep->arg[1]));
}
void command_helm(Client *c, const Seperator *sep)
{
	// Re-send the target's appearance with only the helm texture changed.
	Mob *target = c->GetTarget();
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White,"Usage: #helm [number of helm texture]");
		return;
	}
	if (!target) {
		c->Message(Chat::White,"Error: this command requires a target");
		return;
	}
	uint8 new_helm = atoi(sep->arg[1]);
	target->SendIllusionPacket(
		target->GetRace(), target->GetGender(),
		0xFF,                   // body texture: unchanged
		new_helm,
		target->GetHairColor(), target->GetBeardColor(),
		target->GetEyeColor1(), target->GetEyeColor2(),
		target->GetHairStyle(), target->GetLuclinFace(), target->GetBeard(),
		0xFF,
		target->GetDrakkinHeritage(), target->GetDrakkinTattoo(),
		target->GetDrakkinDetails());
	c->Message(Chat::White,"Helm = %i", atoi(sep->arg[1]));
}
void command_hair(Client *c, const Seperator *sep)
{
	// Re-send the target's appearance with only the hair style changed.
	Mob *target = c->GetTarget();
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White,"Usage: #hair [number of hair style]");
		return;
	}
	if (!target) {
		c->Message(Chat::White,"Error: this command requires a target");
		return;
	}
	uint8 new_hair = atoi(sep->arg[1]);
	target->SendIllusionPacket(
		target->GetRace(), target->GetGender(),
		0xFF,                   // texture: unchanged
		0xFF,                   // helm texture: unchanged
		target->GetHairColor(), target->GetBeardColor(),
		target->GetEyeColor1(), target->GetEyeColor2(),
		new_hair, target->GetLuclinFace(), target->GetBeard(),
		0xFF,
		target->GetDrakkinHeritage(), target->GetDrakkinTattoo(),
		target->GetDrakkinDetails());
	c->Message(Chat::White,"Hair = %i", atoi(sep->arg[1]));
}
void command_haircolor(Client *c, const Seperator *sep)
{
	// Re-send the target's appearance with only the hair color changed.
	Mob *target = c->GetTarget();
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White,"Usage: #haircolor [number of hair color]");
		return;
	}
	if (!target) {
		c->Message(Chat::White,"Error: this command requires a target");
		return;
	}
	uint8 new_hair_color = atoi(sep->arg[1]);
	target->SendIllusionPacket(
		target->GetRace(), target->GetGender(),
		0xFF,                   // texture: unchanged
		0xFF,                   // helm texture: unchanged
		new_hair_color, target->GetBeardColor(),
		target->GetEyeColor1(), target->GetEyeColor2(),
		target->GetHairStyle(), target->GetLuclinFace(), target->GetBeard(),
		0xFF,
		target->GetDrakkinHeritage(), target->GetDrakkinTattoo(),
		target->GetDrakkinDetails());
	c->Message(Chat::White,"Hair Color = %i", atoi(sep->arg[1]));
}
void command_beard(Client *c, const Seperator *sep)
{
	// Re-send the target's appearance with only the beard style changed.
	Mob *target = c->GetTarget();
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White,"Usage: #beard [number of beard style]");
		return;
	}
	if (!target) {
		c->Message(Chat::White,"Error: this command requires a target");
		return;
	}
	uint8 new_beard = atoi(sep->arg[1]);
	target->SendIllusionPacket(
		target->GetRace(), target->GetGender(),
		0xFF,                   // texture: unchanged
		0xFF,                   // helm texture: unchanged
		target->GetHairColor(), target->GetBeardColor(),
		target->GetEyeColor1(), target->GetEyeColor2(),
		target->GetHairStyle(), target->GetLuclinFace(), new_beard,
		0xFF,
		target->GetDrakkinHeritage(), target->GetDrakkinTattoo(),
		target->GetDrakkinDetails());
	c->Message(Chat::White,"Beard = %i", atoi(sep->arg[1]));
}
void command_beardcolor(Client *c, const Seperator *sep)
{
	// Re-send the target's appearance with only the beard color changed.
	Mob *target = c->GetTarget();
	if (!sep->IsNumber(1)) {
		c->Message(Chat::White,"Usage: #beardcolor [number of beard color]");
		return;
	}
	if (!target) {
		c->Message(Chat::White,"Error: this command requires a target");
		return;
	}
	uint8 new_beard_color = atoi(sep->arg[1]);
	target->SendIllusionPacket(
		target->GetRace(), target->GetGender(),
		0xFF,                   // texture: unchanged
		0xFF,                   // helm texture: unchanged
		target->GetHairColor(), new_beard_color,
		target->GetEyeColor1(), target->GetEyeColor2(),
		target->GetHairStyle(), target->GetLuclinFace(), target->GetBeard(),
		0xFF,
		target->GetDrakkinHeritage(), target->GetDrakkinTattoo(),
		target->GetDrakkinDetails());
	c->Message(Chat::White,"Beard Color = %i", atoi(sep->arg[1]));
}
// Scribe every eligible spell whose required level falls in
// [min_level, max_level] into the spell book. A GM targeting another
// client scribes for the target; otherwise for the command issuer.
void command_scribespells(Client *c, const Seperator *sep)
{
	Client *t = c;
	if (c->GetTarget() && c->GetTarget()->IsClient() && c->GetGM())
		t = c->GetTarget()->CastToClient();

	if(sep->argnum < 1 || !sep->IsNumber(1)) {
		c->Message(Chat::White, "FORMAT: #scribespells <max level> <min level>");
		return;
	}

	uint8 max_level = (uint8)atol(sep->arg[1]);
	if (!c->GetGM() && max_level > (uint8)RuleI(Character, MaxLevel))
		max_level = (uint8)RuleI(Character, MaxLevel); // default to Character:MaxLevel if we're not a GM & it's higher than the max level
	uint8 min_level = (sep->IsNumber(2) ? (uint8)atol(sep->arg[2]) : 1); // default to 1 if there isn't a 2nd argument
	if (!c->GetGM() && min_level > (uint8)RuleI(Character, MaxLevel))
		min_level = (uint8)RuleI(Character, MaxLevel); // default to Character:MaxLevel if we're not a GM & it's higher than the max level

	if(max_level < 1 || min_level < 1) {
		c->Message(Chat::White, "ERROR: Level must be greater than 1.");
		return;
	}
	if (min_level > max_level) {
		c->Message(Chat::White, "ERROR: Min Level must be less than or equal to Max Level.");
		return;
	}

	t->Message(Chat::White, "Scribing spells to spellbook.");
	if(t != c)
		c->Message(Chat::White, "Scribing spells for %s.", t->GetName());
	LogInfo("Scribe spells request for [{}] from [{}], levels: [{}] -> [{}]", t->GetName(), c->GetName(), min_level, max_level);

	// Walk every spell id, scribing qualifying spells into successive free
	// book slots until the spell table or the book is exhausted.
	int book_slot = t->GetNextAvailableSpellBookSlot();
	int spell_id = 0;
	int count = 0;

	for ( ; spell_id < SPDAT_RECORDS && book_slot < EQEmu::spells::SPELLBOOK_SIZE; ++spell_id) {
		// GetNextAvailableSpellBookSlot() returns -1 when no free slot remains.
		if (book_slot == -1) {
			t->Message(
				13,
				"Unable to scribe spell %s (%i) to spellbook: no more spell book slots available.",
				((spell_id >= 0 && spell_id < SPDAT_RECORDS) ? spells[spell_id].name : "Out-of-range"),
				spell_id
			);
			if (t != c)
				c->Message(
					13,
					"Error scribing spells: %s ran out of spell book slots on spell %s (%i)",
					t->GetName(),
					((spell_id >= 0 && spell_id < SPDAT_RECORDS) ? spells[spell_id].name : "Out-of-range"),
					spell_id
				);

			break;
		}
		// Defensive range checks; should be unreachable given the loop bounds above.
		if (spell_id < 0 || spell_id >= SPDAT_RECORDS) {
			c->Message(Chat::Red, "FATAL ERROR: Spell id out-of-range (id: %i, min: 0, max: %i)", spell_id, SPDAT_RECORDS);
			return;
		}
		if (book_slot < 0 || book_slot >= EQEmu::spells::SPELLBOOK_SIZE) {
			c->Message(Chat::Red, "FATAL ERROR: Book slot out-of-range (slot: %i, min: 0, max: %i)", book_slot, EQEmu::spells::SPELLBOOK_SIZE);
			return;
		}

		// Single-pass eligibility filter: each "break" skips to the next spell id.
		while (true) {
			if (spells[spell_id].classes[WARRIOR] == 0) // check if spell exists
				break;
			if (spells[spell_id].classes[t->GetPP().class_ - 1] > max_level) // maximum level
				break;
			if (spells[spell_id].classes[t->GetPP().class_ - 1] < min_level) // minimum level
				break;
			if (spells[spell_id].skill == 52) // NOTE(review): skill 52 is excluded — meaning not confirmable from this file
				break;

			// Round-trip check guards against data loss narrowing int -> uint16.
			uint16 spell_id_ = (uint16)spell_id;
			if ((spell_id_ != spell_id) || (spell_id != spell_id_)) {
				c->Message(Chat::Red, "FATAL ERROR: Type conversion data loss with spell_id (%i != %u)", spell_id, spell_id_);
				return;
			}

			if (!IsDiscipline(spell_id_) && !t->HasSpellScribed(spell_id)) { // isn't a discipline & we don't already have it scribed
				t->ScribeSpell(spell_id_, book_slot);
				++count;
			}

			break;
		}

		book_slot = t->GetNextAvailableSpellBookSlot(book_slot);
	}

	if (count > 0) {
		t->Message(Chat::White, "Successfully scribed %i spells.", count);
		if (t != c)
			c->Message(Chat::White, "Successfully scribed %i spells for %s.", count, t->GetName());
	}
	else {
		t->Message(Chat::White, "No spells scribed.");
		if (t != c)
			c->Message(Chat::White, "No spells scribed for %s.", t->GetName());
	}
}
// Scribe a single spell (by id) into the spell book. A GM targeting another
// client scribes for the target; otherwise for the command issuer.
void command_scribespell(Client *c, const Seperator *sep) {
	Client *t = c;
	if (c->GetTarget() && c->GetTarget()->IsClient() && c->GetGM())
		t = c->GetTarget()->CastToClient();

	if (!sep->arg[1][0]) {
		c->Message(Chat::White, "FORMAT: #scribespell <spellid>");
		return;
	}

	uint16 spell_id = atoi(sep->arg[1]);
	if (!IsValidSpell(spell_id)) {
		c->Message(Chat::Red, "Spell ID: %i is an unknown spell and cannot be scribed.", spell_id);
		return;
	}

	t->Message(Chat::White, "Scribing spell: %s (%i) to spellbook.", spells[spell_id].name, spell_id);
	if (t != c)
		c->Message(Chat::White, "Scribing spell: %s (%i) for %s.", spells[spell_id].name, spell_id, t->GetName());
	LogInfo("Scribe spell: [{}] ([{}]) request for [{}] from [{}]", spells[spell_id].name, spell_id, t->GetName(), c->GetName());

	// Eligibility mirrors command_scribespells: spell exists, isn't skill 52,
	// is usable by the target's class, and isn't a discipline.
	if (spells[spell_id].classes[WARRIOR] != 0 && spells[spell_id].skill != 52 && spells[spell_id].classes[t->GetPP().class_ - 1] > 0 && !IsDiscipline(spell_id)) {
		// BUGFIX: book_slot was declared uint16, so the -1 "book full" sentinel
		// from GetNextAvailableSpellBookSlot() compared as 65535 >= 0 and the
		// spell was scribed into a bogus slot. Keep the slot signed (as
		// command_scribespells does).
		int book_slot = t->GetNextAvailableSpellBookSlot();
		if (book_slot >= 0 && t->FindSpellBookSlotBySpellID(spell_id) < 0)
			t->ScribeSpell(spell_id, book_slot);
		else {
			t->Message(Chat::Red, "Unable to scribe spell: %s (%i) to your spellbook.", spells[spell_id].name, spell_id);
			if (t != c)
				c->Message(Chat::Red, "Unable to scribe spell: %s (%i) for %s.", spells[spell_id].name, spell_id, t->GetName());
		}
	}
	else
		c->Message(Chat::Red, "Your target can not scribe this spell.");
}
// Remove a single spell (by id) from the spell book. A GM targeting another
// client unscribes for the target; otherwise for the command issuer.
void command_unscribespell(Client *c, const Seperator *sep) {
	Client *t = c;
	if (c->GetTarget() && c->GetTarget()->IsClient() && c->GetGM())
		t = c->GetTarget()->CastToClient();

	if (!sep->arg[1][0]) {
		c->Message(Chat::White, "FORMAT: #unscribespell <spellid>");
		return;
	}

	uint16 spell_id = atoi(sep->arg[1]);
	if (!IsValidSpell(spell_id))
		return; // matches prior behavior: unknown spell ids are silently ignored

	// BUGFIX: the slot was declared uint16, so FindSpellBookSlotBySpellID()'s
	// negative "not scribed" sentinel compared as 65535 >= 0 and
	// UnscribeSpell() ran on a bogus slot. Keep the slot signed.
	int book_slot = t->FindSpellBookSlotBySpellID(spell_id);
	if (book_slot >= 0) {
		t->UnscribeSpell(book_slot);
		t->Message(Chat::White, "Unscribing spell: %s (%i) from spellbook.", spells[spell_id].name, spell_id);
		if (t != c)
			c->Message(Chat::White, "Unscribing spell: %s (%i) for %s.", spells[spell_id].name, spell_id, t->GetName());
		LogInfo("Unscribe spell: [{}] ([{}]) request for [{}] from [{}]", spells[spell_id].name, spell_id, t->GetName(), c->GetName());
	}
	else {
		t->Message(Chat::Red, "Unable to unscribe spell: %s (%i) from your spellbook. This spell is not scribed.", spells[spell_id].name, spell_id);
		if (t != c)
			c->Message(Chat::Red, "Unable to unscribe spell: %s (%i) for %s due to spell not scribed.", spells[spell_id].name, spell_id, t->GetName());
	}
}
void command_unscribespells(Client *c, const Seperator *sep)
{
	// Wipe the whole spell book — the GM's target when it is a client,
	// otherwise the GM's own.
	Client *target_client = c;
	if (c->GetTarget() && c->GetTarget()->IsClient() && c->GetGM())
		target_client = c->GetTarget()->CastToClient();
	target_client->UnscribeSpellAll();
}
void command_untraindisc(Client *c, const Seperator *sep) {
	// Untrain the discipline whose spell id matches arg1 (first match wins).
	Client *t = c;
	if (c->GetTarget() && c->GetTarget()->IsClient() && c->GetGM())
		t = c->GetTarget()->CastToClient();

	int disc_id = atoi(sep->arg[1]);
	for (int slot = 0; slot < MAX_PP_DISCIPLINES; slot++) {
		if (t->GetPP().disciplines.values[slot] == disc_id) {
			t->UntrainDisc(slot, 1);
			return;
		}
	}
}
void command_untraindiscs(Client *c, const Seperator *sep) {
	// Untrain every discipline — the GM's target when it is a client,
	// otherwise the GM's own.
	Client *target_client = c;
	if (c->GetTarget() && c->GetTarget()->IsClient() && c->GetGM())
		target_client = c->GetTarget()->CastToClient();
	target_client->UntrainDiscAll();
}
void command_wpinfo(Client *c, const Seperator *sep)
{
	// Show the targeted NPC's waypoint/grid information.
	Mob *target = c->GetTarget();
	if (!target || !target->IsNPC()) {
		c->Message(Chat::White,"You must target an NPC to use this.");
		return;
	}
	target->CastToNPC()->DisplayWaypointInfo(c);
}
// Add a waypoint at the GM's current position to the targeted NPC's grid
// (creating a grid if the NPC has none), then reload the NPC's waypoints.
// Usage: #wpadd [pause] [-h]  — "-h" keeps the GM's heading on the waypoint.
void command_wpadd(Client *c, const Seperator *sep)
{
	int	type1 = 0,
		type2 = 0,
		pause = 0; // Defaults for a new grid

	Mob *target = c->GetTarget();
	if (target && target->IsNPC()) {
		Spawn2 *s2info = target->CastToNPC()->respawn2;

		if (s2info ==
			nullptr) // Can't figure out where this mob's spawn came from... maybe a dynamic mob created by #spawn
		{
			c->Message(
				Chat::White,
				"#wpadd FAILED -- Can't determine which spawn record in the database this mob came from!"
			);
			return;
		}

		// Optional first argument: non-negative pause (seconds?) at the waypoint.
		// NOTE(review): units of "pause" are not confirmable from this file.
		if (sep->arg[1][0]) {
			if (atoi(sep->arg[1]) >= 0) {
				pause = atoi(sep->arg[1]);
			}
			else {
				c->Message(Chat::White, "Usage: #wpadd [pause] [-h]");
				return;
			}
		}

		auto position = c->GetPosition();
		// Without "-h", store heading -1 — presumably "no fixed heading at this
		// waypoint"; confirm against AddWPForSpawn's handling.
		if (strcmp("-h", sep->arg[2]) != 0) {
			position.w = -1;
		}

		uint32 tmp_grid = database.AddWPForSpawn(c, s2info->GetID(), position, pause, type1, type2, zone->GetZoneID());
		if (tmp_grid) {
			// A new grid was created for this spawn; attach it to the NPC.
			target->CastToNPC()->SetGrid(tmp_grid);
		}

		// Re-read waypoints so the new one takes effect immediately.
		target->CastToNPC()->AssignWaypoints(target->CastToNPC()->GetGrid());
		c->Message(
			Chat::White,
			"Waypoint added. Use #wpinfo to see waypoints for this NPC (may need to #repop first)."
		);
	}
	else {
		c->Message(Chat::White, "You must target an NPC to use this.");
	}
}
void command_interrupt(Client *c, const Seperator *sep)
{
	// Interrupt the client's current cast with optional message/color ids.
	uint16 message_id = 0x01b7; // default message id when arg1 is absent
	uint16 color_id   = 0x0121; // default color id when arg2 is absent
	if (sep->arg[1][0])
		message_id = atoi(sep->arg[1]);
	if (sep->arg[2][0])
		color_id = atoi(sep->arg[2]);
	c->InterruptSpell(message_id, color_id);
}
// Summon an item into the issuing client's own inventory, identified either
// by a numeric item id or by an embedded item link, with optional charges
// and (apparently) augment slots in the trailing arguments.
void command_summonitem(Client *c, const Seperator *sep)
{
	uint32 itemid = 0;

	// Item links are delimited by '\x12'; when the message holds a complete
	// link (two distinct delimiters with enough body between them), decode
	// the item id out of the link body.
	std::string cmd_msg = sep->msg;
	size_t link_open = cmd_msg.find('\x12');
	size_t link_close = cmd_msg.find_last_of('\x12');
	if (link_open != link_close && (cmd_msg.length() - link_open) > EQEmu::constants::SAY_LINK_BODY_SIZE) {
		EQEmu::SayLinkBody_Struct link_body;
		EQEmu::saylink::DegenerateLinkBody(link_body, cmd_msg.substr(link_open + 1, EQEmu::constants::SAY_LINK_BODY_SIZE));
		itemid = link_body.item_id;
	}
	else if (!sep->IsNumber(1)) {
		c->Message(Chat::White, "Usage: #summonitem [item id | link] [charges], charges are optional");
		return;
	}
	else {
		itemid = atoi(sep->arg[1]);
	}
	if (!itemid) {
		c->Message(Chat::White, "A valid item id number is required (derived: 0)");
		return;
	}

	// Deny items whose MinStatus exceeds the caller's admin level.
	int16 item_status = 0;
	const EQEmu::ItemData* item = database.GetItem(itemid);
	if (item) {
		item_status = static_cast<int16>(item->MinStatus);
	}

	// Forward however many trailing numeric arguments were supplied —
	// NOTE(review): beyond charges (arg 2), the extra SummonItem parameters
	// look like augment ids; confirm against SummonItem's signature.
	if (item_status > c->Admin())
		c->Message(Chat::Red, "Error: Insufficient status to summon this item.");
	else if (sep->argnum == 2 && sep->IsNumber(2))
		c->SummonItem(itemid, atoi(sep->arg[2]));
	else if (sep->argnum == 3)
		c->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]));
	else if (sep->argnum == 4)
		c->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]))
;
	else if (sep->argnum == 5)
		c->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), atoi(sep->arg[5]));
	else if (sep->argnum == 6)
		c->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), atoi(sep->arg[5]), atoi(sep->arg[6]));
	else if (sep->argnum == 7)
		c->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), atoi(sep->arg[5]), atoi(sep->arg[6]), atoi(sep->arg[7]));
	else if (sep->argnum == 8)
		c->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), atoi(sep->arg[5]), atoi(sep->arg[6]), atoi(sep->arg[7]), atoi(sep->arg[8]));
	else {
		c->SummonItem(itemid);
	}
}
// Summon an item (with optional charges and further SummonItem parameters)
// directly into the targeted client's inventory.
void command_giveitem(Client *c, const Seperator *sep)
{
	if (!sep->IsNumber(1)) {
		// BUGFIX: the usage text previously named #summonitem instead of #giveitem.
		c->Message(Chat::Red, "Usage: #giveitem [item id] [charges], charges are optional");
	} else if (c->GetTarget() == nullptr) {
		c->Message(Chat::Red, "You must target a client to give the item to.");
	} else if (!c->GetTarget()->IsClient()) {
		c->Message(Chat::Red, "You can only give items to players with this command.");
	} else {
		Client *t = c->GetTarget()->CastToClient();
		uint32 itemid = atoi(sep->arg[1]);

		// Deny items whose MinStatus exceeds the caller's admin level.
		int16 item_status = 0;
		const EQEmu::ItemData* item = database.GetItem(itemid);
		if (item) {
			item_status = static_cast<int16>(item->MinStatus);
		}

		if (item_status > c->Admin())
			c->Message(Chat::Red, "Error: Insufficient status to summon this item.");
		else if (sep->argnum == 2 && sep->IsNumber(2))
			t->SummonItem(itemid, atoi(sep->arg[2]));
		else if (sep->argnum == 3)
			t->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]));
		else if (sep->argnum == 4)
			t->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]));
		else if (sep->argnum == 5)
			t->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), atoi(sep->arg[5]));
		else if (sep->argnum == 6)
			t->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), atoi(sep->arg[5]), atoi(sep->arg[6]));
		else if (sep->argnum == 7)
			t->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), atoi(sep->arg[5]), atoi(sep->arg[6]), atoi(sep->arg[7]));
		// BUGFIX: this branch previously re-tested argnum == 7, so the
		// 8-argument form was unreachable (command_summonitem uses argnum == 8).
		else if (sep->argnum == 8)
			t->SummonItem(itemid, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), atoi(sep->arg[5]), atoi(sep->arg[6]), atoi(sep->arg[7]), atoi(sep->arg[8]));
		else {
			t->SummonItem(itemid);
		}
	}
}
// Add platinum/gold/silver/copper to the targeted player's purse.
void command_givemoney(Client *c, const Seperator *sep)
{
	if (!sep->IsNumber(1)) { //as long as the first one is a number, we'll just let atoi convert the rest to 0 or a number
		// BUGFIX: usage string previously read "Usage: #Usage: #givemoney ...".
		c->Message(Chat::Red, "Usage: #givemoney [pp] [gp] [sp] [cp]");
	}
	else if(c->GetTarget() == nullptr) {
		c->Message(Chat::Red, "You must target a player to give money to.");
	}
	else if(!c->GetTarget()->IsClient()) {
		c->Message(Chat::Red, "You can only give money to players with this command.");
	}
	else {
		//TODO: update this to the client, otherwise the client doesn't show any weight change until you zone, move an item, etc
		// AddMoneyToPP takes (copper, silver, gold, platinum, ...) — hence the reversed arg order.
		c->GetTarget()->CastToClient()->AddMoneyToPP(atoi(sep->arg[4]), atoi(sep->arg[3]), atoi(sep->arg[2]), atoi(sep->arg[1]), true);
		c->Message(Chat::White, "Added %i Platinum, %i Gold, %i Silver, and %i Copper to %s's inventory.", atoi(sep->arg[1]), atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), c->GetTarget()->GetName());
	}
}
// Searches the item database by numeric ID or by case-insensitive name
// substring, printing clickable item links; name search caps at 50 hits.
void command_itemsearch(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Usage: #itemsearch [search string]");
		return;
	}

	const char *criteria = sep->argplus[1];
	EQEmu::SayLinkEngine linker;
	linker.SetLinkType(EQEmu::saylink::SayLinkItemData);

	// Purely numeric input is treated as a direct item-ID lookup.
	if (Seperator::IsNumber(criteria)) {
		const EQEmu::ItemData *item = database.GetItem(atoi(criteria));
		if (item) {
			linker.SetItemData(item);
			c->Message(Chat::White, "%u: %s", item->ID, linker.GenerateLink().c_str());
		}
		else {
			c->Message(Chat::White, "Item #%s not found", criteria);
		}
		return;
	}

	// Otherwise: upper-case both sides and do a substring scan of all items.
	char upper_criteria[255];
	strn0cpy(upper_criteria, criteria, sizeof(upper_criteria));
	strupr(upper_criteria);

	int matches = 0;
	uint32 iter = 0;
	const EQEmu::ItemData *item = nullptr;
	while ((item = database.IterateItems(&iter))) {
		char upper_name[64];
		strn0cpy(upper_name, item->Name, sizeof(upper_name));
		strupr(upper_name);
		if (strstr(upper_name, upper_criteria) == nullptr)
			continue;
		linker.SetItemData(item);
		c->Message(Chat::White, "%u: %s", item->ID, linker.GenerateLink().c_str());
		if (++matches == 50)
			break;
	}

	if (matches == 50)
		c->Message(Chat::White, "50 items shown...too many results.");
	else
		c->Message(Chat::White, "%i items found", matches);
}
// Sets the target player's (or, untargeted, the GM's own) AA experience,
// and optionally group/raid leadership experience when both extra
// arguments are numeric.
void command_setaaxp(Client *c, const Seperator *sep)
{
	Client *target = c;
	Mob *m = c->GetTarget();
	if (m && m->IsClient())
		target = m->CastToClient();

	if (!sep->IsNumber(1)) {
		c->Message(Chat::White, "Usage: #setaaxp <new AA XP value> (<new Group AA XP value> <new Raid XP value>)");
		return;
	}

	target->SetEXP(target->GetEXP(), atoi(sep->arg[1]), false);
	if (sep->IsNumber(2) && sep->IsNumber(3))
		target->SetLeadershipEXP(atoi(sep->arg[2]), atoi(sep->arg[3]));
}
// Sets AA points for the target player (or the GM): personal points by
// default, or group/raid leadership points when requested. The matching
// partial-exp counter is reset to zero.
void command_setaapts(Client *c, const Seperator *sep)
{
	Client *target = c;
	if (c->GetTarget() && c->GetTarget()->IsClient())
		target = c->GetTarget()->CastToClient();

	if (sep->arg[1][0] == '\0' || sep->arg[2][0] == '\0') {
		c->Message(Chat::White, "Usage: #setaapts <AA|group|raid> <new AA points value>");
		return;
	}

	const int points = atoi(sep->arg[2]);
	if (points <= 0 || points > 5000) {
		c->Message(Chat::White, "You must have a number greater than 0 for points and no more than 5000.");
		return;
	}

	if (strcasecmp(sep->arg[1], "group") == 0) {
		target->GetPP().group_leadership_points = points;
		target->GetPP().group_leadership_exp = 0;
		target->Message(Chat::Experience, "Setting Group AA points to %u", target->GetPP().group_leadership_points);
		target->SendLeadershipEXPUpdate();
	}
	else if (strcasecmp(sep->arg[1], "raid") == 0) {
		target->GetPP().raid_leadership_points = points;
		target->GetPP().raid_leadership_exp = 0;
		target->Message(Chat::Experience, "Setting Raid AA points to %u", target->GetPP().raid_leadership_points);
		target->SendLeadershipEXPUpdate();
	}
	else {
		target->GetPP().aapoints = points;
		target->GetPP().expAA = 0;
		target->Message(Chat::Experience, "Setting personal AA points to %u", target->GetPP().aapoints);
		target->SendAlternateAdvancementStats();
	}
}
// Sets the target player's (or the GM's) radiant or ebon crystal count,
// pushes the new counts to the client, and persists the currency.
void command_setcrystals(Client *c, const Seperator *sep)
{
	Client *target = c;
	if (c->GetTarget() && c->GetTarget()->IsClient())
		target = c->GetTarget()->CastToClient();

	if (sep->arg[1][0] == '\0' || sep->arg[2][0] == '\0') {
		c->Message(Chat::White, "Usage: #setcrystals <radiant|ebon> <new crystal count value>");
		return;
	}

	const int amount = atoi(sep->arg[2]);
	if (amount <= 0 || amount > 100000) {
		c->Message(Chat::White, "You must have a number greater than 0 for crystals and no more than 100000.");
		return;
	}

	const bool radiant = (strcasecmp(sep->arg[1], "radiant") == 0);
	const bool ebon    = (strcasecmp(sep->arg[1], "ebon") == 0);
	if (!radiant && !ebon) {
		c->Message(Chat::White, "Usage: #setcrystals <radiant|ebon> <new crystal count value>");
		return;
	}

	if (radiant)
		target->SetRadiantCrystals(amount);
	else
		target->SetEbonCrystals(amount);
	target->SendCrystalCounts();
	target->SaveCurrency();
}
void command_stun(Client *c, const Seperator *sep)
{
Mob *t=c->CastToMob();
uint32 duration;
if(sep->arg[1][0])
{
duration = atoi(sep->arg[1]);
if(c->GetTarget())
t=c->GetTarget();
if(t->IsClient())
t->CastToClient()->Stun(duration);
else
t->CastToNPC()->Stun(duration);
}
else
c->Message(Chat::White, "Usage: #stun [duration]");
}
// Bans the account owning <charname>: sets account.status = -2 with the
// given reason, tells the world server to refresh the cached flag, and
// kicks the player (locally if in this zone, otherwise via world).
// Usage: #ban <charname> <message>
void command_ban(Client *c, const Seperator *sep)
{
	if(sep->arg[1][0] == 0 || sep->arg[2][0] == 0) {
		c->Message(Chat::White, "Usage: #ban <charname> <message>");
		return;
	}
	auto account_id = database.GetAccountIDByChar(sep->arg[1]);
	// Re-assemble the ban reason from every argument after the name.
	std::string message;
	int i = 2;
	while(1) {
		if(sep->arg[i][0] == 0) {
			break;
		}
		if(message.length() > 0) {
			message.push_back(' ');
		}
		message += sep->arg[i];
		++i;
	}
	if(message.length() == 0) {
		c->Message(Chat::White, "Usage: #ban <charname> <message>");
		return;
	}
	if(account_id == 0) {
		c->Message(Chat::Red, "Character does not exist.");
		return;
	}
	std::string query = StringFormat("UPDATE account SET status = -2, ban_reason = '%s' "
									"WHERE id = %i", EscapeString(message).c_str(), account_id);
	auto results = database.QueryDatabase(query);
	c->Message(Chat::Red, "Account number %i with the character %s has been banned with message: \"%s\"", account_id, sep->arg[1], message.c_str());
	// Notify world so the in-memory account status is updated immediately.
	ServerPacket flagUpdatePack(ServerOP_FlagUpdate, 6);
	*((uint32*)&flagUpdatePack.pBuffer[0]) = account_id;
	*((int16*)&flagUpdatePack.pBuffer[4]) = -2;
	worldserver.SendPacket(&flagUpdatePack);
	// Kick directly when the player is in this zone; otherwise ask world.
	Client *client = nullptr;
	client = entity_list.GetClientByName(sep->arg[1]);
	if(client) {
		client->WorldKick();
		return;
	}
	ServerPacket kickPlayerPack(ServerOP_KickPlayer, sizeof(ServerKickPlayer_Struct));
	ServerKickPlayer_Struct* skp = (ServerKickPlayer_Struct*)kickPlayerPack.pBuffer;
	// fixed: bounded copies instead of strcpy, matching command_suspend —
	// the unbounded strcpy could overflow the fixed-size name fields.
	strn0cpy(skp->adminname, c->GetName(), sizeof(skp->adminname));
	strn0cpy(skp->name, sep->arg[1], sizeof(skp->name));
	skp->adminrank = c->Admin();
	worldserver.SendPacket(&kickPlayerPack);
}
// Suspends the account owning <charname> for <days> days with a required
// <message>, or lifts an existing suspension when <days> is 0. Kicks the
// player if online (locally, or via the world server otherwise).
void command_suspend(Client *c, const Seperator *sep)
{
	if((sep->arg[1][0] == 0) || (sep->arg[2][0] == 0)) {
		c->Message(Chat::White, "Usage: #suspend <charname> <days> (Specify 0 days to lift the suspension immediately) <message>");
		return;
	}
	// Clamp negative day counts to 0, i.e. treat them as "lift suspension".
	int duration = atoi(sep->arg[2]);
	if(duration < 0)
		duration = 0;
	// A reason message is required only when actually suspending; rebuild
	// it from every argument after the day count.
	std::string message;
	if(duration > 0) {
		int i = 3;
		while(1) {
			if(sep->arg[i][0] == 0) {
				break;
			}
			if(message.length() > 0) {
				message.push_back(' ');
			}
			message += sep->arg[i];
			++i;
		}
		if(message.length() == 0) {
			c->Message(Chat::White, "Usage: #suspend <charname> <days>(Specify 0 days to lift the suspension immediately) <message>");
			return;
		}
	}
	// Escape the character name before using it for the account lookup.
	auto escName = new char[strlen(sep->arg[1]) * 2 + 1];
	database.DoEscapeString(escName, sep->arg[1], strlen(sep->arg[1]));
	int accountID = database.GetAccountIDByChar(escName);
	safe_delete_array(escName);
	if (accountID <= 0) {
		c->Message(Chat::Red,"Character does not exist.");
		return;
	}
	// DATE_ADD(NOW(), INTERVAL 0 DAY) resolves to "now", which effectively
	// lifts the suspension.
	std::string query = StringFormat("UPDATE `account` SET `suspendeduntil` = DATE_ADD(NOW(), INTERVAL %i DAY), "
									"suspend_reason = '%s' WHERE `id` = %i",
									duration, EscapeString(message).c_str(), accountID);
	auto results = database.QueryDatabase(query);
	if(duration)
		c->Message(Chat::Red,"Account number %i with the character %s has been temporarily suspended for %i day(s).", accountID, sep->arg[1], duration);
	else
		c->Message(Chat::Red,"Account number %i with the character %s is no longer suspended.", accountID, sep->arg[1]);
	// Kick in-zone players directly; otherwise forward the kick to world.
	Client *bannedClient = entity_list.GetClientByName(sep->arg[1]);
	if(bannedClient) {
		bannedClient->WorldKick();
		return;
	}
	auto pack = new ServerPacket(ServerOP_KickPlayer, sizeof(ServerKickPlayer_Struct));
	ServerKickPlayer_Struct *sks = (ServerKickPlayer_Struct *)pack->pBuffer;
	strn0cpy(sks->adminname, c->GetName(), sizeof(sks->adminname));
	strn0cpy(sks->name, sep->arg[1], sizeof(sks->name));
	sks->adminrank = c->Admin();
	worldserver.SendPacket(pack);
	safe_delete(pack);
}
// Adds an IP address to the banned_ips table.
// Usage: #ipban [xxx.xxx.xxx.xxx]
void command_ipban(Client *c, const Seperator *sep)
{
	// fixed: test the first character, not the pointer — Seperator always
	// supplies a valid (possibly empty) string for every argument slot, so
	// the old "sep->arg[1] == 0" check could never trigger the usage branch.
	if(sep->arg[1][0] == 0)
	{
		c->Message(Chat::White, "Usage: #ipban [xxx.xxx.xxx.xxx]");
	} else {
		if(database.AddBannedIP(sep->arg[1], c->GetName())) {
			c->Message(Chat::White, "%s has been successfully added to the banned_ips table by %s", sep->arg[1], c->GetName());
		} else {
			c->Message(Chat::White, "IPBan Failed (IP address is possibly already in the table?)");
		}
	}
}
// Sets or clears the revoked flag on the account owning [charname] and
// applies it live — directly if the player is in this zone, otherwise by
// forwarding the request to the world server.
void command_revoke(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0 || sep->arg[2][0] == 0) {
		c->Message(Chat::White, "Usage: #revoke [charname] [1/0]");
		return;
	}

	// Resolve the character name to its owning account id.
	uint32 account_id = database.GetAccountIDByChar(sep->arg[1]);
	if (account_id == 0) {
		c->Message(Chat::Red,"Character does not exist.");
		return;
	}

	const int flag = (sep->arg[2][0] == '1') ? 1 : 0;
	std::string query = StringFormat("UPDATE account SET revoked = %d WHERE id = %i", flag, account_id);
	auto results = database.QueryDatabase(query);
	c->Message(Chat::Red,"%s account number %i with the character %s.", flag? "Revoking": "Unrevoking", account_id, sep->arg[1]);

	// Player present in this zone: flip the flag on the live client.
	Client* revokee = entity_list.GetClientByAccID(account_id);
	if (revokee) {
		c->Message(Chat::White, "Found %s in this zone.", revokee->GetName());
		revokee->SetRevoked(flag);
		return;
	}

	// Player elsewhere: hand the toggle off to the world server.
	c->Message(Chat::Red, "#revoke: Couldn't find %s in this zone, passing request to worldserver.", sep->arg[1]);
	auto outapp = new ServerPacket(ServerOP_Revoke, sizeof(RevokeStruct));
	RevokeStruct *revoke = (RevokeStruct *)outapp->pBuffer;
	strn0cpy(revoke->adminname, c->GetName(), 64);
	strn0cpy(revoke->name, sep->arg[1], 64);
	revoke->toggle = flag;
	worldserver.SendPacket(outapp);
	safe_delete(outapp);
}
// Sets or removes a roam box on the targeted NPC's spawngroup.
// "#roambox set box_size [delay]" writes a square box centered on the NPC's
// current position into the spawngroup row; "#roambox remove" zeroes it.
void command_roambox(Client *c, const Seperator *sep)
{
	std::string arg1 = sep->arg[1];
	Mob *target = c->GetTarget();
	if (!target || !target->IsNPC()) {
		c->Message(Chat::Red, "You need a valid NPC target for this command");
		return;
	}
	NPC *npc = dynamic_cast<NPC *>(target);
	int spawn_group_id = npc->GetSpawnGroupId();
	if (spawn_group_id <= 0) {
		c->Message(Chat::Red, "NPC needs a valid SpawnGroup!");
		return;
	}
	if (arg1 == "set") {
		// fixed: test the first character, not the array address — the old
		// pointer test was always true, so box_size defaulted via atoi("")
		// anyway but the intended delay default of 15000 was never applied.
		int box_size = (sep->arg[2][0] ? atoi(sep->arg[2]) : 0);
		int delay    = (sep->arg[3][0] ? atoi(sep->arg[3]) : 15000);
		if (box_size > 0) {
			// dist is the half-extent; the box is centered on the NPC.
			std::string query = fmt::format(
				SQL(
					UPDATE spawngroup SET
					dist = {},
					min_x = {},
					max_x = {},
					min_y = {},
					max_y = {},
					delay = {}
					WHERE id = {}
				),
				(box_size / 2),
				npc->GetX() - (box_size / 2),
				npc->GetX() + (box_size / 2),
				npc->GetY() - (box_size / 2),
				npc->GetY() + (box_size / 2),
				delay,
				spawn_group_id
			);
			database.QueryDatabase(query);
			c->Message(
				Chat::Yellow,
				"NPC (%s) Roam Box set to box size of [%i] SpawnGroupId [%i] delay [%i]",
				npc->GetCleanName(),
				box_size,
				spawn_group_id,
				delay
			);
			return;
		}
		c->Message(Chat::Red, "Box size must be set!");
	}
	if (arg1 == "remove") {
		std::string query = fmt::format(
			SQL(
				UPDATE spawngroup SET
				dist = 0,
				min_x = 0,
				max_x = 0,
				min_y = 0,
				max_y = 0,
				delay = 0
				WHERE id = {}
			),
			spawn_group_id
		);
		database.QueryDatabase(query);
		c->Message(
			Chat::Yellow,
			"NPC (%s) Roam Box has been removed from SpawnGroupID [%i]",
			npc->GetCleanName(),
			spawn_group_id
		);
		return;
	}
	c->Message(Chat::Yellow, "> Command Usage");
	c->Message(Chat::Yellow, "#roambox set box_size [delay = 0]");
	c->Message(Chat::Yellow, "#roambox remove");
}
// Toggles server-wide OOC mute by forwarding a 1/0 flag to the world server.
void command_oocmute(Client *c, const Seperator *sep)
{
	const char first = sep->arg[1][0];
	if (first != '0' && first != '1') {
		c->Message(Chat::White, "Usage: #oocmute [1/0]");
		return;
	}

	auto pack = new ServerPacket(ServerOP_OOCMute, 1);
	*(pack->pBuffer) = atoi(sep->arg[1]);
	worldserver.SendPacket(pack);
	safe_delete(pack);
}
// Reports whether the GM currently has line of sight to their target.
void command_checklos(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (!target) {
		c->Message(Chat::White, "ERROR: Target required");
		return;
	}

	const char *format = c->CheckLosFN(target)
		? "You have LOS to %s"
		: "You do not have LOS to %s";
	c->Message(Chat::White, format, target->GetName());
}
// Sets LDoN adventure points for the target player (or the GM when no
// player is targeted). Both the point count and theme must be numeric.
void command_set_adventure_points(Client *c, const Seperator *sep)
{
	Client *target = c;
	if (c->GetTarget() && c->GetTarget()->IsClient())
		target = c->GetTarget()->CastToClient();

	if (!sep->arg[1][0] || !sep->IsNumber(1) || !sep->IsNumber(2)) {
		c->Message(Chat::White, "Usage: #setadventurepoints [points] [theme]");
		return;
	}

	c->Message(Chat::White, "Updating adventure points for %s", target->GetName());
	target->UpdateLDoNPoints(atoi(sep->arg[1]), atoi(sep->arg[2]));
}
// Makes the targeted NPC say the given message.
void command_npcsay(Client *c, const Seperator *sep)
{
	if(c->GetTarget() && c->GetTarget()->IsNPC() && sep->arg[1][0])
	{
		c->GetTarget()->Say(sep->argplus[1]);
	}
	else
	{
		// fixed: the usage string was missing the closing ')'
		c->Message(Chat::White, "Usage: #npcsay message (requires NPC target)");
	}
}
// Makes the targeted NPC shout the given message.
void command_npcshout(Client *c, const Seperator *sep)
{
	if(c->GetTarget() && c->GetTarget()->IsNPC() && sep->arg[1][0])
	{
		c->GetTarget()->Shout(sep->argplus[1]);
	}
	else
	{
		// fixed: the usage string was missing the closing ')'
		c->Message(Chat::White, "Usage: #npcshout message (requires NPC target)");
	}
}
// Lists every persistent timer on the targeted player with its remaining
// time in seconds.
void command_timers(Client *c, const Seperator *sep) {
	if(!c->GetTarget() || !c->GetTarget()->IsClient()) {
		c->Message(Chat::White,"Need a player target for timers.");
		return;
	}
	Client *target = c->GetTarget()->CastToClient();

	std::vector< std::pair<pTimerType, PersistentTimer *> > timers;
	target->GetPTimers().ToVector(timers);

	c->Message(Chat::White,"Timers for target:");
	for (size_t idx = 0; idx < timers.size(); ++idx) {
		c->Message(Chat::White,"Timer %d: %d seconds remain.", timers[idx].first, timers[idx].second->GetRemainingTime());
	}
}
// Makes the targeted NPC perform the given emote text.
void command_npcemote(Client *c, const Seperator *sep)
{
	if(c->GetTarget() && c->GetTarget()->IsNPC() && sep->arg[1][0])
	{
		c->GetTarget()->Emote(sep->argplus[1]);
	}
	else
	{
		// fixed: the usage string was missing the closing ')'
		c->Message(Chat::White, "Usage: #npcemote message (requires NPC target)");
	}
}
// Mass-edits one npc_types column for every NPC spawned in the current
// zone/instance-version whose search_column matches search_value
// (substring match, or exact when the value is prefixed with '=').
// Preview-only unless the fifth argument is "apply"; the preview prints a
// saylink that re-runs the command with "apply" appended.
void command_npceditmass(Client *c, const Seperator *sep)
{
	if (strcasecmp(sep->arg[1], "usage") == 0) {
		c->Message(Chat::White, "#npceditmass search_column [exact_match: =]search_value change_column change_value");
		return;
	}
	// Fetch the editable npc_types columns so the user-supplied column
	// names can be validated before being spliced into SQL below.
	std::string query = SQL(
		SELECT
			COLUMN_NAME
		FROM
			INFORMATION_SCHEMA.COLUMNS
		WHERE
			table_name = 'npc_types'
		AND
			COLUMN_NAME != 'id'
	);
	std::string search_column, search_value, change_column, change_value;
	if (sep->arg[1]) {
		search_column = sep->arg[1];
	}
	if (sep->arg[2]) {
		search_value = sep->arg[2];
	}
	if (sep->arg[3]) {
		change_column = sep->arg[3];
	}
	if (sep->arg[4]) {
		change_value = sep->arg[4];
	}
	bool valid_change_column = false;
	bool valid_search_column = false;
	auto results = database.QueryDatabase(query);
	std::vector<std::string> possible_column_options;
	for (auto row = results.begin(); row != results.end(); ++row) {
		if (row[0] == change_column) {
			valid_change_column = true;
		}
		if (row[0] == search_column) {
			valid_search_column = true;
		}
		possible_column_options.push_back(row[0]);
	}
	std::string options_glue = ", ";
	if (!valid_search_column) {
		c->Message(Chat::Red, "You must specify a valid search column. [%s] is not valid", search_column.c_str());
		c->Message(Chat::Yellow, "Possible columns [%s]", implode(options_glue, possible_column_options).c_str());
		return;
	}
	if (!valid_change_column) {
		c->Message(Chat::Red, "You must specify a valid change column. [%s] is not valid", change_column.c_str());
		c->Message(Chat::Yellow, "Possible columns [%s]", implode(options_glue, possible_column_options).c_str());
		return;
	}
	// (The two early returns above guarantee both columns are valid here;
	// the original's combined re-check was unreachable and has been removed.)
	// Restrict candidates to NPCs actually spawned in this zone version.
	query = fmt::format(
		SQL(
			select
				id,
				name,
				{0},
				{1}
			from
				npc_types
			where
				id IN(
					select
						spawnentry.npcID
					from
						spawnentry
					join spawn2 on spawn2.spawngroupID = spawnentry.spawngroupID
					where
						spawn2.zone = '{2}' and spawn2.version = {3}
				)
		),
		search_column,
		change_column,
		zone->GetShortName(),
		zone->GetInstanceVersion()
	);
	std::string status = "(Searching)";
	if (strcasecmp(sep->arg[5], "apply") == 0) {
		status = "(Applying)";
	}
	std::vector<std::string> npc_ids;
	bool exact_match = false;
	if (search_value[0] == '=') {
		exact_match = true;
		search_value = search_value.substr(1);
	}
	int found_count = 0;
	results = database.QueryDatabase(query);
	for (auto row = results.begin(); row != results.end(); ++row) {
		std::string npc_id = row[0];
		std::string npc_name = row[1];
		// NOTE(review): the row value is lowercased but search_value is not,
		// so mixed-case input never matches — confirm whether callers are
		// expected to supply lowercase search values.
		std::string search_column_value = str_tolower(row[2]);
		std::string change_column_current_value = row[3];
		if (exact_match) {
			if (search_column_value.compare(search_value) != 0) {
				continue;
			}
		}
		else {
			if (search_column_value.find(search_value) == std::string::npos) {
				continue;
			}
		}
		c->Message(
			Chat::Yellow,
			fmt::format(
				"NPC ({0}) [{1}] ({2}) [{3}] Current ({4}) [{5}] New [{6}] {7}",
				npc_id,
				npc_name,
				search_column,
				search_column_value,
				change_column,
				change_column_current_value,
				change_value,
				status
			).c_str()
		);
		npc_ids.push_back(npc_id);
		found_count++;
	}
	std::string saylink = fmt::format(
		"#npceditmass {} {}{} {} {} apply",
		search_column,
		// fixed: the char-typed ternary ('=' : '\0') embedded a literal NUL
		// byte into the formatted string, corrupting the generated saylink.
		(exact_match ? "=" : ""),
		search_value,
		change_column,
		change_value
	);
	if (strcasecmp(sep->arg[5], "apply") == 0) {
		std::string npc_ids_string = implode(",", npc_ids);
		if (npc_ids_string.empty()) {
			c->Message(Chat::Red, "Error: Ran into an unknown error compiling NPC IDs");
			return;
		}
		database.QueryDatabase(
			fmt::format(
				"UPDATE `npc_types` SET {} = '{}' WHERE id IN ({})",
				change_column,
				change_value,
				npc_ids_string
			)
		);
		c->Message(Chat::Yellow, "Changes applied to (%i) NPC's", found_count);
		zone->Repop();
	}
	else {
		c->Message(Chat::Yellow, "Found (%i) NPC's that match this search...", found_count);
		if (found_count > 0) {
			c->Message(
				Chat::Yellow, "To apply these changes, click <%s> or type [%s]",
				EQEmu::SayLinkEngine::GenerateQuestSaylink(saylink, false, "Apply").c_str(),
				saylink.c_str()
			);
		}
	}
}
void command_npcedit(Client *c, const Seperator *sep)
{ if (!c->GetTarget() || !c->GetTarget()->IsNPC()) {
c->Message(Chat::White, "Error: Must have NPC targeted");
return;
}
if (strcasecmp(sep->arg[1], "help") == 0) {
c->Message(Chat::White, "Help File for #npcedit. Syntax for commands are:");
c->Message(Chat::White, "#npcedit Name - Sets an NPC's name");
c->Message(Chat::White, "#npcedit Lastname - Sets an NPC's lastname");
c->Message(Chat::White, "#npcedit Level - Sets an NPC's level");
c->Message(Chat::White, "#npcedit Maxlevel - Sets an NPC's maximum level");
c->Message(Chat::White, "#npcedit Race - Sets an NPC's race");
c->Message(Chat::White, "#npcedit Class - Sets an NPC's class");
c->Message(Chat::White, "#npcedit Bodytype - Sets an NPC's bodytype");
c->Message(Chat::White, "#npcedit HP - Sets an NPC's hitpoints");
c->Message(Chat::White, "#npcedit Gender - Sets an NPC's gender");
c->Message(Chat::White, "#npcedit Texture - Sets an NPC's texture");
c->Message(Chat::White, "#npcedit Helmtexture - Sets an NPC's helmet texture");
c->Message(Chat::White, "#npcedit Armtexture - Sets an NPC's arm texture");
c->Message(Chat::White, "#npcedit Bracertexture - Sets an NPC's bracer texture");
c->Message(Chat::White, "#npcedit Handtexture - Sets an NPC's hand texture");
c->Message(Chat::White, "#npcedit Legtexture - Sets an NPC's leg texture");
c->Message(Chat::White, "#npcedit Feettexture - Sets an NPC's feettexture");
c->Message(Chat::White, "#npcedit Herosforgemodel - Sets an NPC's Hero's Forge Model");
c->Message(Chat::White, "#npcedit Size - Sets an NPC's size");
c->Message(Chat::White, "#npcedit Hpregen - Sets an NPC's hitpoint regen rate per tick");
c->Message(Chat::White, "#npcedit Manaregen - Sets an NPC's mana regen rate per tick");
c->Message(Chat::White, "#npcedit Loottable - Sets the loottable ID for an NPC ");
c->Message(Chat::White, "#npcedit Merchantid - Sets the merchant ID for an NPC");
c->Message(Chat::White, "#npcedit alt_currency_id - Sets the Alternate Currency ID for an alterative currency Merchant");
c->Message(Chat::White, "#npcedit npc_spells_effects_id - Sets the NPC Spell Effects ID");
c->Message(Chat::White, "#npcedit adventure_template_id - Sets the NPC's Adventure Template ID");
c->Message(Chat::White, "#npcedit trap_template - Sets the NPC's Trap Template ID");
c->Message(Chat::White, "#npcedit special_abilities - Sets the NPC's Special Abilities");
c->Message(Chat::White, "#npcedit Spell - Sets the npc spells list ID for an NPC");
c->Message(Chat::White, "#npcedit Faction - Sets the NPC's faction id");
c->Message(Chat::White, "#npcedit Damage - Sets an NPC's damage");
c->Message(Chat::White, "#npcedit Meleetype - Sets an NPC's melee types");
c->Message(Chat::White, "#npcedit Rangedtype - Sets an NPC's ranged type");
c->Message(Chat::White, "#npcedit Ammoidfile - Sets an NPC's ammo id file");
c->Message(Chat::White, "#npcedit Aggroradius - Sets an NPC's aggro radius");
c->Message(Chat::White, "#npcedit Assistradius - Sets an NPC's assist radius");
c->Message(Chat::White, "#npcedit Social - Set to 1 if an NPC should assist others on its faction");
c->Message(Chat::White, "#npcedit Runspeed - Sets an NPC's run speed");
c->Message(Chat::White, "#npcedit Walkspeed - Sets an NPC's walk speed");
c->Message(Chat::White, "#npcedit AGI - Sets an NPC's Agility");
c->Message(Chat::White, "#npcedit CHA - Sets an NPC's Charisma");
c->Message(Chat::White, "#npcedit DEX - Sets an NPC's Dexterity");
c->Message(Chat::White, "#npcedit INT - Sets an NPC's Intelligence");
c->Message(Chat::White, "#npcedit STA - Sets an NPC's Stamina");
c->Message(Chat::White, "#npcedit STR - Sets an NPC's Strength");
c->Message(Chat::White, "#npcedit WIS - Sets an NPC's Wisdom");
c->Message(Chat::White, "#npcedit MR - Sets an NPC's Magic Resistance");
c->Message(Chat::White, "#npcedit PR - Sets an NPC's Poison Resistance");
c->Message(Chat::White, "#npcedit DR - Sets an NPC's Disease Resistance");
c->Message(Chat::White, "#npcedit FR - Sets an NPC's Fire Resistance");
c->Message(Chat::White, "#npcedit CR - Sets an NPC's Cold Resistance");
c->Message(Chat::White, "#npcedit Corrup - Sets an NPC's Corruption Resistance");
c->Message(Chat::White, "#npcedit PhR - Sets and NPC's Physical Resistance");
c->Message(Chat::White, "#npcedit Seeinvis - Sets an NPC's ability to see invis");
c->Message(Chat::White, "#npcedit Seeinvisundead - Sets an NPC's ability to see through invis vs. undead");
c->Message(Chat::White, "#npcedit Seehide - Sets an NPC's ability to see through hide");
c->Message(Chat::White, "#npcedit Seeimprovedhide - Sets an NPC's ability to see through improved hide");
c->Message(Chat::White, "#npcedit AC - Sets an NPC's Armor Class");
c->Message(Chat::White, "#npcedit ATK - Sets an NPC's Attack");
c->Message(Chat::White, "#npcedit Accuracy - Sets an NPC's Accuracy");
c->Message(Chat::White, "#npcedit Avoidance - Sets an NPC's Avoidance");
c->Message(Chat::White, "#npcedit npcaggro - Sets an NPC's npc_aggro flag");
c->Message(Chat::White, "#npcedit qglobal - Sets an NPC's quest global flag");
c->Message(Chat::White, "#npcedit spawn_limit - Sets an NPC's spawn limit counter");
c->Message(Chat::White, "#npcedit Attackspeed - Sets an NPC's attack speed modifier");
c->Message(Chat::White, "#npcedit Attackdelay - Sets an NPC's attack delay");
c->Message(Chat::White, "#npcedit Attackcount - Sets an NPC's attack count");
c->Message(Chat::White, "#npcedit findable - Sets an NPC's findable flag");
c->Message(Chat::White, "#npcedit trackable - Sets an NPC's trackable flag");
c->Message(Chat::White, "#npcedit weapon - Sets an NPC's primary and secondary weapon model");
c->Message(Chat::White, "#npcedit featuresave - Saves all current facial features to the database");
c->Message(Chat::White, "#npcedit color - Sets an NPC's red, green, and blue armor tint");
c->Message(Chat::White, "#npcedit armortint_id - Set an NPC's Armor tint ID");
c->Message(Chat::White, "#npcedit setanimation - Set an NPC's animation on spawn (Stored in spawn2 table)");
c->Message(Chat::White, "#npcedit scalerate - Set an NPC's scaling rate");
c->Message(Chat::White, "#npcedit healscale - Set an NPC's heal scaling rate");
c->Message(Chat::White, "#npcedit spellscale - Set an NPC's spell scaling rate");
c->Message(Chat::White, "#npcedit no_target - Set an NPC's ability to be targeted with the target hotkey");
c->Message(Chat::White, "#npcedit version - Set an NPC's version");
c->Message(Chat::White, "#npcedit slow_mitigation - Set an NPC's slow mitigation");
c->Message(Chat::White, "#npcedit flymode - Set an NPC's flymode [0 = ground, 1 = flying, 2 = levitate, 3 = water, 4 = floating]");
}
uint32 npcTypeID = c->GetTarget()->CastToNPC()->GetNPCTypeID();
if (strcasecmp(sep->arg[1], "name") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has the name %s.", npcTypeID, sep->argplus[2]);
std::string query = StringFormat("UPDATE npc_types SET name = '%s' WHERE id = %i", sep->argplus[2],npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "lastname") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has the lastname %s.", npcTypeID, sep->argplus[2]);
std::string query = StringFormat("UPDATE npc_types SET lastname = '%s' WHERE id = %i", sep->argplus[2],npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "flymode") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has flymode [%s]", npcTypeID, sep->argplus[2]);
std::string query = StringFormat("UPDATE npc_types SET flymode = '%s' WHERE id = %i", sep->argplus[2],npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "race") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has the race %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET race = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "class") == 0) {
c->Message(Chat::Yellow,"NPCID %u is now class %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET class = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "bodytype") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has type %i bodytype.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET bodytype = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "hp") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Hitpoints.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET hp = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "gender") == 0) {
c->Message(Chat::Yellow,"NPCID %u is now gender %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET gender = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "texture") == 0) {
c->Message(Chat::Yellow,"NPCID %u now uses texture %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET texture = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "helmtexture") == 0) {
c->Message(Chat::Yellow,"NPCID %u now uses helmtexture %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET helmtexture = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "armtexture") == 0) {
c->Message(Chat::Yellow,"NPCID %u now uses armtexture %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET armtexture = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "bracertexture") == 0) {
c->Message(Chat::Yellow,"NPCID %u now uses bracertexture %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET bracertexture = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "handtexture") == 0) {
c->Message(Chat::Yellow,"NPCID %u now uses handtexture %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET handtexture = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "legtexture") == 0) {
c->Message(Chat::Yellow,"NPCID %u now uses legtexture %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET legtexture = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "feettexture") == 0) {
c->Message(Chat::Yellow,"NPCID %u now uses feettexture %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET feettexture = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "herosforgemodel") == 0) {
c->Message(Chat::Yellow,"NPCID %u now uses herosforgemodel %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET herosforgemodel = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "size") == 0) {
c->Message(Chat::Yellow,"NPCID %u is now size %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET size = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "hpregen") == 0) {
c->Message(Chat::Yellow,"NPCID %u now regens %i hitpoints per tick.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET hp_regen_rate = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "manaregen") == 0) {
c->Message(Chat::Yellow,"NPCID %u now regens %i mana per tick.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET mana_regen_rate = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "loottable") == 0) {
c->Message(Chat::Yellow,"NPCID %u is now on loottable_id %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET loottable_id = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "merchantid") == 0) {
c->Message(Chat::Yellow,"NPCID %u is now merchant_id %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET merchant_id = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "alt_currency_id") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has field 'alt_currency_id' set to %s.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET alt_currency_id = '%s' WHERE id = %i", sep->argplus[2],npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "npc_spells_effects_id") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has field 'npc_spells_effects_id' set to %s.", npcTypeID, sep->argplus[2]);
std::string query = StringFormat("UPDATE npc_types SET npc_spells_effects_id = '%s' WHERE id = %i", sep->argplus[2],npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "adventure_template_id") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has field 'adventure_template_id' set to %s.", npcTypeID, sep->argplus[2]);
std::string query = StringFormat("UPDATE npc_types SET adventure_template_id = '%s' WHERE id = %i", sep->argplus[2],npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "trap_template") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has field 'trap_template' set to %s.", npcTypeID, sep->argplus[2]);
std::string query = StringFormat("UPDATE npc_types SET trap_template = '%s' WHERE id = %i", sep->argplus[2],npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "special_abilities") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has field 'special_abilities' set to %s.", npcTypeID, sep->argplus[2]);
std::string query = StringFormat("UPDATE npc_types SET special_abilities = '%s' WHERE id = %i", sep->argplus[2],npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "spell") == 0) {
c->Message(Chat::Yellow,"NPCID %u now uses spell list %i", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET npc_spells_id = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "faction") == 0) {
c->Message(Chat::Yellow,"NPCID %u is now faction %i", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET npc_faction_id = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "damage") == 0) {
c->Message(Chat::Yellow,"NPCID %u now hits from %i to %i", npcTypeID, atoi(sep->arg[2]), atoi(sep->arg[3]));
std::string query = StringFormat("UPDATE npc_types SET mindmg = %i, maxdmg = %i WHERE id = %i", atoi(sep->arg[2]), atoi(sep->arg[3]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "meleetype") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a primary melee type of %i and a secondary melee type of %i.", npcTypeID, atoi(sep->arg[2]), atoi(sep->arg[3]));
std::string query = StringFormat("UPDATE npc_types SET prim_melee_type = %i, sec_melee_type = %i WHERE id = %i", atoi(sep->arg[2]), atoi(sep->arg[3]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "rangedtype") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a ranged type of %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET ranged_type = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "ammoidfile") == 0) {
c->Message(Chat::Yellow,"NPCID %u's ammo id file is now %i", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET ammoidfile = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "aggroradius") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has an aggro radius of %i", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET aggroradius = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "assistradius") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has an assist radius of %i", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET assistradius = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "social") == 0) {
c->Message(Chat::Yellow,"NPCID %u social status is now %i", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET social = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "runspeed") == 0) {
c->Message(Chat::Yellow,"NPCID %u now runs at %f", npcTypeID, atof(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET runspeed = %f WHERE id = %i", atof(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "walkspeed") == 0) {
c->Message(Chat::Yellow,"NPCID %u now walks at %f", npcTypeID, atof(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET walkspeed = %f WHERE id = %i", atof(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "AGI") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Agility.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET AGI = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "CHA") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Charisma.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET CHA = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "DEX") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Dexterity.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET DEX = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "INT") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Intelligence.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET _INT = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "STA") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Stamina.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET STA = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "STR") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Strength.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET STR = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "WIS") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a Magic Resistance of %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET WIS = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "MR") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a Magic Resistance of %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET MR = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "DR") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a Disease Resistance of %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET DR = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "CR") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a Cold Resistance of %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET CR = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "FR") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a Fire Resistance of %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET FR = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "PR") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a Poison Resistance of %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET PR = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "Corrup") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a Corruption Resistance of %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET corrup = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "PhR") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a Physical Resistance of %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET PhR = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "seeinvis") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has seeinvis set to %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET see_invis = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "seeinvisundead") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has seeinvisundead set to %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET see_invis_undead = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "seehide") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has seehide set to %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET see_hide = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "seeimprovedhide") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has seeimprovedhide set to %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET see_improved_hide = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "AC") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Armor Class.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET ac = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "ATK") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Attack.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET atk = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "Accuracy") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Accuracy.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET accuracy = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "Avoidance") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i Avoidance.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET avoidance = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "level") == 0) {
c->Message(Chat::Yellow,"NPCID %u is now level %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET level = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "maxlevel") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a maximum level of %i.", npcTypeID, atoi(sep->argplus[2]));
std::string query = StringFormat("UPDATE npc_types SET maxlevel = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "qglobal") == 0) {
c->Message(Chat::Yellow,"Quest globals have been %s for NPCID %u", atoi(sep->arg[2]) == 0 ? "disabled" : "enabled", npcTypeID);
std::string query = StringFormat("UPDATE npc_types SET qglobal = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "npcaggro") == 0) {
c->Message(Chat::Yellow,"NPCID %u will now %s other NPCs with negative faction npc_value", npcTypeID, atoi(sep->arg[2]) == 0? "not aggro": "aggro");
std::string query = StringFormat("UPDATE npc_types SET npc_aggro = %i WHERE id = %i", atoi(sep->argplus[2]) == 0? 0: 1, npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "spawn_limit") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a spawn limit of %i", npcTypeID, atoi(sep->arg[2]));
std::string query = StringFormat("UPDATE npc_types SET spawn_limit = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "Attackspeed") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has attack_speed set to %f", npcTypeID, atof(sep->arg[2]));
std::string query = StringFormat("UPDATE npc_types SET attack_speed = %f WHERE id = %i", atof(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "Attackdelay") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has attack_delay set to %i", npcTypeID,atoi(sep->arg[2]));
std::string query = StringFormat("UPDATE npc_types SET attack_delay = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "Attackcount") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has attack_count set to %i", npcTypeID,atoi(sep->arg[2]));
std::string query = StringFormat("UPDATE npc_types SET attack_count = %i WHERE id = %i", atoi(sep->argplus[2]),npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "findable") == 0) {
c->Message(Chat::Yellow,"NPCID %u is now %s", npcTypeID, atoi(sep->arg[2]) == 0? "not findable": "findable");
std::string query = StringFormat("UPDATE npc_types SET findable = %i WHERE id = %i", atoi(sep->argplus[2]) == 0? 0: 1, npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "trackable") == 0) {
c->Message(Chat::Yellow,"NPCID %u is now %s", npcTypeID, atoi(sep->arg[2]) == 0? "not trackable": "trackable");
std::string query = StringFormat("UPDATE npc_types SET trackable = %i WHERE id = %i", atoi(sep->argplus[2]) == 0? 0: 1, npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "weapon") == 0) {
c->Message(Chat::Yellow,"NPCID %u will have item graphic %i set to his primary and item graphic %i set to his secondary on repop.", npcTypeID, atoi(sep->arg[2]), atoi(sep->arg[3]));
std::string query = StringFormat("UPDATE npc_types SET d_melee_texture1 = %i, d_melee_texture2 = %i WHERE id = %i", atoi(sep->arg[2]), atoi(sep->arg[3]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "featuresave") == 0) {
c->Message(Chat::Yellow,"NPCID %u saved with all current facial feature settings", npcTypeID);
Mob* target = c->GetTarget();
std::string query = StringFormat("UPDATE npc_types "
"SET luclin_haircolor = %i, luclin_beardcolor = %i, "
"luclin_hairstyle = %i, luclin_beard = %i, "
"face = %i, drakkin_heritage = %i, "
"drakkin_tattoo = %i, drakkin_details = %i "
"WHERE id = %i",
target->GetHairColor(), target->GetBeardColor(),
target->GetHairStyle(), target->GetBeard(),
target->GetLuclinFace(), target->GetDrakkinHeritage(),
target->GetDrakkinTattoo(), target->GetDrakkinDetails(),
npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "color") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has %i red, %i green, and %i blue tinting on their armor.", npcTypeID, atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]));
std::string query = StringFormat("UPDATE npc_types SET armortint_red = %i, armortint_green = %i, armortint_blue = %i WHERE id = %i", atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "armortint_id") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has field 'armortint_id' set to %s", npcTypeID, sep->arg[2]);
std::string query = StringFormat("UPDATE npc_types SET armortint_id = '%s' WHERE id = %i", sep->argplus[2], npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "setanimation") == 0) {
int animation = 0;
if(sep->arg[2] && atoi(sep->arg[2]) <= 4) {
if((strcasecmp(sep->arg[2], "stand" ) == 0) || atoi(sep->arg[2]) == 0)
animation = 0; //Stand
if((strcasecmp(sep->arg[2], "sit" ) == 0) || atoi(sep->arg[2]) == 1)
animation = 1; //Sit
if((strcasecmp(sep->arg[2], "crouch" ) == 0) || atoi(sep->arg[2]) == 2)
animation = 2; //Crouch
if((strcasecmp(sep->arg[2], "dead" ) == 0) || atoi(sep->arg[2]) == 3)
animation = 3; //Dead
if((strcasecmp(sep->arg[2], "loot" ) == 0) || atoi(sep->arg[2]) == 4)
animation = 4; //Looting Animation
} else {
c->Message(Chat::White, "You must specifiy an animation stand, sit, crouch, dead, loot (0-4)");
c->Message(Chat::White, "Example: #npcedit setanimation sit");
c->Message(Chat::White, "Example: #npcedit setanimation 0");
return;
}
c->Message(Chat::Yellow,"NPCID %u now has the animation set to %i on spawn with spawngroup %i", npcTypeID, animation,
c->GetTarget()->CastToNPC()->GetSpawnGroupId() );
std::string query = StringFormat("UPDATE spawn2 SET animation = %i " "WHERE spawngroupID = %i", animation,
c->GetTarget()->CastToNPC()->GetSpawnGroupId());
database.QueryDatabase(query);
c->GetTarget()->SetAppearance(EmuAppearance(animation));
return;
}
if (strcasecmp(sep->arg[1], "scalerate") == 0) {
c->Message(Chat::Yellow,"NPCID %u now has a scaling rate of %i.", npcTypeID, atoi(sep->arg[2]));
std::string query = StringFormat("UPDATE npc_types SET scalerate = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "healscale") == 0) {
c->Message(Chat::Yellow, "NPCID %u now has a heal scaling rate of %i.", npcTypeID, atoi(sep->arg[2]));
std::string query = StringFormat("UPDATE npc_types SET healscale = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "spellscale") == 0) {
c->Message(Chat::Yellow, "NPCID %u now has a spell scaling rate of %i.", npcTypeID, atoi(sep->arg[2]));
std::string query = StringFormat("UPDATE npc_types SET spellscale = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "no_target") == 0) {
c->Message(Chat::Yellow, "NPCID %u is now %s.", npcTypeID, atoi(sep->arg[2]) == 0? "targetable": "untargetable");
std::string query = StringFormat("UPDATE npc_types SET no_target_hotkey = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "version") == 0) {
c->Message(Chat::Yellow, "NPCID %u is now version %i.", npcTypeID, atoi(sep->arg[2]));
std::string query = StringFormat("UPDATE npc_types SET version = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if (strcasecmp(sep->arg[1], "slow_mitigation") == 0) {
c->Message(Chat::Yellow, "NPCID %u's slow mitigation limit is now %i.", npcTypeID, atoi(sep->arg[2]));
std::string query = StringFormat("UPDATE npc_types SET slow_mitigation = %i WHERE id = %i", atoi(sep->argplus[2]), npcTypeID);
database.QueryDatabase(query);
return;
}
if((sep->arg[1][0] == 0 || strcasecmp(sep->arg[1],"*")==0) || ((c->GetTarget()==0) || (c->GetTarget()->IsClient())))
c->Message(Chat::White, "Type #npcedit help for more info");
}
#ifdef PACKET_PROFILER
// Dump the packet profile of the targeted client, or of the commanding
// client when no client is targeted (PACKET_PROFILER builds only).
void command_packetprofile(Client *c, const Seperator *sep) {
	Client *t = c;
	if (c->GetTarget() && c->GetTarget()->IsClient()) {
		t = c->GetTarget()->CastToClient();
	}
	// BUGFIX: previously called c->DumpPacketProfile() unconditionally,
	// leaving the resolved target `t` unused.
	t->DumpPacketProfile();
}
#endif
#ifdef EQPROFILE
// Dump the accumulated zone profiling data (EQPROFILE builds only).
void command_profiledump(Client *c, const Seperator *sep) {
	DumpZoneProfile();
}
// Reset the accumulated zone profiling data (EQPROFILE builds only).
void command_profilereset(Client *c, const Seperator *sep) {
	ResetZoneProfile();
}
#endif
// "#opcode reload" — reload the opcode mappings for every client patch.
void command_opcode(Client *c, const Seperator *sep) {
	const bool wants_reload = (strcasecmp(sep->arg[1], "reload") == 0);
	if (wants_reload) {
		ReloadAllPatches();
		c->Message(Chat::White, "Opcodes for all patches have been reloaded");
	}
}
// "#qglobal [on/off/view]" — toggle or inspect the npc_types.qglobal
// column for the targeted NPC. Changes take effect on zone reboot.
void command_qglobal(Client *c, const Seperator *sep) {
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::White, "Syntax: #qglobal [on/off/view]. Requires NPC target.");
		return;
	}

	Mob *target = c->GetTarget();
	if (!target || !target->IsNPC()) {
		c->Message(Chat::Red, "NPC Target Required!");
		return;
	}

	// "on" and "off" differ only in the value written, so they share a path.
	const bool enable  = (strcasecmp(sep->arg[1], "on") == 0);
	const bool disable = (strcasecmp(sep->arg[1], "off") == 0);
	if (enable || disable) {
		std::string query = StringFormat("UPDATE npc_types SET qglobal = %i WHERE id = '%i'",
						 enable ? 1 : 0, target->GetNPCTypeID());
		auto results = database.QueryDatabase(query);
		if (!results.Success()) {
			c->Message(Chat::Yellow, "Could not update database.");
			return;
		}
		c->Message(Chat::Yellow, "Success! Changes take effect on zone reboot.");
		return;
	}

	if (strcasecmp(sep->arg[1], "view") == 0) {
		const NPCType *type = database.LoadNPCTypesData(target->GetNPCTypeID());
		if (!type)
			c->Message(Chat::Yellow, "Invalid NPC type.");
		else if (type->qglobal)
			c->Message(Chat::Yellow, "This NPC has quest globals active.");
		else
			c->Message(Chat::Yellow, "This NPC has quest globals disabled.");
		return;
	}

	c->Message(Chat::Yellow, "Invalid action specified.");
}
// Forward the "#path" command to the zone's pathing debugger, if any.
void command_path(Client *c, const Seperator *sep)
{
	if (zone->pathing == nullptr) {
		return;
	}
	zone->pathing->DebugCommand(c, sep);
}
void Client::Undye() {
for (int cur_slot = EQEmu::textures::textureBegin; cur_slot <= EQEmu::textures::LastTexture; cur_slot++) {
uint8 slot2=SlotConvert(cur_slot);
EQEmu::ItemInstance* inst = m_inv.GetItem(slot2);
if(inst != nullptr) {
inst->SetColor(inst->GetItem()->Color);
database.SaveInventory(CharacterID(), inst, slot2);
}
m_pp.item_tint.Slot[cur_slot].Color = 0;
SendWearChange(cur_slot);
}
database.DeleteCharacterDye(this->CharacterID());
}
// "#undye" — remove all armor dye from the targeted client.
void command_undye(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (target == nullptr || !target->IsClient()) {
		c->Message(Chat::White, "ERROR: Client target required");
		return;
	}
	target->CastToClient()->Undye();
}
// "#ucs" — re-send the chat-server and mail-server connection packets to
// the client so it can reconnect to the Universal Chat Service. Does
// nothing when the UCS server is currently unavailable.
void command_ucs(Client *c, const Seperator *sep)
{
	if (!c)
		return;
	LogInfo("Character [{}] attempting ucs reconnect while ucs server is [{}] available",
		c->GetName(), (zone->IsUCSServerAvailable() ? "" : "un"));
	if (zone->IsUCSServerAvailable()) {
		EQApplicationPacket* outapp = nullptr;
		std::string buffer;
		// Fresh mail key for this reconnect attempt.
		std::string MailKey = database.GetMailKey(c->CharacterID(), true);
		EQEmu::versions::UCSVersion ConnectionType = EQEmu::versions::ucsUnknown;
		// chat server packet
		// Pick the chat connection type matching the client's protocol version.
		switch (c->ClientVersion()) {
		case EQEmu::versions::ClientVersion::Titanium:
			ConnectionType = EQEmu::versions::ucsTitaniumChat;
			break;
		case EQEmu::versions::ClientVersion::SoF:
			ConnectionType = EQEmu::versions::ucsSoFCombined;
			break;
		case EQEmu::versions::ClientVersion::SoD:
			ConnectionType = EQEmu::versions::ucsSoDCombined;
			break;
		case EQEmu::versions::ClientVersion::UF:
			ConnectionType = EQEmu::versions::ucsUFCombined;
			break;
		case EQEmu::versions::ClientVersion::RoF:
			ConnectionType = EQEmu::versions::ucsRoFCombined;
			break;
		case EQEmu::versions::ClientVersion::RoF2:
			ConnectionType = EQEmu::versions::ucsRoF2Combined;
			break;
		default:
			ConnectionType = EQEmu::versions::ucsUnknown;
			break;
		}
		// NOTE: ConnectionType is encoded as a single character via %c.
		buffer = StringFormat("%s,%i,%s.%s,%c%s",
			Config->ChatHost.c_str(),
			Config->ChatPort,
			Config->ShortName.c_str(),
			c->GetName(),
			ConnectionType,
			MailKey.c_str()
		);
		// +1 for the explicit NUL terminator appended below.
		outapp = new EQApplicationPacket(OP_SetChatServer, (buffer.length() + 1));
		memcpy(outapp->pBuffer, buffer.c_str(), buffer.length());
		outapp->pBuffer[buffer.length()] = '\0';
		c->QueuePacket(outapp);
		safe_delete(outapp);
		// mail server packet
		// Only Titanium uses a distinct mail connection type; all other
		// versions reuse the combined type chosen above.
		switch (c->ClientVersion()) {
		case EQEmu::versions::ClientVersion::Titanium:
			ConnectionType = EQEmu::versions::ucsTitaniumMail;
			break;
		default:
			// retain value from previous switch
			break;
		}
		buffer = StringFormat("%s,%i,%s.%s,%c%s",
			Config->MailHost.c_str(),
			Config->MailPort,
			Config->ShortName.c_str(),
			c->GetName(),
			ConnectionType,
			MailKey.c_str()
		);
		outapp = new EQApplicationPacket(OP_SetChatServer2, (buffer.length() + 1));
		memcpy(outapp->pBuffer, buffer.c_str(), buffer.length());
		outapp->pBuffer[buffer.length()] = '\0';
		c->QueuePacket(outapp);
		safe_delete(outapp);
	}
}
// "#undyeme" — remove all armor dye from the commanding client.
void command_undyeme(Client *c, const Seperator *sep)
{
	if (c == nullptr)
		return;
	c->Undye();
	c->Message(Chat::Red, "Dye removed from all slots. Please zone for the process to complete.");
}
// "#ginfo" — report group membership and member roles for the targeted
// client, or for the commanding client when no client is targeted.
void command_ginfo(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	Client *t = (target && target->IsClient()) ? target->CastToClient() : c;

	Group *g = t->GetGroup();
	if (!g) {
		c->Message(Chat::White, "This client is not in a group");
		return;
	}

	c->Message(Chat::White, "Player: %s is in Group #%lu: with %i members", t->GetName(), (unsigned long)g->GetID(), g->GroupCount());

	for (uint32 i = 0; i < MAX_GROUP_MEMBERS; i++) {
		const char *assist = (g->MemberRoles[i] & RoleAssist) ? "Assist" : "";
		const char *tank   = (g->MemberRoles[i] & RoleTank)   ? "Tank"   : "";
		const char *puller = (g->MemberRoles[i] & RolePuller) ? "Puller" : "";
		if (g->members[i] == nullptr) {
			// Slot with a name but no Mob pointer: member is zoned out.
			if (g->membername[i][0] == '\0')
				continue;
			c->Message(Chat::White, "...Zoned Member: %s, Roles: %s %s %s", g->membername[i],
				assist, tank, puller);
		} else {
			c->Message(Chat::White, "...In-Zone Member: %s (0x%x) Roles: %s %s %s", g->membername[i], g->members[i],
				assist, tank, puller);
		}
	}
}
// "#hp" — force a resend of the client's HP and mana/endurance updates.
void command_hp(Client *c, const Seperator *sep)
{
	c->SendHPUpdate();
	c->CheckManaEndUpdate();
}
// "#aggro <distance> [-v]" — describe which entities within <distance>
// would aggro the targeted NPC; "-v" selects verbose output.
void command_aggro(Client *c, const Seperator *sep)
{
	Mob *target = c->GetTarget();
	if (target == nullptr || !target->IsNPC()) {
		c->Message(Chat::White, "Error: you must have an NPC target.");
		return;
	}
	float distance = atof(sep->arg[1]);
	if (distance == 0.0f) {
		c->Message(Chat::Red, "Error: distance argument required.");
		return;
	}
	// Equivalent to the original char-by-char check for exactly "-v".
	bool verbose = (strcmp(sep->arg[2], "-v") == 0);
	entity_list.DescribeAggro(c, target->CastToNPC(), distance, verbose);
}
// "#pf" — print pathing debug info for the current target: position,
// current waypoint (plus max waypoint for NPCs, -1 otherwise), waypoint
// pause and run animation speed.
void command_pf(Client *c, const Seperator *sep)
{
	Mob *who = c->GetTarget();
	if (who == nullptr) {
		c->Message(Chat::White, "ERROR: target required");
		return;
	}
	c->Message(Chat::White, "POS: (%.2f, %.2f, %.2f)", who->GetX(), who->GetY(), who->GetZ());
	// BUGFIX: the format string was "WP: %s (%d/%d)" — three conversions
	// but only two arguments were supplied, which is undefined behavior
	// for the trailing %d. It now has one conversion per argument.
	c->Message(Chat::White, "WP: %s (%d)", to_string(who->GetCurrentWayPoint()).c_str(),
		who->IsNPC() ? who->CastToNPC()->GetMaxWp() : -1);
	c->Message(Chat::White, "pause=%d RAspeed=%d", who->GetCWPP(), who->GetRunAnimSpeed());
	//who->DumpMovement(c);
}
// "#bestz" — report the best-fit ground Z at the client's position using
// the zone collision map, then report the water/lava region type at the
// client's (or target's) position using the water map.
void command_bestz(Client *c, const Seperator *sep) {
	if (zone->zonemap == nullptr) {
		c->Message(Chat::White,"Map not loaded for this zone");
	} else {
		glm::vec3 me;
		me.x = c->GetX();
		me.y = c->GetY();
		// Start the probe at roughly head height; a size of 0 falls back to 6.
		me.z = c->GetZ() + (c->GetSize() == 0.0 ? 6 : c->GetSize()) * HEAD_POSITION;
		glm::vec3 hit;
		glm::vec3 bme(me);
		bme.z -= 500;  // NOTE(review): bme is not used after this line — confirm intent
		float best_z = zone->zonemap->FindBestZ(me, &hit);
		if (best_z != BEST_Z_INVALID)
		{
			c->Message(Chat::White, "Z is %.3f at (%.3f, %.3f).", best_z, me.x, me.y);
		}
		else
		{
			c->Message(Chat::White, "Found no Z.");
		}
	}
	if(zone->watermap == nullptr) {
		c->Message(Chat::White,"Water Region Map not loaded for this zone");
	} else {
		WaterRegionType RegionType;
		float z;
		if(c->GetTarget()) {
			// Probe at the target's position when one exists.
			z=c->GetTarget()->GetZ();
			auto position = glm::vec3(c->GetTarget()->GetX(), c->GetTarget()->GetY(), z);
			RegionType = zone->watermap->ReturnRegionType(position);
			c->Message(Chat::White,"InWater returns %d", zone->watermap->InWater(position));
			c->Message(Chat::White,"InLava returns %d", zone->watermap->InLava(position));
		}
		else {
			// No target: probe at the client's own position.
			z=c->GetZ();
			auto position = glm::vec3(c->GetX(), c->GetY(), z);
			RegionType = zone->watermap->ReturnRegionType(position);
			c->Message(Chat::White,"InWater returns %d", zone->watermap->InWater(position));
			c->Message(Chat::White,"InLava returns %d", zone->watermap->InLava(position));
		}
		// Translate the region type into a player-facing description.
		switch(RegionType) {
		case RegionTypeNormal: { c->Message(Chat::White,"There is nothing special about the region you are in!"); break; }
		case RegionTypeWater: { c->Message(Chat::White,"You/your target are in Water."); break; }
		case RegionTypeLava: { c->Message(Chat::White,"You/your target are in Lava."); break; }
		case RegionTypeVWater: { c->Message(Chat::White,"You/your target are in VWater (Icy Water?)."); break; }
		case RegionTypePVP: { c->Message(Chat::White, "You/your target are in a pvp enabled area."); break; }
		case RegionTypeSlime: { c->Message(Chat::White, "You/your target are in slime."); break; }
		case RegionTypeIce: { c->Message(Chat::White, "You/your target are in ice."); break; }
		default: c->Message(Chat::White,"You/your target are in an unknown region type.");
		}
	}
}
// "#reloadstatic" — reload the current zone's static data.
void command_reloadstatic(Client *c, const Seperator *sep) {
	c->Message(Chat::White, "Reloading zone static data...");
	zone->ReloadStaticData();
}
// "#flags" — show zone flags for the commanding client, or for the
// targeted client when the commander's status permits it.
void command_flags(Client *c, const Seperator *sep) {
	Client *subject = c;
	Mob *target = c->GetTarget();
	if (c->Admin() >= minStatusToSeeOthersZoneFlags && target != nullptr && target->IsClient()) {
		subject = target->CastToClient();
	}
	subject->SendZoneFlagInfo(c);
}
void command_flagedit(Client *c, const Seperator *sep) {
//super-command for editing zone flags
if(sep->arg[1][0] == '\0' || !strcasecmp(sep->arg[1], "help")) {
c->Message(Chat::White, "Syntax: #flagedit [lockzone|unlockzone|listzones|give|take].");
c->Message(Chat::White, "...lockzone [zone id/short] [flag name] - Set the specified flag name on the zone, locking the zone");
c->Message(Chat::White, "...unlockzone [zone id/short] - Removes the flag requirement from the specified zone");
c->Message(Chat::White, "...listzones - List all zones which require a flag, and their flag's name");
c->Message(Chat::White, "...give [zone id/short] - Give your target the zone flag for the specified zone.");
c->Message(Chat::White, "...take [zone id/short] - Take the zone flag for the specified zone away from your target");
c->Message(Chat::White, "...Note: use #flags to view flags on a person");
return;
}
if(!strcasecmp(sep->arg[1], "lockzone")) {
uint32 zoneid = 0;
if(sep->arg[2][0] != '\0') {
zoneid = atoi(sep->arg[2]);
if(zoneid < 1) {
zoneid = database.GetZoneID(sep->arg[2]);
}
}
if(zoneid < 1) {
c->Message(Chat::Red, "zone required. see help.");
return;
}
char flag_name[128];
if(sep->argplus[3][0] == '\0') {
c->Message(Chat::Red, "flag name required. see help.");
return;
}
database.DoEscapeString(flag_name, sep->argplus[3], 64);
flag_name[127] = '\0';
std::string query = StringFormat("UPDATE zone SET flag_needed = '%s' "
"WHERE zoneidnumber = %d AND version = %d",
flag_name, zoneid, zone->GetInstanceVersion());
auto results = database.QueryDatabase(query);
if(!results.Success()) {
c->Message(Chat::Red, "Error updating zone: %s", results.ErrorMessage().c_str());
return;
}
c->Message(Chat::Yellow, "Success! Zone %s now requires a flag, named %s", database.GetZoneName(zoneid), flag_name);
return;
}
if(!strcasecmp(sep->arg[1], "unlockzone")) {
uint32 zoneid = 0;
if(sep->arg[2][0] != '\0') {
zoneid = atoi(sep->arg[2]);
if(zoneid < 1) {
zoneid = database.GetZoneID(sep->arg[2]);
}
}
if(zoneid < 1) {
c->Message(Chat::Red, "zone required. see help.");
return;
}
std::string query = StringFormat("UPDATE zone SET flag_needed = '' "
"WHERE zoneidnumber = %d AND version = %d",
zoneid, zone->GetInstanceVersion());
auto results = database.QueryDatabase(query);
if(!results.Success()) {
c->Message(Chat::Yellow, "Error updating zone: %s", results.ErrorMessage().c_str());
return;
}
c->Message(Chat::Yellow, "Success! Zone %s no longer requires a flag.", database.GetZoneName(zoneid));
return;
}
if(!strcasecmp(sep->arg[1], "listzones")) {
std::string query = "SELECT zoneidnumber, short_name, long_name, version, flag_needed "
"FROM zone WHERE flag_needed != ''";
auto results = database.QueryDatabase(query);
if (!results.Success()) {
return;
}
c->Message(Chat::White, "Zones which require flags:");
for (auto row = results.begin(); row != results.end(); ++row)
c->Message(Chat::White, "Zone %s (%s,%s) version %s requires key %s", row[2], row[0], row[1], row[3], row[4]);
return;
}
if(!strcasecmp(sep->arg[1], "give")) {
uint32 zoneid = 0;
if(sep->arg[2][0] != '\0') {
zoneid = atoi(sep->arg[2]);
if(zoneid < 1) {
zoneid = database.GetZoneID(sep->arg[2]);
}
}
if(zoneid < 1) {
c->Message(Chat::Red, "zone required. see help.");
return;
}
Mob *t = c->GetTarget();
if(t == nullptr || !t->IsClient()) {
c->Message(Chat::Red, "client target required");
return;
}
t->CastToClient()->SetZoneFlag(zoneid);
return;
}
if(!strcasecmp(sep->arg[1], "give")) {
uint32 zoneid = 0;
if(sep->arg[2][0] != '\0') {
zoneid = atoi(sep->arg[2]);
if(zoneid < 1) {
zoneid = database.GetZoneID(sep->arg[2]);
}
}
if(zoneid < 1) {
c->Message(Chat::Red, "zone required. see help.");
return;
}
Mob *t = c->GetTarget();
if(t == nullptr || !t->IsClient()) {
c->Message(Chat::Red, "client target required");
return;
}
t->CastToClient()->ClearZoneFlag(zoneid);
return;
}
c->Message(Chat::Yellow, "Invalid action specified. use '#flagedit help' for help");
}
// #serverrules - sends the server rules text back to the requesting client.
void command_serverrules(Client *c, const Seperator *sep)
{
	c->SendRules(c);
}
// #acceptrules - records the player's agreement to the server rules (once),
// stands the character up, and confirms in chat.
void command_acceptrules(Client *c, const Seperator *sep)
{
	// Nothing to do if agreement is already on file for this account.
	if (database.GetAgreementFlag(c->AccountID()))
		return;

	database.SetAgreementFlag(c->AccountID());
	c->SendAppearancePacket(AT_Anim, ANIM_STAND);
	c->Message(Chat::White,"It is recorded you have agreed to the rules.");
}
// #guildcreate <name> - submits a guild-creation approval request.
// The name must be 5 to 15 characters long.
void command_guildcreate(Client *c, const Seperator *sep)
{
	const size_t name_len = strlen(sep->argplus[1]);
	if (name_len <= 4 || name_len >= 16) {
		c->Message(Chat::White,"Guild name must be more than 4 characters and less than 16.");
		return;
	}
	guild_mgr.AddGuildApproval(sep->argplus[1], c);
}
// #guildapprove <refid> - adds this client's approval to the pending
// guild-creation request identified by the numeric reference id.
void command_guildapprove(Client *c, const Seperator *sep)
{
	guild_mgr.AddMemberApproval(atoi(sep->arg[1]),c);
}
// #guildlist <refid> - lists the approved members on a pending
// guild-creation request identified by the numeric reference id.
void command_guildlist(Client *c, const Seperator *sep)
{
	GuildApproval *approval = guild_mgr.FindGuildByIDApproval(atoi(sep->arg[1]));
	if (approval == nullptr) {
		c->Message(Chat::White,"Could not find reference id.");
		return;
	}
	approval->ApprovedMembers(c);
}
// #hatelist - prints the current target's hate list to the requesting client.
void command_hatelist(Client *c, const Seperator *sep) {
	Mob *tgt = c->GetTarget();
	if (tgt == nullptr) {
		c->Message(Chat::White, "Error: you must have a target.");
		return;
	}

	c->Message(Chat::White, "Display hate list for %s..", tgt->GetName());
	tgt->PrintHateListToClient(c);
}
// #rules - super-command for managing rule sets and individual rule values.
// "Set manipulation" subcommands (listsets/current/reload/switch/load/store)
// operate on named rule sets stored in the database; "running rule"
// subcommands (reset/get/set/setdb/list/values) operate on the rules active
// in this zone, optionally persisting to the database.
void command_rules(Client *c, const Seperator *sep) {
	//super-command for managing rules settings
	// No argument, or "help": print the usage summary and exit.
	if(sep->arg[1][0] == '\0' || !strcasecmp(sep->arg[1], "help")) {
		c->Message(Chat::White, "Syntax: #rules [subcommand].");
		c->Message(Chat::White, "-- Rule Set Manipulation --");
		c->Message(Chat::White, "...listsets - List avaliable rule sets");
		c->Message(Chat::White, "...current - gives the name of the ruleset currently running in this zone");
		c->Message(Chat::White, "...reload - Reload the selected ruleset in this zone");
		c->Message(Chat::White, "...switch (ruleset name) - Change the selected ruleset and load it");
		c->Message(Chat::White, "...load (ruleset name) - Load a ruleset in just this zone without changing the selected set");
		//too lazy to write this right now:
		// c->Message(Chat::White, "...wload (ruleset name) - Load a ruleset in all zones without changing the selected set");
		c->Message(Chat::White, "...store [ruleset name] - Store the running ruleset as the specified name");
		c->Message(Chat::White, "---------------------");
		c->Message(Chat::White, "-- Running Rule Manipulation --");
		c->Message(Chat::White, "...reset - Reset all rules to their default values");
		c->Message(Chat::White, "...get [rule] - Get the specified rule's local value");
		c->Message(Chat::White, "...set (rule) (value) - Set the specified rule to the specified value locally only");
		c->Message(Chat::White, "...setdb (rule) (value) - Set the specified rule to the specified value locally and in the DB");
		c->Message(Chat::White, "...list [catname] - List all rules in the specified category (or all categiries if omitted)");
		c->Message(Chat::White, "...values [catname] - List the value of all rules in the specified category");
		return;
	}
	// current: report the active ruleset's name and id.
	if(!strcasecmp(sep->arg[1], "current")) {
		c->Message(Chat::White, "Currently running ruleset '%s' (%d)", RuleManager::Instance()->GetActiveRuleset(),
			RuleManager::Instance()->GetActiveRulesetID());
	// listsets: enumerate every named rule set stored in the database.
	} else if(!strcasecmp(sep->arg[1], "listsets")) {
		std::map<int, std::string> sets;
		if(!RuleManager::Instance()->ListRulesets(&database, sets)) {
			c->Message(Chat::Red, "Failed to list rule sets!");
			return;
		}
		c->Message(Chat::White, "Avaliable rule sets:");
		std::map<int, std::string>::iterator cur, end;
		cur = sets.begin();
		end = sets.end();
		for(; cur != end; ++cur) {
			c->Message(Chat::White, "(%d) %s", cur->first, cur->second.c_str());
		}
	// reload: re-read the currently active ruleset from the database.
	} else if(!strcasecmp(sep->arg[1], "reload")) {
		RuleManager::Instance()->LoadRules(&database, RuleManager::Instance()->GetActiveRuleset(), true);
		c->Message(Chat::White, "The active ruleset (%s (%d)) has been reloaded", RuleManager::Instance()->GetActiveRuleset(),
			RuleManager::Instance()->GetActiveRulesetID());
	// switch: persist the named set as the selected one and load it locally.
	} else if(!strcasecmp(sep->arg[1], "switch")) {
		//make sure this is a valid rule set..
		int rsid = RuleManager::Instance()->GetRulesetID(&database, sep->arg[2]);
		if(rsid < 0) {
			c->Message(Chat::Red, "Unknown rule set '%s'", sep->arg[2]);
			return;
		}
		if(!database.SetVariable("RuleSet", sep->arg[2])) {
			c->Message(Chat::Red, "Failed to update variables table to change selected rule set");
			return;
		}
		//TODO: we likely want to reload this ruleset everywhere...
		RuleManager::Instance()->LoadRules(&database, sep->arg[2], true);
		c->Message(Chat::White, "The selected ruleset has been changed to (%s (%d)) and reloaded locally", sep->arg[2], rsid);
	// load: load the named set in this zone only (selected set unchanged).
	} else if(!strcasecmp(sep->arg[1], "load")) {
		//make sure this is a valid rule set..
		int rsid = RuleManager::Instance()->GetRulesetID(&database, sep->arg[2]);
		if(rsid < 0) {
			c->Message(Chat::Red, "Unknown rule set '%s'", sep->arg[2]);
			return;
		}
		RuleManager::Instance()->LoadRules(&database, sep->arg[2], true);
		c->Message(Chat::White, "Loaded ruleset '%s' (%d) locally", sep->arg[2], rsid);
	// store: save the running rules, either over the current set (no name)
	// or as the named set (one name argument).
	} else if(!strcasecmp(sep->arg[1], "store")) {
		if(sep->argnum == 1) {
			//store current rule set.
			RuleManager::Instance()->SaveRules(&database);
			c->Message(Chat::White, "Rules saved");
		} else if(sep->argnum == 2) {
			RuleManager::Instance()->SaveRules(&database, sep->arg[2]);
			int prersid = RuleManager::Instance()->GetActiveRulesetID();
			int rsid = RuleManager::Instance()->GetRulesetID(&database, sep->arg[2]);
			if(rsid < 0) {
				c->Message(Chat::Red, "Unable to query ruleset ID after store, it most likely failed.");
			} else {
				c->Message(Chat::White, "Stored rules as ruleset '%s' (%d)", sep->arg[2], rsid);
				// Saving under a new name makes that set active locally.
				if(prersid != rsid) {
					c->Message(Chat::White, "Rule set %s (%d) is now active in this zone", sep->arg[2], rsid);
				}
			}
		} else {
			c->Message(Chat::Red, "Invalid argument count, see help.");
			return;
		}
	// reset: restore every running rule to its compiled-in default.
	} else if(!strcasecmp(sep->arg[1], "reset")) {
		RuleManager::Instance()->ResetRules(true);
		c->Message(Chat::White, "The running ruleset has been set to defaults");
	// get: print one rule's current local value.
	} else if(!strcasecmp(sep->arg[1], "get")) {
		if(sep->argnum != 2) {
			c->Message(Chat::Red, "Invalid argument count, see help.");
			return;
		}
		std::string value;
		if(!RuleManager::Instance()->GetRule(sep->arg[2], value))
			c->Message(Chat::Red, "Unable to find rule %s", sep->arg[2]);
		else
			c->Message(Chat::White, "%s - %s", sep->arg[2], value.c_str());
	// set: change a rule locally only (not persisted).
	} else if(!strcasecmp(sep->arg[1], "set")) {
		if(sep->argnum != 3) {
			c->Message(Chat::Red, "Invalid argument count, see help.");
			return;
		}
		if(!RuleManager::Instance()->SetRule(sep->arg[2], sep->arg[3], nullptr, false, true)) {
			c->Message(Chat::Red, "Failed to modify rule");
		} else {
			c->Message(Chat::White, "Rule modified locally.");
		}
	// setdb: change a rule locally and write it to the database.
	} else if(!strcasecmp(sep->arg[1], "setdb")) {
		if(sep->argnum != 3) {
			c->Message(Chat::Red, "Invalid argument count, see help.");
			return;
		}
		if(!RuleManager::Instance()->SetRule(sep->arg[2], sep->arg[3], &database, true, true)) {
			c->Message(Chat::Red, "Failed to modify rule");
		} else {
			c->Message(Chat::White, "Rule modified locally and in the database.");
		}
	// list: with no category, list categories; with one, list its rule names
	// ("all" means no category filter).
	} else if(!strcasecmp(sep->arg[1], "list")) {
		if(sep->argnum == 1) {
			std::vector<const char *> rule_list;
			if(!RuleManager::Instance()->ListCategories(rule_list)) {
				c->Message(Chat::Red, "Failed to list categories!");
				return;
			}
			c->Message(Chat::White, "Rule Categories:");
			std::vector<const char *>::iterator cur, end;
			cur = rule_list.begin();
			end = rule_list.end();
			for(; cur != end; ++cur) {
				c->Message(Chat::White, " %s", *cur);
			}
		} else if(sep->argnum == 2) {
			const char *catfilt = nullptr;
			if(std::string("all") != sep->arg[2])
				catfilt = sep->arg[2];
			std::vector<const char *> rule_list;
			if(!RuleManager::Instance()->ListRules(catfilt, rule_list)) {
				c->Message(Chat::Red, "Failed to list rules!");
				return;
			}
			c->Message(Chat::White, "Rules in category %s:", sep->arg[2]);
			std::vector<const char *>::iterator cur, end;
			cur = rule_list.begin();
			end = rule_list.end();
			for(; cur != end; ++cur) {
				c->Message(Chat::White, " %s", *cur);
			}
		} else {
			c->Message(Chat::Red, "Invalid argument count, see help.");
		}
	// values: like "list" with a category, but prints each rule's value too.
	} else if(!strcasecmp(sep->arg[1], "values")) {
		if(sep->argnum != 2) {
			c->Message(Chat::Red, "Invalid argument count, see help.");
			return;
		} else {
			const char *catfilt = nullptr;
			if(std::string("all") != sep->arg[2])
				catfilt = sep->arg[2];
			std::vector<const char *> rule_list;
			if(!RuleManager::Instance()->ListRules(catfilt, rule_list)) {
				c->Message(Chat::Red, "Failed to list rules!");
				return;
			}
			c->Message(Chat::White, "Rules & values in category %s:", sep->arg[2]);
			std::vector<const char *>::iterator cur, end;
			cur = rule_list.begin();
			end = rule_list.end();
			for(std::string tmp_value; cur != end; ++cur) {
				if (RuleManager::Instance()->GetRule(*cur, tmp_value))
					c->Message(Chat::White, " %s - %s", *cur, tmp_value.c_str());
			}
		}
	} else {
		c->Message(Chat::Yellow, "Invalid action specified. use '#rules help' for help");
	}
}
// #task - super-command for managing the task system: show a client's active
// tasks, bump activity progress, and trigger task-data reloads via world.
void command_task(Client *c, const Seperator *sep) {
	//super-command for managing tasks
	// No argument, or "help": print the usage summary and exit.
	if(sep->arg[1][0] == '\0' || !strcasecmp(sep->arg[1], "help")) {
		c->Message(Chat::White, "Syntax: #task [subcommand].");
		c->Message(Chat::White, "-- Task System Commands --");
		c->Message(Chat::White, "...show - List active tasks for a client");
		c->Message(Chat::White, "...update <TaskID> <ActivityID> [Count]");
		c->Message(Chat::White, "...reloadall - Reload all Task information from the database");
		c->Message(Chat::White, "...reload task <TaskID> - Reload Task and Activity informnation for a single task");
		c->Message(Chat::White, "...reload lists - Reload goal/reward list information");
		c->Message(Chat::White, "...reload prox - Reload proximity information");
		c->Message(Chat::White, "...reload sets - Reload task set information");
		return;
	}
	// show: display the client target's active tasks, or our own if untargeted.
	if(!strcasecmp(sep->arg[1], "show")) {
		if(c->GetTarget() && c->GetTarget()->IsClient())
			c->GetTarget()->CastToClient()->ShowClientTasks();
		else
			c->ShowClientTasks();
		return;
	}
	// update: advance an activity count on the requesting client
	// (count defaults to 1 and is clamped to at least 1).
	if(!strcasecmp(sep->arg[1], "update")) {
		if(sep->argnum>=3) {
			int TaskID = atoi(sep->arg[2]);
			int ActivityID = atoi(sep->arg[3]);
			int Count=1;
			if(sep->argnum>=4) {
				Count = atoi(sep->arg[4]);
				if(Count <= 0)
					Count = 1;
			}
			c->Message(Chat::Yellow, "Updating Task %i, Activity %i, Count %i", TaskID, ActivityID, Count);
			c->UpdateTaskActivity(TaskID, ActivityID, Count);
		}
		return;
	}
	// reloadall: ask the world server to broadcast a full task reload.
	if(!strcasecmp(sep->arg[1], "reloadall")) {
		c->Message(Chat::Yellow, "Sending reloadtasks to world");
		worldserver.SendReloadTasks(RELOADTASKS);
		c->Message(Chat::Yellow, "Back again");
		return;
	}
	// reload <what>: partial reloads (lists/prox/sets) or one task by id.
	if(!strcasecmp(sep->arg[1], "reload")) {
		if(sep->arg[2][0] != '\0') {
			if(!strcasecmp(sep->arg[2], "lists")) {
				c->Message(Chat::Yellow, "Sending reload lists to world");
				worldserver.SendReloadTasks(RELOADTASKGOALLISTS);
				c->Message(Chat::Yellow, "Back again");
				return;
			}
			if(!strcasecmp(sep->arg[2], "prox")) {
				c->Message(Chat::Yellow, "Sending reload proximities to world");
				worldserver.SendReloadTasks(RELOADTASKPROXIMITIES);
				c->Message(Chat::Yellow, "Back again");
				return;
			}
			if(!strcasecmp(sep->arg[2], "sets")) {
				c->Message(Chat::Yellow, "Sending reload task sets to world");
				worldserver.SendReloadTasks(RELOADTASKSETS);
				c->Message(Chat::Yellow, "Back again");
				return;
			}
			if(!strcasecmp(sep->arg[2], "task") && (sep->arg[3][0] != '\0')) {
				int TaskID = atoi(sep->arg[3]);
				if((TaskID > 0) && (TaskID < MAXTASKS)) {
					// Bug fix: the "%i" format specifier previously had no
					// matching argument (undefined behavior per the printf
					// contract); pass TaskID so the message is well-defined.
					c->Message(Chat::Yellow, "Sending reload task %i to world", TaskID);
					worldserver.SendReloadTasks(RELOADTASKS, TaskID);
					c->Message(Chat::Yellow, "Back again");
					return;
				}
			}
		}
	}
	c->Message(Chat::White, "Unable to interpret command. Type #task help");
}
// #reloadtitles - asks the world server to reload player title data,
// then confirms to the requesting client.
void command_reloadtitles(Client *c, const Seperator *sep)
{
	ServerPacket *title_pack = new ServerPacket(ServerOP_ReloadTitles, 0);
	worldserver.SendPacket(title_pack);
	safe_delete(title_pack);
	c->Message(Chat::Yellow, "Player Titles Reloaded.");
}
// #traindisc <max level> [min level] - trains every discipline whose required
// level for the character's class falls within [min level, max level] on the
// requesting client, or on a targeted client when the caller is a GM.
// Non-GM callers have both bounds clamped to Character:MaxLevel.
void command_traindisc(Client *c, const Seperator *sep)
{
	Client *t = c;
	// GMs may train a targeted client instead of themselves.
	if (c->GetTarget() && c->GetTarget()->IsClient() && c->GetGM())
		t = c->GetTarget()->CastToClient();
	if (sep->argnum < 1 || !sep->IsNumber(1)) {
		c->Message(Chat::White, "FORMAT: #traindisc <max level> <min level>");
		return;
	}
	uint8 max_level = (uint8)atol(sep->arg[1]);
	if (!c->GetGM() && max_level >(uint8)RuleI(Character, MaxLevel))
		max_level = (uint8)RuleI(Character, MaxLevel); // default to Character:MaxLevel if we're not a GM & it's higher than the max level
	uint8 min_level = (sep->IsNumber(2) ? (uint8)atol(sep->arg[2]) : 1); // default to 1 if there isn't a 2nd argument
	if (!c->GetGM() && min_level > (uint8)RuleI(Character, MaxLevel))
		min_level = (uint8)RuleI(Character, MaxLevel); // default to Character:MaxLevel if we're not a GM & it's higher than the max level
	if(max_level < 1 || min_level < 1) {
		c->Message(Chat::White, "ERROR: Level must be greater than 1.");
		return;
	}
	if (min_level > max_level) {
		c->Message(Chat::White, "Error: Min Level must be less than or equal to Max Level.");
		return;
	}
	t->Message(Chat::White, "Training disciplines");
	if(t != c)
		c->Message(Chat::White, "Training disciplines for %s.", t->GetName());
	LogInfo("Train disciplines request for [{}] from [{}], levels: [{}] -> [{}]", t->GetName(), c->GetName(), min_level, max_level);
	int spell_id = 0;
	int count = 0;   // number of disciplines actually learned
	bool change = false; // whether any profile slot was written (triggers client update)
	for( ; spell_id < SPDAT_RECORDS; ++spell_id) {
		// Defensive re-check of the loop bound; cannot trigger given the
		// loop condition above, kept as a belt-and-braces guard.
		if (spell_id < 0 || spell_id >= SPDAT_RECORDS) {
			c->Message(Chat::Red, "FATAL ERROR: Spell id out-of-range (id: %i, min: 0, max: %i)", spell_id, SPDAT_RECORDS);
			return;
		}
		// while(true) is used as a structured "skip to next spell_id":
		// every break below abandons this spell and continues the for loop.
		while (true) {
			if (spells[spell_id].classes[WARRIOR] == 0) // check if spell exists
				break;
			if (spells[spell_id].classes[t->GetPP().class_ - 1] > max_level) // maximum level
				break;
			if (spells[spell_id].classes[t->GetPP().class_ - 1] < min_level) // minimum level
				break;
			// NOTE(review): spells with skill 52 are excluded here; the
			// meaning of that skill id is not visible in this file -- confirm.
			if (spells[spell_id].skill == 52)
				break;
			// Defensive narrowing check for the uint16 conversion; cannot
			// lose data while spell_id < SPDAT_RECORDS fits in 16 bits.
			uint16 spell_id_ = (uint16)spell_id;
			if ((spell_id_ != spell_id) || (spell_id != spell_id_)) {
				c->Message(Chat::Red, "FATAL ERROR: Type conversion data loss with spell_id (%i != %u)", spell_id, spell_id_);
				return;
			}
			if (!IsDiscipline(spell_id_))
				break;
			// Find a free discipline slot (or detect a duplicate) in the
			// player profile.
			for (uint32 r = 0; r < MAX_PP_DISCIPLINES; ++r) {
				if (t->GetPP().disciplines.values[r] == spell_id_) {
					t->Message(Chat::Red, "You already know this discipline.");
					break; // continue the 1st loop
				}
				else if (t->GetPP().disciplines.values[r] == 0) {
					t->GetPP().disciplines.values[r] = spell_id_;
					database.SaveCharacterDisc(t->CharacterID(), r, spell_id_);
					change = true;
					t->Message(Chat::White, "You have learned a new discipline!");
					++count; // success counter
					break; // continue the 1st loop
				} // if we get to this point, there's already a discipline in this slot, so we continue onto the next slot
			}
			break;
		}
	}
	if (change)
		t->SendDisciplineUpdate();
	if (count > 0) {
		t->Message(Chat::White, "Successfully trained %u disciplines.", count);
		if (t != c)
			c->Message(Chat::White, "Successfully trained %u disciplines for %s.", count, t->GetName());
	} else {
		t->Message(Chat::White, "No disciplines trained.");
		if (t != c)
			c->Message(Chat::White, "No disciplines trained for %s.", t->GetName());
	}
}
// #setgraveyard [zonename] - creates a graveyard record at the position of
// the requesting client (or a GM's client target) and links it to the zone.
void command_setgraveyard(Client *c, const Seperator *sep)
{
	Client *t = c;
	// A GM with a client target places the graveyard at the target's position.
	if (c->GetTarget() && c->GetTarget()->IsClient() && c->GetGM())
		t = c->GetTarget()->CastToClient();

	if (!sep->arg[1][0]) {
		c->Message(Chat::White, "Usage: #setgraveyard [zonename]");
		return;
	}

	uint32 zoneid = database.GetZoneID(sep->arg[1]);
	if (zoneid == 0) {
		c->Message(Chat::White, "Unable to retrieve a ZoneID for the zone: %s", sep->arg[1]);
		return;
	}

	uint32 graveyard_id = database.CreateGraveyardRecord(zoneid, t->GetPosition());
	if (graveyard_id == 0) {
		c->Message(Chat::White, "Unable to create a new graveyard record in the database.");
		return;
	}

	c->Message(Chat::White, "Successfuly added a new record for this graveyard!");
	if (database.AddGraveyardIDToZone(zoneid, graveyard_id) > 0) {
		c->Message(Chat::White, "Successfuly added this new graveyard for the zone %s.", sep->arg[1]);
		// TODO: Set graveyard data to the running zone process.
		c->Message(Chat::White, "Done!");
	}
	else {
		c->Message(Chat::White, "Unable to add this new graveyard to the zone %s.", sep->arg[1]);
	}
}
// #deletegraveyard [zonename] - removes the graveyard record linked to the
// named zone, reporting which lookup failed if it cannot.
void command_deletegraveyard(Client *c, const Seperator *sep)
{
	if (!sep->arg[1][0]) {
		c->Message(Chat::White, "Usage: #deletegraveyard [zonename]");
		return;
	}

	uint32 zoneid = database.GetZoneID(sep->arg[1]);
	uint32 graveyard_id = database.GetZoneGraveyardID(zoneid, 0);

	if (zoneid == 0) {
		c->Message(Chat::White, "Unable to retrieve a ZoneID for the zone: %s", sep->arg[1]);
		return;
	}
	if (graveyard_id == 0) {
		c->Message(Chat::White, "Unable to retrieve a valid GraveyardID for the zone: %s", sep->arg[1]);
		return;
	}

	if (database.DeleteGraveyard(zoneid, graveyard_id))
		c->Message(Chat::White, "Successfuly deleted graveyard %u for zone %s.", graveyard_id, sep->arg[1]);
	else
		c->Message(Chat::White, "Unable to delete graveyard %u for zone %s.", graveyard_id, sep->arg[1]);
}
// #summonburiedplayercorpse - summons the targeted client's buried corpses
// to the target's current position. Requires a client target and GM status.
void command_summonburiedplayercorpse(Client *c, const Seperator *sep)
{
	if (!(c->GetTarget() && c->GetTarget()->IsClient() && c->GetGM())) {
		c->Message(Chat::White, "You must first select a target!");
		return;
	}
	Client *t = c->GetTarget()->CastToClient();

	Corpse *summoned = database.SummonBuriedCharacterCorpses(t->CharacterID(), t->GetZoneID(), zone->GetInstanceID(), t->GetPosition());
	if (summoned == nullptr)
		c->Message(Chat::White, "Your target doesn't have any buried corpses.");
}
// #getplayerburiedcorpsecount - reports how many buried corpses the targeted
// client has. Requires a client target and GM status.
void command_getplayerburiedcorpsecount(Client *c, const Seperator *sep)
{
	if (!(c->GetTarget() && c->GetTarget()->IsClient() && c->GetGM())) {
		c->Message(Chat::White, "You must first select a target!");
		return;
	}
	Client *t = c->GetTarget()->CastToClient();

	uint32 buried = database.GetCharacterBuriedCorpseCount(t->CharacterID());
	if (buried > 0)
		c->Message(Chat::White, "Your target has a total of %u buried corpses.", buried);
	else
		c->Message(Chat::White, "Your target doesn't have any buried corpses.");
}
// #refreshgroup - re-syncs the requesting client's group data from the
// database. Does nothing when the client is not grouped.
void command_refreshgroup(Client *c, const Seperator *sep)
{
	if (c == nullptr)
		return;
	if (c->GetGroup() == nullptr)
		return;

	database.RefreshGroupFromDB(c);
}
// #advnpcspawn - super-command for managing spawn groups, spawn entries and
// spawn points. Most subcommands write directly to the spawngroup /
// spawnentry / spawn2 tables; several require an NPC target.
void command_advnpcspawn(Client *c, const Seperator *sep)
{
	Mob *target=c->GetTarget();
	// maketype: delegate to NPCSpawnDB with command 6 for the targeted NPC
	// (the semantics of command 6 live in NPCSpawnDB).
	if (strcasecmp(sep->arg[1], "maketype") == 0) {
		if(!target || !target->IsNPC()) {
			c->Message(Chat::White, "Target Required!");
			return;
		}
		database.NPCSpawnDB(6, zone->GetShortName(), zone->GetInstanceVersion(), c, target->CastToNPC());
		return;
	}
	// makegroup: insert a new spawngroup row; numeric fields default to 0.
	if (strcasecmp(sep->arg[1], "makegroup") == 0) {
		if(!sep->arg[2]) {
			c->Message(Chat::White, "Format: #advnpdspawn makegroup <name> [spawn limit] [dist] [max x] [min x] [max y] [min y] [delay]");
			return;
		}
		std::string query = StringFormat("INSERT INTO spawngroup "
						"(name, spawn_limit, dist, max_x, min_x, max_y, min_y, delay) "
						"VALUES (\"%s\", %i, %f, %f, %f, %f, %f, %i)",
						sep->arg[2],
						(sep->arg[3]? atoi(sep->arg[3]): 0),
						(sep->arg[4]? atof(sep->arg[4]): 0),
						(sep->arg[5]? atof(sep->arg[5]): 0),
						(sep->arg[6]? atof(sep->arg[6]): 0),
						(sep->arg[7]? atof(sep->arg[7]): 0),
						(sep->arg[8]? atof(sep->arg[8]): 0),
						(sep->arg[9]? atoi(sep->arg[9]): 0));
		auto results = database.QueryDatabase(query);
		if (!results.Success()) {
			c->Message(Chat::White, "Invalid Arguments -- MySQL gave the following error:");
			c->Message(Chat::Red, results.ErrorMessage().c_str());
			return;
		}
		c->Message(Chat::White, "Group ID %i created successfully!", results.LastInsertedID());
		return;
	}
	// addgroupentry: insert a spawnentry linking an NPC type to a spawn group.
	if (strcasecmp(sep->arg[1], "addgroupentry") == 0) {
		if(!atoi(sep->arg[2]) || !atoi(sep->arg[3]) || !atoi(sep->arg[4])) {
			c->Message(Chat::White, "Format: #advnpdspawn addgroupentry <spawnggroupID> <npcID> <chance>");
			return;
		}
		std::string query = StringFormat("INSERT INTO spawnentry (spawngroupID, npcID, chance) "
						"VALUES (%i, %i, %i)",
						atoi(sep->arg[2]), atoi(sep->arg[3]), atoi(sep->arg[4]));
		auto results = database.QueryDatabase(query);
		if (!results.Success()) {
			c->Message(Chat::White, "Invalid Arguments -- MySQL gave the following error:");
			c->Message(Chat::Red, results.ErrorMessage().c_str());
			return;
		}
		c->Message(Chat::White, "NPC %i added to group %i with %i chance!", atoi(sep->arg[3]), atoi(sep->arg[2]), atoi(sep->arg[4]) );
		return;
	}
	// editgroupbox: update a spawn group's roam-box fields.
	if (strcasecmp(sep->arg[1], "editgroupbox") == 0) {
		if(!atof(sep->arg[2]) || !atof(sep->arg[3]) || !atof(sep->arg[4]) || !atof(sep->arg[5]) || !atof(sep->arg[6]) || !atof(sep->arg[7]) || !atof(sep->arg[8])) {
			c->Message(Chat::White, "Format: #advnpdspawn editgroupbox <spawngroupID> <dist> <max x> <min x> <max y> <min y> <delay>");
			return;
		}
		std::string query = StringFormat("UPDATE spawngroup SET dist = '%f', max_x = '%f', min_x = '%f', "
						"max_y = '%f', min_y = '%f', delay = '%i' WHERE id = '%i'",
						atof(sep->arg[3]), atof(sep->arg[4]), atof(sep->arg[5]),
						atof(sep->arg[6]), atof(sep->arg[7]), atoi(sep->arg[8]),
						atoi(sep->arg[2]));
		auto results = database.QueryDatabase(query);
		if (!results.Success()) {
			c->Message(Chat::White, "Invalid Arguments -- MySQL gave the following error:");
			c->Message(Chat::Red, results.ErrorMessage().c_str());
			return;
		}
		// Bug fix: this branch runs an UPDATE, so LastInsertedID() is
		// meaningless here and "created" was misleading; report the id that
		// was actually edited.
		c->Message(Chat::White, "Group ID %i updated successfully!", atoi(sep->arg[2]));
		return;
	}
	// cleargroupbox: zero out a spawn group's roam-box fields.
	if (strcasecmp(sep->arg[1], "cleargroupbox") == 0) {
		if(!atoi(sep->arg[2])) {
			c->Message(Chat::White, "Format: #advnpdspawn cleargroupbox <spawngroupID>");
			return;
		}
		std::string query = StringFormat("UPDATE spawngroup "
						"SET dist = '0', max_x = '0', min_x = '0', "
						"max_y = '0', min_y = '0', delay = '0' "
						"WHERE id = '%i' ", atoi(sep->arg[2]));
		auto results = database.QueryDatabase(query);
		if (!results.Success()) {
			c->Message(Chat::White, "Invalid Arguments -- MySQL gave the following error:");
			c->Message(Chat::Red, results.ErrorMessage().c_str());
			return;
		}
		// Bug fix: same as editgroupbox -- UPDATE, not INSERT, so report the
		// edited group id rather than a bogus LastInsertedID().
		c->Message(Chat::White, "Group ID %i cleared successfully!", atoi(sep->arg[2]));
		return;
	}
	// addgroupspawn: delegate to NPCSpawnDB with command 5 for the given group.
	if (strcasecmp(sep->arg[1], "addgroupspawn") == 0 && atoi(sep->arg[2])!=0) {
		database.NPCSpawnDB(5, zone->GetShortName(), zone->GetInstanceVersion(), c, 0, atoi(sep->arg[2]));
		c->Message(Chat::White, "Mob of group %i added successfully!", atoi(sep->arg[2]));
		return;
	}
	// removegroupspawn: delete the targeted NPC's spawn2 row and depop it.
	if (strcasecmp(sep->arg[1], "removegroupspawn") == 0) {
		if (!target || !target->IsNPC()) {
			c->Message(Chat::White, "Error: Need an NPC target.");
			return;
		}
		Spawn2* s2 = target->CastToNPC()->respawn2;
		if(!s2) {
			c->Message(Chat::White, "removegroupspawn FAILED -- cannot determine which spawn entry in the database this mob came from.");
			return;
		}
		std::string query = StringFormat("DELETE FROM spawn2 WHERE id = '%i'", s2->GetID());
		auto results = database.QueryDatabase(query);
		if(!results.Success()) {
			c->Message(Chat::Red, "Update failed! MySQL gave the following error:");
			c->Message(Chat::Red, results.ErrorMessage().c_str());
			return;
		}
		c->Message(Chat::White, "Spawnpoint Removed successfully.");
		target->Depop(false);
		return;
	}
	// movespawn: move the targeted NPC's spawn point to the GM's position.
	if (strcasecmp(sep->arg[1], "movespawn") == 0) {
		if (!target || !target->IsNPC()) {
			c->Message(Chat::White, "Error: Need an NPC target.");
			return;
		}
		Spawn2* s2 = target->CastToNPC()->respawn2;
		if(!s2) {
			c->Message(Chat::White, "movespawn FAILED -- cannot determine which spawn entry in the database this mob came from.");
			return;
		}
		std::string query = StringFormat("UPDATE spawn2 SET x = '%f', y = '%f', z = '%f', heading = '%f' "
						"WHERE id = '%i'",
						c->GetX(), c->GetY(), c->GetZ(), c->GetHeading(),s2->GetID());
		auto results = database.QueryDatabase(query);
		if (!results.Success()) {
			c->Message(Chat::Red, "Update failed! MySQL gave the following error:");
			c->Message(Chat::Red, results.ErrorMessage().c_str());
			return;
		}
		c->Message(Chat::White, "Updating coordinates successful.");
		target->GMMove(c->GetX(), c->GetY(), c->GetZ(), c->GetHeading());
		return;
	}
	// editrespawn: change the targeted NPC's respawn timer (and variance).
	if (strcasecmp(sep->arg[1], "editrespawn") == 0) {
		if (!target || !target->IsNPC()) {
			c->Message(Chat::White, "Error: Need an NPC target.");
			return;
		}
		Spawn2* s2 = target->CastToNPC()->respawn2;
		// Bug fix: the null check must come before any use of s2 -- the old
		// code read s2->GetVariance() first, crashing on NPCs that have no
		// spawn2 entry (e.g. summoned or scripted spawns).
		if(!s2) {
			c->Message(Chat::White, "editrespawn FAILED -- cannot determine which spawn entry in the database this mob came from.");
			return;
		}
		uint32 new_rs = 0;
		uint32 new_var = s2->GetVariance();
		if(!sep->IsNumber(2)) {
			c->Message(Chat::White, "editrespawn FAILED -- cannot set respawn to be 0");
			return;
		}
		new_rs = atoi(sep->arg[2]);
		if(sep->IsNumber(3))
			new_var = atoi(sep->arg[3]);
		std::string query = StringFormat("UPDATE spawn2 SET respawntime = %u, variance = %u "
						"WHERE id = '%i'", new_rs, new_var, s2->GetID());
		auto results = database.QueryDatabase(query);
		if (!results.Success()) {
			c->Message(Chat::Red, "Update failed! MySQL gave the following error:");
			c->Message(Chat::Red, results.ErrorMessage().c_str());
			return;
		}
		c->Message(Chat::White, "Updating respawn timer successful.");
		s2->SetRespawnTimer(new_rs);
		s2->SetVariance(new_var);
		return;
	}
	// setversion: move every spawn point of the target's spawn group to a
	// different zone version, then depop the target.
	if (strcasecmp(sep->arg[1], "setversion") == 0) {
		if (!target || !target->IsNPC()) {
			c->Message(Chat::White, "Error: Need an NPC target.");
			return;
		}
		if(!sep->IsNumber(2)) {
			c->Message(Chat::White, "setversion FAILED -- You must set a version number");
			return;
		}
		int16 version = atoi(sep->arg[2]);
		std::string query = StringFormat("UPDATE spawn2 SET version = %i "
						"WHERE spawngroupID = '%i'",
						version, c->GetTarget()->CastToNPC()->GetSpawnGroupId());
		auto results = database.QueryDatabase(query);
		if (!results.Success()) {
			c->Message(Chat::Red, "Update failed! MySQL gave the following error:");
			c->Message(Chat::Red, results.ErrorMessage().c_str());
			return;
		}
		c->Message(Chat::White, "Version change to %i was successful from SpawnGroupID %i", version,
				c->GetTarget()->CastToNPC()->GetSpawnGroupId());
		c->GetTarget()->Depop(false);
		return;
	}
	// testload: load a spawn group by id into this zone's spawn group list.
	if (strcasecmp(sep->arg[1], "testload") == 0 && atoi(sep->arg[2])!=0) {
		database.LoadSpawnGroupsByID(atoi(sep->arg[2]),&zone->spawn_group_list);
		c->Message(Chat::White, "Group %i loaded successfully!", atoi(sep->arg[2]));
		return;
	}
	c->Message(Chat::White, "Error: #advnpcspawn: Invalid command.");
	c->Message(Chat::White, "Usage: #advnpcspawn [maketype|makegroup|addgroupentry|addgroupspawn|setversion]");
	c->Message(Chat::White, "Usage: #advnpcspawn [removegroupspawn|movespawn|editrespawn|editgroupbox|cleargroupbox]");
}
// #aggrozone [hate] - makes the entire zone aggro the requesting GM,
// seeding each NPC with the optional hate amount (defaults to 0).
void command_aggrozone(Client *c, const Seperator *sep) {
	if (c == nullptr)
		return;

	Mob *self = c->CastToMob();
	if (self == nullptr)
		return;

	uint32 initial_hate = atoi(sep->arg[1]); // atoi yields 0 when no argument was given
	entity_list.AggroZone(self, initial_hate);
	c->Message(Chat::White, "Train to you! Last chance to go invulnerable...");
}
// #modifynpcstat <stat> <value> - applies a single stat override to the
// targeted NPC; with no arguments, prints the list of accepted stat names.
void command_modifynpcstat(Client *c, const Seperator *sep)
{
	if (c == nullptr)
		return;

	if (sep->arg[1][0] == '\0') {
		c->Message(Chat::White, "usage #modifynpcstat arg value");
		c->Message(Chat::White, "Args: ac, str, sta, agi, dex, wis, _int, cha, max_hp, mr, fr, cr, pr, dr, runspeed, special_attacks, "
			"attack_speed, atk, accuracy, trackable, min_hit, max_hit, see_invis_undead, see_hide, see_improved_hide, "
			"hp_regen, mana_regen, aggro, assist, slow_mitigation, loottable_id, healscale, spellscale");
		return;
	}

	Mob *tgt = c->GetTarget();
	if (tgt == nullptr || !tgt->IsNPC())
		return;

	tgt->CastToNPC()->ModifyNPCStat(sep->arg[1], sep->arg[2]);
}
// #instance - super-command for managing zone instances:
// create/destroy instances and add/remove/list player membership.
void command_instance(Client *c, const Seperator *sep)
{
	if(!c)
		return;
	//options:
	//help
	//create [zone_id] [version]
	//destroy [instance_id]
	//add [instance_id] [player_name]
	//remove [instance_id] [player_name]
	//list [player_name]
	if(strcasecmp(sep->arg[1], "help") == 0)
	{
		c->Message(Chat::White, "#instance usage:");
		c->Message(Chat::White, "#instance create zone_id version duration - Creates an instance of version 'version' in the "
			"zone with id matching zone_id, will last for duration seconds.");
		c->Message(Chat::White, "#instance destroy instance_id - Destroys the instance with id matching instance_id.");
		c->Message(Chat::White, "#instance add instance_id player_name - adds the player 'player_name' to the instance "
			"with id matching instance_id.");
		c->Message(Chat::White, "#instance remove instance_id player_name - removes the player 'player_name' from the "
			"instance with id matching instance_id.");
		c->Message(Chat::White, "#instance list player_name - lists all the instances 'player_name' is apart of.");
		return;
	}
	// create: allocate an unused instance id and create the instance record.
	else if(strcasecmp(sep->arg[1], "create") == 0)
	{
		if(!sep->IsNumber(3) || !sep->IsNumber(4))
		{
			c->Message(Chat::White, "#instance create zone_id version duration - Creates an instance of version 'version' in the "
				"zone with id matching zone_id, will last for duration seconds.");
			return;
		}
		const char * zn = nullptr;
		uint32 zone_id = 0;
		// The zone may be given numerically or by short name.
		if(sep->IsNumber(2))
		{
			zone_id = atoi(sep->arg[2]);
		}
		else
		{
			zone_id = database.GetZoneID(sep->arg[2]);
		}
		uint32 version = atoi(sep->arg[3]);
		uint32 duration = atoi(sep->arg[4]);
		zn = database.GetZoneName(zone_id);
		if(!zn)
		{
			c->Message(Chat::White, "Zone with id %lu was not found by the server.", (unsigned long)zone_id);
			return;
		}
		uint16 id = 0;
		if(!database.GetUnusedInstanceID(id))
		{
			c->Message(Chat::White, "Server was unable to find a free instance id.");
			return;
		}
		if(!database.CreateInstance(id, zone_id, version, duration))
		{
			c->Message(Chat::White, "Server was unable to create a new instance.");
			return;
		}
		c->Message(Chat::White, "New instance %s was created with id %lu.", zn, (unsigned long)id);
	}
	// destroy: delete the instance record by id.
	else if(strcasecmp(sep->arg[1], "destroy") == 0)
	{
		if(!sep->IsNumber(2))
		{
			c->Message(Chat::White, "#instance destroy instance_id - Destroys the instance with id matching instance_id.");
			return;
		}
		uint16 id = atoi(sep->arg[2]);
		database.DeleteInstance(id);
		c->Message(Chat::White, "Destroyed instance with id %lu.", (unsigned long)id);
	}
	// add: attach a character to an instance, unless they are already saved
	// to another instance of the same zone/version.
	else if(strcasecmp(sep->arg[1], "add") == 0)
	{
		if(!sep->IsNumber(2))
		{
			c->Message(Chat::White, "#instance add instance_id player_name - adds the player 'player_name' to the instance "
				"with id matching instance_id.");
			return;
		}
		uint16 id = atoi(sep->arg[2]);
		uint32 charid = database.GetCharacterID(sep->arg[3]);
		if(id <= 0 || charid <= 0)
		{
			c->Message(Chat::White, "Must enter a valid instance id and player name.");
			return;
		}
		if(!database.CheckInstanceExists(id))
		{
			c->Message(Chat::White, "Instance does not exist.");
			return;
		}
		uint32 zone_id = database.ZoneIDFromInstanceID(id);
		uint32 version = database.VersionFromInstanceID(id);
		uint32 cur_id = database.GetInstanceID(zone_id, charid, version);
		if(cur_id == 0)
		{
			if(database.AddClientToInstance(id, charid))
			{
				c->Message(Chat::White, "Added client to instance.");
			}
			else
			{
				c->Message(Chat::White, "Failed to add client to instance.");
			}
		}
		else
		{
			c->Message(Chat::White, "Client was already saved to %u which has uses the same zone and version as that instance.", cur_id);
		}
	}
	// remove: detach a character from an instance.
	else if(strcasecmp(sep->arg[1], "remove") == 0)
	{
		if(!sep->IsNumber(2))
		{
			c->Message(Chat::White, "#instance remove instance_id player_name - removes the player 'player_name' from the "
				"instance with id matching instance_id.");
			return;
		}
		uint16 id = atoi(sep->arg[2]);
		uint32 charid = database.GetCharacterID(sep->arg[3]);
		if(id <= 0 || charid <= 0)
		{
			c->Message(Chat::White, "Must enter a valid instance id and player name.");
			// Bug fix: this branch previously fell through and attempted the
			// removal anyway despite the invalid arguments.
			return;
		}
		if(database.RemoveClientFromInstance(id, charid))
		{
			c->Message(Chat::White, "Removed client from instance.");
		}
		else
		{
			c->Message(Chat::White, "Failed to remove client from instance.");
		}
	}
	// list: show every instance a character belongs to; falls back to the
	// GM's client target when no name matches.
	else if(strcasecmp(sep->arg[1], "list") == 0)
	{
		uint32 charid = database.GetCharacterID(sep->arg[2]);
		if(charid <= 0)
		{
			if(c->GetTarget() == nullptr || (c->GetTarget() && !c->GetTarget()->IsClient()))
			{
				c->Message(Chat::White, "Character not found.");
				return;
			}
			else
				charid = c->GetTarget()->CastToClient()->CharacterID();
		}
		database.ListAllInstances(c, charid);
	}
	else
	{
		c->Message(Chat::White, "Invalid Argument.");
		c->Message(Chat::White, "#instance usage:");
		c->Message(Chat::White, "#instance create zone_id version duration - Creates an instance of version 'version' in the "
			"zone with id matching zone_id, will last for duration seconds.");
		c->Message(Chat::White, "#instance destroy instance_id - Destroys the instance with id matching instance_id.");
		c->Message(Chat::White, "#instance add instance_id player_name - adds the player 'player_name' to the instance "
			"with id matching instance_id.");
		c->Message(Chat::White, "#instance remove instance_id player_name - removes the player 'player_name' from the "
			"instance with id matching instance_id.");
		c->Message(Chat::White, "#instance list player_name - lists all the instances 'player_name' is apart of.");
		return;
	}
}
void command_setstartzone(Client *c, const Seperator *sep)
{
	// Sets the targeted player's bound starting zone. Accepts a numeric zone
	// id, a zone short name, or "reset"/"0" to clear the start zone so the
	// player may pick one themselves with /setstartcity.
	Client *target_client = nullptr;
	if (c->GetTarget() && c->GetTarget()->IsClient() && sep->arg[1][0] != 0) {
		target_client = c->GetTarget()->CastToClient();
	}
	else {
		// No PC target or no argument supplied: show usage and bail out.
		c->Message(Chat::White, "Usage: (needs PC target) #setstartzone zonename");
		c->Message(Chat::White, "Optional Usage: Use '#setstartzone reset' or '#setstartzone 0' to clear a starting zone. Player can select a starting zone using /setstartcity");
		return;
	}

	uint32 start_zone_id = 0;
	if (sep->IsNumber(1)) {
		start_zone_id = atoi(sep->arg[1]);
	}
	else if (strcasecmp(sep->arg[1], "reset") != 0) {
		// Not numeric and not "reset": treat the argument as a zone short name.
		start_zone_id = database.GetZoneID(sep->arg[1]);
		if (start_zone_id == 0) {
			c->Message(Chat::White, "Unable to locate zone '%s'", sep->arg[1]);
			return;
		}
	}
	// ("reset" falls through with start_zone_id == 0, clearing the start zone.)

	target_client->SetStartZone(start_zone_id);
}
void command_netstats(Client *c, const Seperator *sep)
{
	// Displays Daybreak protocol statistics for a client connection.
	// If the GM has a client targeted, that target's connection is inspected;
	// otherwise the GM's own connection is used.
	//   #netstats        - summary stats
	//   #netstats reset  - zero the counters (packet loss reads wrong afterwards)
	//   #netstats full   - summary plus per-opcode sent/recv counts
	if(c)
	{
		// Resolve which client's connection to inspect (target if one is selected).
		auto client = c;
		if (c->GetTarget() && c->GetTarget()->IsClient()) {
			client = c->GetTarget()->CastToClient();
		}

		if (strcasecmp(sep->arg[1], "reset") == 0) {
			// Bug fix: previously this reset c's own connection even when a
			// target was selected; use the resolved client instead.
			auto connection = client->Connection();
			c->Message(Chat::White, "Resetting client stats (packet loss will not read correctly after reset).");
			connection->ResetStats();
			return;
		}

		// Bug fix: stats were always read from c's own connection, ignoring
		// the resolved target; read from the resolved client instead.
		auto connection = client->Connection();
		auto opts = connection->GetManager()->GetOptions();
		auto eqs_stats = connection->GetStats();
		auto &stats = eqs_stats.DaybreakStats;
		auto now = EQ::Net::Clock::now();
		// Per-second rates are measured since the stats were created/reset.
		auto sec_since_stats_reset = std::chrono::duration_cast<std::chrono::duration<double>>(now - stats.created).count();

		c->Message(Chat::White, "Netstats:");
		c->Message(Chat::White, "--------------------------------------------------------------------");
		c->Message(Chat::White, "Sent Bytes: %u (%.2f/sec)", stats.sent_bytes, stats.sent_bytes / sec_since_stats_reset);
		c->Message(Chat::White, "Recv Bytes: %u (%.2f/sec)", stats.recv_bytes, stats.recv_bytes / sec_since_stats_reset);
		c->Message(Chat::White, "Bytes Before Encode (Sent): %u, Compression Rate: %.2f%%", stats.bytes_before_encode,
			static_cast<double>(stats.bytes_before_encode - stats.sent_bytes) / static_cast<double>(stats.bytes_before_encode) * 100.0);
		c->Message(Chat::White, "Bytes After Decode (Recv): %u, Compression Rate: %.2f%%", stats.bytes_after_decode,
			static_cast<double>(stats.bytes_after_decode - stats.recv_bytes) / static_cast<double>(stats.bytes_after_decode) * 100.0);
		c->Message(Chat::White, "Min Ping: %u", stats.min_ping);
		c->Message(Chat::White, "Max Ping: %u", stats.max_ping);
		c->Message(Chat::White, "Last Ping: %u", stats.last_ping);
		c->Message(Chat::White, "Average Ping: %u", stats.avg_ping);
		c->Message(Chat::White, "--------------------------------------------------------------------");
		c->Message(Chat::White, "(Realtime) Recv Packets: %u (%.2f/sec)", stats.recv_packets, stats.recv_packets / sec_since_stats_reset);
		c->Message(Chat::White, "(Realtime) Sent Packets: %u (%.2f/sec)", stats.sent_packets, stats.sent_packets / sec_since_stats_reset);
		c->Message(Chat::White, "(Sync) Recv Packets: %u", stats.sync_recv_packets);
		c->Message(Chat::White, "(Sync) Sent Packets: %u", stats.sync_sent_packets);
		c->Message(Chat::White, "(Sync) Remote Recv Packets: %u", stats.sync_remote_recv_packets);
		c->Message(Chat::White, "(Sync) Remote Sent Packets: %u", stats.sync_remote_sent_packets);
		// Loss is derived from synchronized local vs. remote counters.
		c->Message(Chat::White, "Packet Loss In: %.2f%%", 100.0 * (1.0 - static_cast<double>(stats.sync_recv_packets) / static_cast<double>(stats.sync_remote_sent_packets)));
		c->Message(Chat::White, "Packet Loss Out: %.2f%%", 100.0 * (1.0 - static_cast<double>(stats.sync_remote_recv_packets) / static_cast<double>(stats.sync_sent_packets)));
		c->Message(Chat::White, "--------------------------------------------------------------------");
		c->Message(Chat::White, "Resent Packets: %u (%.2f/sec)", stats.resent_packets, stats.resent_packets / sec_since_stats_reset);
		c->Message(Chat::White, "Resent Fragments: %u (%.2f/sec)", stats.resent_fragments, stats.resent_fragments / sec_since_stats_reset);
		c->Message(Chat::White, "Resent Non-Fragments: %u (%.2f/sec)", stats.resent_full, stats.resent_full / sec_since_stats_reset);
		c->Message(Chat::White, "Dropped Datarate Packets: %u (%.2f/sec)", stats.dropped_datarate_packets, stats.dropped_datarate_packets / sec_since_stats_reset);

		// Only meaningful when an outgoing rate limit is configured.
		if (opts.daybreak_options.outgoing_data_rate > 0.0) {
			c->Message(Chat::White, "Outgoing Link Saturation %.2f%% (%.2fkb/sec)", 100.0 * (1.0 - ((opts.daybreak_options.outgoing_data_rate - stats.datarate_remaining) / opts.daybreak_options.outgoing_data_rate)), opts.daybreak_options.outgoing_data_rate);
		}

		if (strcasecmp(sep->arg[1], "full") == 0) {
			// Per-opcode breakdown; only opcodes with non-zero counts are shown.
			c->Message(Chat::White, "--------------------------------------------------------------------");
			c->Message(Chat::White, "Sent Packet Types");
			for (auto i = 0; i < _maxEmuOpcode; ++i) {
				auto cnt = eqs_stats.SentCount[i];
				if (cnt > 0) {
					c->Message(Chat::White, "%s: %u (%.2f / sec)", OpcodeNames[i], cnt, cnt / sec_since_stats_reset);
				}
			}

			c->Message(Chat::White, "--------------------------------------------------------------------");
			c->Message(Chat::White, "Recv Packet Types");
			for (auto i = 0; i < _maxEmuOpcode; ++i) {
				auto cnt = eqs_stats.RecvCount[i];
				if (cnt > 0) {
					c->Message(Chat::White, "%s: %u (%.2f / sec)", OpcodeNames[i], cnt, cnt / sec_since_stats_reset);
				}
			}
		}

		c->Message(Chat::White, "--------------------------------------------------------------------");
	}
}
void command_object(Client *c, const Seperator *sep)
{
if (!c)
return; // Crash Suppressant: No client. How did we get here?
// Save it here. We sometimes have need to refer to it in multiple places.
const char *usage_string = "Usage: #object List|Add|Edit|Move|Rotate|Save|Copy|Delete|Undo";
if ((!sep) || (sep->argnum == 0)) {
c->Message(Chat::White, usage_string);
return;
}
Object *o = nullptr;
Object_Struct od;
Door door;
Doors *doors;
Door_Struct *ds;
uint32 id = 0;
uint32 itemid = 0;
uint32 icon = 0;
uint32 instance = 0;
uint32 newid = 0;
uint16 radius;
EQApplicationPacket *app;
bool bNewObject = false;
float x2;
float y2;
// Temporary object type for static objects to allow manipulation
// NOTE: Zone::LoadZoneObjects() currently loads this as an uint8, so max value is 255!
static const uint32 staticType = 255;
// Case insensitive commands (List == list == LIST)
strlwr(sep->arg[1]);
if (strcasecmp(sep->arg[1], "list") == 0) {
// Insufficient or invalid args
if ((sep->argnum < 2) || (sep->arg[2][0] < '0') ||
((sep->arg[2][0] > '9') && ((sep->arg[2][0] & 0xDF) != 'A'))) {
c->Message(Chat::White, "Usage: #object List All|(radius)");
return;
}
if ((sep->arg[2][0] & 0xDF) == 'A')
radius = 0; // List All
else if ((radius = atoi(sep->arg[2])) <= 0)
radius = 500; // Invalid radius. Default to 500 units.
if (radius == 0)
c->Message(Chat::White, "Objects within this zone:");
else
c->Message(Chat::White, "Objects within %u units of your current location:", radius);
std::string query;
if (radius)
query = StringFormat(
"SELECT id, xpos, ypos, zpos, heading, itemid, "
"objectname, type, icon, unknown08, unknown10, unknown20 "
"FROM object WHERE zoneid = %u AND version = %u "
"AND (xpos BETWEEN %.1f AND %.1f) "
"AND (ypos BETWEEN %.1f AND %.1f) "
"AND (zpos BETWEEN %.1f AND %.1f) "
"ORDER BY id",
zone->GetZoneID(), zone->GetInstanceVersion(),
c->GetX() - radius, // Yes, we're actually using a bounding box instead of a radius.
c->GetX() + radius, // Much less processing power used this way.
c->GetY() - radius, c->GetY() + radius, c->GetZ() - radius, c->GetZ() + radius);
else
query = StringFormat("SELECT id, xpos, ypos, zpos, heading, itemid, "
"objectname, type, icon, unknown08, unknown10, unknown20 "
"FROM object WHERE zoneid = %u AND version = %u "
"ORDER BY id",
zone->GetZoneID(), zone->GetInstanceVersion());
auto results = database.QueryDatabase(query);
if (!results.Success()) {
c->Message(Chat::White, "Error in objects query");
return;
}
for (auto row = results.begin(); row != results.end(); ++row) {
id = atoi(row[0]);
od.x = atof(row[1]);
od.y = atof(row[2]);
od.z = atof(row[3]);
od.heading = atof(row[4]);
itemid = atoi(row[5]);
strn0cpy(od.object_name, row[6], sizeof(od.object_name));
od.object_name[sizeof(od.object_name) - 1] =
'\0'; // Required if strlen(row[col++]) exactly == sizeof(object_name)
od.object_type = atoi(row[7]);
icon = atoi(row[8]);
od.size = atoi(row[9]);
od.solidtype = atoi(row[10]);
od.unknown020 = atoi(row[11]);
switch (od.object_type) {
case 0: // Static Object
case staticType: // Static Object unlocked for changes
if (od.size == 0) // Unknown08 field is optional Size parameter for static objects
od.size = 100; // Static object default Size is 100%
c->Message(Chat::White, "- STATIC Object (%s): id %u, x %.1f, y %.1f, z %.1f, h %.1f, model %s, "
"size %u, solidtype %u, incline %u",
(od.object_type == 0) ? "locked" : "unlocked", id, od.x, od.y, od.z,
od.heading, od.object_name, od.size, od.solidtype, od.unknown020);
break;
case OT_DROPPEDITEM: // Ground Spawn
c->Message(Chat::White, "- TEMPORARY Object: id %u, x %.1f, y %.1f, z %.1f, h %.1f, itemid %u, "
"model %s, icon %u",
id, od.x, od.y, od.z, od.heading, itemid, od.object_name, icon);
break;
default: // All others == Tradeskill Objects
c->Message(Chat::White, "- TRADESKILL Object: id %u, x %.1f, y %.1f, z %.1f, h %.1f, model %s, "
"type %u, icon %u",
id, od.x, od.y, od.z, od.heading, od.object_name, od.object_type, icon);
break;
}
}
c->Message(Chat::White, "%u object%s found", results.RowCount(), (results.RowCount() == 1) ? "" : "s");
return;
}
if (strcasecmp(sep->arg[1], "add") == 0) {
// Insufficient or invalid arguments
if ((sep->argnum < 3) ||
((sep->arg[3][0] == '\0') && (sep->arg[4][0] < '0') && (sep->arg[4][0] > '9'))) {
c->Message(Chat::White, "Usage: (Static Object): #object Add [ObjectID] 0 Model [SizePercent] "
"[SolidType] [Incline]");
c->Message(Chat::White, "Usage: (Tradeskill Object): #object Add [ObjectID] TypeNum Model Icon");
c->Message(Chat::White, "- Notes: Model must start with a letter, max length 16. SolidTypes = 0 (Solid), "
"1 (Sometimes Non-Solid)");
return;
}
int col;
if (sep->argnum > 3) { // Model name in arg3?
if ((sep->arg[3][0] <= '9') && (sep->arg[3][0] >= '0')) {
// Nope, user must have specified ObjectID. Extract it.
id = atoi(sep->arg[2]);
col = 1; // Bump all other arguments one to the right. Model is in arg4.
} else {
// Yep, arg3 is non-numeric, ObjectID must be omitted and model must be arg3
id = 0;
col = 0;
}
} else {
// Nope, only 3 args. Object ID must be omitted and arg3 must be model.
id = 0;
col = 0;
}
memset(&od, 0, sizeof(od));
od.object_type = atoi(sep->arg[2 + col]);
switch (od.object_type) {
case 0: // Static Object
if ((sep->argnum - col) > 3) {
od.size = atoi(sep->arg[4 + col]); // Size specified
if ((sep->argnum - col) > 4) {
od.solidtype = atoi(sep->arg[5 + col]); // SolidType specified
if ((sep->argnum - col) > 5)
od.unknown020 = atoi(sep->arg[6 + col]); // Incline specified
}
}
break;
case 1: // Ground Spawn
c->Message(Chat::White, "ERROR: Object Type 1 is used for temporarily spawned ground spawns and dropped "
"items, which are not supported with #object. See the 'ground_spawns' table in "
"the database.");
return;
default: // Everything else == Tradeskill Object
icon = ((sep->argnum - col) > 3) ? atoi(sep->arg[4 + col]) : 0;
if (icon == 0) {
c->Message(Chat::White, "ERROR: Required property 'Icon' not specified for Tradeskill Object");
return;
}
break;
}
od.x = c->GetX();
od.y = c->GetY();
od.z = c->GetZ() - (c->GetSize() * 0.625f);
od.heading = c->GetHeading();
std::string query;
if (id) {
// ID specified. Verify that it doesn't already exist.
query = StringFormat("SELECT COUNT(*) FROM object WHERE ID = %u", id);
auto results = database.QueryDatabase(query);
if (results.Success() && results.RowCount() != 0) {
auto row = results.begin();
if (atoi(row[0]) > 0) // Yep, in database already.
id = 0;
}
// Not in database. Already spawned, just not saved?
// Yep, already spawned.
if (id && entity_list.FindObject(id))
id = 0;
if (id == 0) {
c->Message(Chat::White, "ERROR: An object already exists with the id %u", atoi(sep->arg[2]));
return;
}
}
int objectsFound = 0;
// Verify no other objects already in this spot (accidental double-click of Hotkey?)
query = StringFormat(
"SELECT COUNT(*) FROM object WHERE zoneid = %u "
"AND version=%u AND (xpos BETWEEN %.1f AND %.1f) "
"AND (ypos BETWEEN %.1f AND %.1f) "
"AND (zpos BETWEEN %.1f AND %.1f)",
zone->GetZoneID(), zone->GetInstanceVersion(), od.x - 0.2f,
od.x + 0.2f, // Yes, we're actually using a bounding box instead of a radius.
od.y - 0.2f, od.y + 0.2f, // Much less processing power used this way.
od.z - 0.2f, od.z + 0.2f); // It's pretty forgiving, though, allowing for close-proximity objects
auto results = database.QueryDatabase(query);
if (results.Success() && results.RowCount() != 0) {
auto row = results.begin();
objectsFound = atoi(row[0]); // Number of nearby objects from database
}
// No objects found in database too close. How about spawned but not yet saved?
if (objectsFound == 0 && entity_list.FindNearbyObject(od.x, od.y, od.z, 0.2f))
objectsFound = 1;
if (objectsFound) {
c->Message(Chat::White, "ERROR: Object already at this location.");
return;
}
// Strip any single quotes from objectname (SQL injection FTL!)
strn0cpy(od.object_name, sep->arg[3 + col], sizeof(od.object_name));
uint32 len = strlen(od.object_name);
for (col = 0; col < (uint32)len; col++) {
if (od.object_name[col] != '\'')
continue;
// Uh oh, 1337 h4x0r monkeying around! Strip that apostrophe!
memcpy(&od.object_name[col], &od.object_name[col + 1], len - col);
len--;
col--;
}
strupr(od.object_name); // Model names are always upper-case.
if ((od.object_name[0] < 'A') || (od.object_name[0] > 'Z')) {
c->Message(Chat::White, "ERROR: Model name must start with a letter.");
return;
}
if (id == 0) {
// No ID specified. Get a best-guess next number from the database
// If there's a problem retrieving an ID from the database, it'll end up being object # 1. No
// biggie.
query = "SELECT MAX(id) FROM object";
results = database.QueryDatabase(query);
if (results.Success() && results.RowCount() != 0) {
auto row = results.begin();
id = atoi(row[0]);
}
id++;
}
// Make sure not to overwrite already-spawned objects that haven't been saved yet.
while (o = entity_list.FindObject(id))
id++;
// Static object
if (od.object_type == 0)
od.object_type = staticType; // Temporary. We'll make it 0 when we Save
od.zone_id = zone->GetZoneID();
od.zone_instance = zone->GetInstanceVersion();
o = new Object(id, od.object_type, icon, od, nullptr);
// Add to our zone entity list and spawn immediately for all clients
entity_list.AddObject(o, true);
// Bump player back to avoid getting stuck inside new object
x2 = 10.0f * sin(c->GetHeading() / 256.0f * 3.14159265f);
y2 = 10.0f * cos(c->GetHeading() / 256.0f * 3.14159265f);
c->MovePC(c->GetX() - x2, c->GetY() - y2, c->GetZ(), c->GetHeading());
c->Message(Chat::White, "Spawning object with tentative id %u at location (%.1f, %.1f, %.1f heading %.1f). Use "
"'#object Save' to save to database when satisfied with placement.",
id, od.x, od.y, od.z, od.heading);
// Temporary Static Object
if (od.object_type == staticType)
c->Message(Chat::White, "- Note: Static Object will act like a tradeskill container and will not reflect "
"size, solidtype, or incline values until you commit with '#object Save', after "
"which it will be unchangeable until you use '#object Edit' and zone back in.");
return;
}
if (strcasecmp(sep->arg[1], "edit") == 0) {
if ((sep->argnum < 2) || ((id = atoi(sep->arg[2])) < 1)) {
c->Message(Chat::White, "Usage: #object Edit (ObjectID) [PropertyName] [NewValue]");
c->Message(Chat::White, "- Static Object (Type 0) Properties: model, type, size, solidtype, incline");
c->Message(Chat::White, "- Tradeskill Object (Type 2+) Properties: model, type, icon");
return;
}
o = entity_list.FindObject(id);
// Object already available in-zone?
if (o) {
// Yep, looks like we can make real-time changes.
if (sep->argnum < 4) {
// Or not. '#object Edit (ObjectID)' called without PropertyName and NewValue
c->Message(Chat::White, "Note: Object %u already unlocked and ready for changes", id);
return;
}
} else {
// Object not found in-zone in a modifiable form. Check for valid matching circumstances.
std::string query = StringFormat("SELECT zoneid, version, type FROM object WHERE id = %u", id);
auto results = database.QueryDatabase(query);
if (!results.Success() || results.RowCount() == 0) {
c->Message(Chat::White, "ERROR: Object %u not found", id);
return;
}
auto row = results.begin();
od.zone_id = atoi(row[0]);
od.zone_instance = atoi(row[1]);
od.object_type = atoi(row[2]);
uint32 objectsFound = 1;
// Object not in this zone?
if (od.zone_id != zone->GetZoneID()) {
c->Message(Chat::White, "ERROR: Object %u not in this zone.", id);
return;
}
// Object not in this instance?
if (od.zone_instance != zone->GetInstanceVersion()) {
c->Message(Chat::White, "ERROR: Object %u not part of this instance version.", id);
return;
}
switch (od.object_type) {
case 0: // Static object needing unlocking
// Convert to tradeskill object temporarily for changes
query = StringFormat("UPDATE object SET type = %u WHERE id = %u", staticType, id);
database.QueryDatabase(query);
c->Message(Chat::White, "Static Object %u unlocked for editing. You must zone out and back in to "
"make your changes, then commit them with '#object Save'.",
id);
if (sep->argnum >= 4)
c->Message(Chat::White, "NOTE: The change you specified has not been applied, since the "
"static object had not been unlocked for editing yet.");
return;
case OT_DROPPEDITEM:
c->Message(Chat::White, "ERROR: Object %u is a temporarily spawned ground spawn or dropped item, "
"which cannot be manipulated with #object. See the 'ground_spawns' table "
"in the database.",
id);
return;
case staticType:
c->Message(Chat::White, "ERROR: Object %u has been unlocked for editing, but you must zone out "
"and back in for your client to refresh its object table before you can "
"make changes to it.",
id);
return;
default:
// Unknown error preventing us from seeing the object in the zone.
c->Message(Chat::White, "ERROR: Unknown problem attempting to manipulate object %u", id);
return;
}
}
// If we're here, we have a manipulable object ready for changes.
strlwr(sep->arg[3]); // Case insensitive PropertyName
strupr(sep->arg[4]); // In case it's model name, which should always be upper-case
// Read current object info for reference
icon = o->GetIcon();
o->GetObjectData(&od);
// We'll be a little more picky with property names, to prevent errors. Check against the whole word.
if (strcmp(sep->arg[3], "model") == 0) {
if ((sep->arg[4][0] < 'A') || (sep->arg[4][0] > 'Z')) {
c->Message(Chat::White, "ERROR: Model names must begin with a letter.");
return;
}
strn0cpy(od.object_name, sep->arg[4], sizeof(od.object_name));
o->SetObjectData(&od);
c->Message(Chat::White, "Object %u now being rendered with model '%s'", id, od.object_name);
} else if (strcmp(sep->arg[3], "type") == 0) {
if ((sep->arg[4][0] < '0') || (sep->arg[4][0] > '9')) {
c->Message(Chat::White, "ERROR: Invalid type number");
return;
}
od.object_type = atoi(sep->arg[4]);
switch (od.object_type) {
case 0:
// Convert Static Object to temporary changeable type
od.object_type = staticType;
c->Message(Chat::White, "Note: Static Object will still act like tradeskill object and will not "
"reflect size, solidtype, or incline settings until committed to the "
"database with '#object Save', after which it will be unchangeable until "
"it is unlocked again with '#object Edit'.");
break;
case OT_DROPPEDITEM:
c->Message(Chat::White, "ERROR: Object Type 1 is used for temporarily spawned ground spawns and "
"dropped items, which are not supported with #object. See the "
"'ground_spawns' table in the database.");
return;
default:
c->Message(Chat::White, "Object %u changed to Tradeskill Object Type %u", id, od.object_type);
break;
}
o->SetType(od.object_type);
} else if (strcmp(sep->arg[3], "size") == 0) {
if (od.object_type != staticType) {
c->Message(
0, "ERROR: Object %u is not a Static Object and does not support the Size property",
id);
return;
}
if ((sep->arg[4][0] < '0') || (sep->arg[4][0] > '9')) {
c->Message(Chat::White, "ERROR: Invalid size specified. Please enter a number.");
return;
}
od.size = atoi(sep->arg[4]);
o->SetObjectData(&od);
if (od.size == 0) // 0 == unspecified == 100%
od.size = 100;
c->Message(Chat::White, "Static Object %u set to %u%% size. Size will take effect when you commit to the "
"database with '#object Save', after which the object will be unchangeable until "
"you unlock it again with '#object Edit' and zone out and back in.",
id, od.size);
} else if (strcmp(sep->arg[3], "solidtype") == 0) {
if (od.object_type != staticType) {
c->Message(Chat::White, "ERROR: Object %u is not a Static Object and does not support the "
"SolidType property",
id);
return;
}
if ((sep->arg[4][0] < '0') || (sep->arg[4][0] > '9')) {
c->Message(Chat::White, "ERROR: Invalid solidtype specified. Please enter a number.");
return;
}
od.solidtype = atoi(sep->arg[4]);
o->SetObjectData(&od);
c->Message(Chat::White, "Static Object %u set to SolidType %u. Change will take effect when you commit "
"to the database with '#object Save'. Support for this property is on a "
"per-model basis, mostly seen in smaller objects such as chests and tables.",
id, od.solidtype);
} else if (strcmp(sep->arg[3], "icon") == 0) {
if ((od.object_type < 2) || (od.object_type == staticType)) {
c->Message(Chat::White, "ERROR: Object %u is not a Tradeskill Object and does not support the "
"Icon property",
id);
return;
}
if ((icon = atoi(sep->arg[4])) == 0) {
c->Message(Chat::White, "ERROR: Invalid Icon specified. Please enter an icon number.");
return;
}
o->SetIcon(icon);
c->Message(Chat::White, "Tradeskill Object %u icon set to %u", id, icon);
} else if (strcmp(sep->arg[3], "incline") == 0) {
if (od.object_type != staticType) {
c->Message(
0,
"ERROR: Object %u is not a Static Object and does not support the Incline property",
id);
return;
}
if ((sep->arg[4][0] < '0') || (sep->arg[4][0] > '9')) {
c->Message(
0,
"ERROR: Invalid Incline specified. Please enter a number. Normal range is 0-512.");
return;
}
od.unknown020 = atoi(sep->arg[4]);
o->SetObjectData(&od);
c->Message(Chat::White, "Static Object %u set to %u incline. Incline will take effect when you commit to "
"the database with '#object Save', after which the object will be unchangeable "
"until you unlock it again with '#object Edit' and zone out and back in.",
id, od.unknown020);
} else {
c->Message(Chat::White, "ERROR: Unrecognized property name: %s", sep->arg[3]);
return;
}
// Repop object to have it reflect the change.
app = new EQApplicationPacket();
o->CreateDeSpawnPacket(app);
entity_list.QueueClients(0, app);
safe_delete(app);
app = new EQApplicationPacket();
o->CreateSpawnPacket(app);
entity_list.QueueClients(0, app);
safe_delete(app);
return;
}
if (strcasecmp(sep->arg[1], "move") == 0) {
if ((sep->argnum < 2) || // Not enough arguments
((id = atoi(sep->arg[2])) == 0) || // ID not specified
(((sep->arg[3][0] < '0') || (sep->arg[3][0] > '9')) && ((sep->arg[3][0] & 0xDF) != 'T') &&
(sep->arg[3][0] != '-') && (sep->arg[3][0] != '.'))) { // Location argument not specified correctly
c->Message(Chat::White, "Usage: #object Move (ObjectID) ToMe|(x y z [h])");
return;
}
if (!(o = entity_list.FindObject(id))) {
std::string query = StringFormat("SELECT zoneid, version, type FROM object WHERE id = %u", id);
auto results = database.QueryDatabase(query);
if (!results.Success() || results.RowCount() == 0) {
c->Message(Chat::White, "ERROR: Object %u not found", id);
return;
}
auto row = results.begin();
od.zone_id = atoi(row[0]);
od.zone_instance = atoi(row[1]);
od.object_type = atoi(row[2]);
if (od.zone_id != zone->GetZoneID()) {
c->Message(Chat::White, "ERROR: Object %u is not in this zone", id);
return;
}
if (od.zone_instance != zone->GetInstanceVersion()) {
c->Message(Chat::White, "ERROR: Object %u is not in this instance version", id);
return;
}
switch (od.object_type) {
case 0:
c->Message(Chat::White, "ERROR: Object %u is not yet unlocked for editing. Use '#object Edit' "
"then zone out and back in to move it.",
id);
return;
case staticType:
c->Message(Chat::White, "ERROR: Object %u has been unlocked for editing, but you must zone out "
"and back in before your client sees the change and will allow you to "
"move it.",
id);
return;
case 1:
c->Message(Chat::White, "ERROR: Object %u is a temporary spawned object and cannot be "
"manipulated with #object. See the 'ground_spawns' table in the "
"database.",
id);
return;
default:
c->Message(Chat::White, "ERROR: Object %u not located in zone.", id);
return;
}
}
// Move To Me
if ((sep->arg[3][0] & 0xDF) == 'T') {
od.x = c->GetX();
od.y = c->GetY();
od.z = c->GetZ() -
(c->GetSize() *
0.625f); // Compensate for #loc bumping up Z coordinate by 62.5% of character's size.
o->SetHeading(c->GetHeading());
// Bump player back to avoid getting stuck inside object
x2 = 10.0f * std::sin(c->GetHeading() / 256.0f * 3.14159265f);
y2 = 10.0f * std::cos(c->GetHeading() / 256.0f * 3.14159265f);
c->MovePC(c->GetX() - x2, c->GetY() - y2, c->GetZ(), c->GetHeading());
} // Move to x, y, z [h]
else {
od.x = atof(sep->arg[3]);
if (sep->argnum > 3)
od.y = atof(sep->arg[4]);
else
o->GetLocation(nullptr, &od.y, nullptr);
if (sep->argnum > 4)
od.z = atof(sep->arg[5]);
else
o->GetLocation(nullptr, nullptr, &od.z);
if (sep->argnum > 5)
o->SetHeading(atof(sep->arg[6]));
}
o->SetLocation(od.x, od.y, od.z);
// Despawn and respawn object to reflect change
app = new EQApplicationPacket();
o->CreateDeSpawnPacket(app);
entity_list.QueueClients(0, app);
safe_delete(app);
app = new EQApplicationPacket();
o->CreateSpawnPacket(app);
entity_list.QueueClients(0, app);
safe_delete(app);
return;
}
if (strcasecmp(sep->arg[1], "rotate") == 0) {
// Insufficient or invalid arguments
if ((sep->argnum < 3) || ((id = atoi(sep->arg[2])) == 0)) {
c->Message(Chat::White, "Usage: #object Rotate (ObjectID) (Heading, 0-512)");
return;
}
if ((o = entity_list.FindObject(id)) == nullptr) {
c->Message(Chat::White, "ERROR: Object %u not found in zone, or is a static object not yet unlocked with "
"'#object Edit' for editing.",
id);
return;
}
o->SetHeading(atof(sep->arg[3]));
// Despawn and respawn object to reflect change
app = new EQApplicationPacket();
o->CreateDeSpawnPacket(app);
entity_list.QueueClients(0, app);
safe_delete(app);
app = new EQApplicationPacket();
o->CreateSpawnPacket(app);
entity_list.QueueClients(0, app);
safe_delete(app);
return;
}
if (strcasecmp(sep->arg[1], "save") == 0) {
// Insufficient or invalid arguments
if ((sep->argnum < 2) || ((id = atoi(sep->arg[2])) == 0)) {
c->Message(Chat::White, "Usage: #object Save (ObjectID)");
return;
}
o = entity_list.FindObject(id);
od.zone_id = 0;
od.zone_instance = 0;
od.object_type = 0;
// If this ID isn't in the database yet, it's a new object
bNewObject = true;
std::string query = StringFormat("SELECT zoneid, version, type FROM object WHERE id = %u", id);
auto results = database.QueryDatabase(query);
if (results.Success() && results.RowCount() != 0) {
auto row = results.begin();
od.zone_id = atoi(row[0]);
od.zone_instance = atoi(row[1]);
od.object_type = atoi(row[2]);
// ID already in database. Not a new object.
bNewObject = false;
}
if (!o) {
// Object not found in zone. Can't save an object we can't see.
if (bNewObject) {
c->Message(Chat::White, "ERROR: Object %u not found", id);
return;
}
if (od.zone_id != zone->GetZoneID()) {
c->Message(Chat::White, "ERROR: Wrong Object ID. %u is not part of this zone.", id);
return;
}
if (od.zone_instance != zone->GetInstanceVersion()) {
c->Message(Chat::White, "ERROR: Wrong Object ID. %u is not part of this instance version.", id);
return;
}
if (od.object_type == 0) {
c->Message(Chat::White, "ERROR: Static Object %u has already been committed. Use '#object Edit "
"%u' and zone out and back in to make changes.",
id, id);
return;
}
if (od.object_type == 1) {
c->Message(Chat::White, "ERROR: Object %u is a temporarily spawned ground spawn or dropped item, "
"which is not supported with #object. See the 'ground_spawns' table in "
"the database.",
id);
return;
}
c->Message(Chat::White, "ERROR: Object %u not found.", id);
return;
}
// Oops! Another GM already saved an object with our id from another zone.
// We'll have to get a new one.
if ((od.zone_id > 0) && (od.zone_id != zone->GetZoneID()))
id = 0;
// Oops! Another GM already saved an object with our id from another instance.
// We'll have to get a new one.
if ((id > 0) && (od.zone_instance != zone->GetInstanceVersion()))
id = 0;
// If we're asking for a new ID, it's a new object.
bNewObject |= (id == 0);
o->GetObjectData(&od);
od.object_type = o->GetType();
icon = o->GetIcon();
// We're committing to the database now. Return temporary object type to actual.
if (od.object_type == staticType)
od.object_type = 0;
if (!bNewObject)
query = StringFormat("UPDATE object SET zoneid = %u, version = %u, "
"xpos = %.1f, ypos=%.1f, zpos=%.1f, heading=%.1f, "
"objectname = '%s', type = %u, icon = %u, "
"unknown08 = %u, unknown10 = %u, unknown20 = %u "
"WHERE ID = %u",
zone->GetZoneID(), zone->GetInstanceVersion(), od.x, od.y, od.z,
od.heading, od.object_name, od.object_type, icon, od.size,
od.solidtype, od.unknown020, id);
else if (id == 0)
query = StringFormat("INSERT INTO object "
"(zoneid, version, xpos, ypos, zpos, heading, objectname, "
"type, icon, unknown08, unknown10, unknown20) "
"VALUES (%u, %u, %.1f, %.1f, %.1f, %.1f, '%s', %u, %u, %u, %u, %u)",
zone->GetZoneID(), zone->GetInstanceVersion(), od.x, od.y, od.z,
od.heading, od.object_name, od.object_type, icon, od.size,
od.solidtype, od.unknown020);
else
query = StringFormat("INSERT INTO object "
"(id, zoneid, version, xpos, ypos, zpos, heading, objectname, "
"type, icon, unknown08, unknown10, unknown20) "
"VALUES (%u, %u, %u, %.1f, %.1f, %.1f, %.1f, '%s', %u, %u, %u, %u, %u)",
id, zone->GetZoneID(), zone->GetInstanceVersion(), od.x, od.y, od.z,
od.heading, od.object_name, od.object_type, icon, od.size,
od.solidtype, od.unknown020);
results = database.QueryDatabase(query);
if (!results.Success()) {
c->Message(Chat::White, "Database Error: %s", results.ErrorMessage().c_str());
return;
}
if (results.RowsAffected() == 0) {
// No change made, but no error message given
c->Message(Chat::White, "Database Error: Could not save change to Object %u", id);
return;
}
if (bNewObject) {
if (newid == results.LastInsertedID()) {
c->Message(Chat::White, "Saved new Object %u to database", id);
return;
}
c->Message(Chat::White, "Saved Object. NOTE: Database returned a new ID number for object: %u", newid);
id = newid;
return;
}
c->Message(Chat::White, "Saved changes to Object %u", id);
newid = id;
if (od.object_type == 0) {
// Static Object - Respawn as nonfunctional door
app = new EQApplicationPacket();
o->CreateDeSpawnPacket(app);
entity_list.QueueClients(0, app);
safe_delete(app);
entity_list.RemoveObject(o->GetID());
memset(&door, 0, sizeof(door));
strn0cpy(door.zone_name, zone->GetShortName(), sizeof(door.zone_name));
door.db_id = 1000000000 + id; // Out of range of normal use for doors.id
door.door_id = -1; // Client doesn't care if these are all the same door_id
door.pos_x = od.x; // xpos
door.pos_y = od.y; // ypos
door.pos_z = od.z; // zpos
door.heading = od.heading; // heading
strn0cpy(door.door_name, od.object_name, sizeof(door.door_name)); // objectname
// Strip trailing "_ACTORDEF" if present. Client won't accept it for doors.
uint32 len = strlen(door.door_name);
if ((len > 9) && (memcmp(&door.door_name[len - 9], "_ACTORDEF", 10) == 0))
door.door_name[len - 9] = '\0';
memcpy(door.dest_zone, "NONE", 5);
if ((door.size = od.size) == 0) // unknown08 = optional size percentage
door.size = 100;
switch (
door.opentype =
od.solidtype) // unknown10 = optional request_nonsolid (0 or 1 or experimental number)
{
case 0:
door.opentype = 31;
break;
case 1:
door.opentype = 9;
break;
}
door.incline = od.unknown020; // unknown20 = optional incline value
door.client_version_mask = 0xFFFFFFFF;
doors = new Doors(&door);
entity_list.AddDoor(doors);
app = new EQApplicationPacket(OP_SpawnDoor, sizeof(Door_Struct));
ds = (Door_Struct *)app->pBuffer;
memset(ds, 0, sizeof(Door_Struct));
memcpy(ds->name, door.door_name, 32);
ds->xPos = door.pos_x;
ds->yPos = door.pos_y;
ds->zPos = door.pos_z;
ds->heading = door.heading;
ds->incline = door.incline;
ds->size = door.size;
ds->doorId = door.door_id;
ds->opentype = door.opentype;
ds->unknown0052[9] = 1; // *ptr-1 and *ptr-3 from EntityList::MakeDoorSpawnPacket()
ds->unknown0052[11] = 1;
entity_list.QueueClients(0, app);
safe_delete(app);
c->Message(Chat::White, "NOTE: Object %u is now a static object, and is unchangeable. To make future "
"changes, use '#object Edit' to convert it to a changeable form, then zone out "
"and back in.",
id);
}
return;
}
if (strcasecmp(sep->arg[1], "copy") == 0) {
// Insufficient or invalid arguments
if ((sep->argnum < 3) ||
(((sep->arg[2][0] & 0xDF) != 'A') && ((sep->arg[2][0] < '0') || (sep->arg[2][0] > '9')))) {
c->Message(Chat::White, "Usage: #object Copy All|(ObjectID) (InstanceVersion)");
c->Message(Chat::White, "- Note: Only objects saved in the database can be copied to another instance.");
return;
}
od.zone_instance = atoi(sep->arg[3]);
if (od.zone_instance == zone->GetInstanceVersion()) {
c->Message(Chat::White, "ERROR: Source and destination instance versions are the same.");
return;
}
if ((sep->arg[2][0] & 0xDF) == 'A') {
// Copy All
std::string query =
StringFormat("INSERT INTO object "
"(zoneid, version, xpos, ypos, zpos, heading, itemid, "
"objectname, type, icon, unknown08, unknown10, unknown20) "
"SELECT zoneid, %u, xpos, ypos, zpos, heading, itemid, "
"objectname, type, icon, unknown08, unknown10, unknown20 "
"FROM object WHERE zoneid = %u) AND version = %u",
od.zone_instance, zone->GetZoneID(), zone->GetInstanceVersion());
auto results = database.QueryDatabase(query);
if (!results.Success()) {
c->Message(Chat::White, "Database Error: %s", results.ErrorMessage().c_str());
return;
}
c->Message(Chat::White, "Copied %u object%s into instance version %u", results.RowCount(),
(results.RowCount() == 1) ? "" : "s", od.zone_instance);
return;
}
id = atoi(sep->arg[2]);
std::string query = StringFormat("INSERT INTO object "
"(zoneid, version, xpos, ypos, zpos, heading, itemid, "
"objectname, type, icon, unknown08, unknown10, unknown20) "
"SELECT zoneid, %u, xpos, ypos, zpos, heading, itemid, "
"objectname, type, icon, unknown08, unknown10, unknown20 "
"FROM object WHERE id = %u AND zoneid = %u AND version = %u",
od.zone_instance, id, zone->GetZoneID(), zone->GetInstanceVersion());
auto results = database.QueryDatabase(query);
if (results.Success() && results.RowsAffected() > 0) {
c->Message(Chat::White, "Copied Object %u into instance version %u", id, od.zone_instance);
return;
}
// Couldn't copy the object.
// got an error message
if (!results.Success()) {
c->Message(Chat::White, "Database Error: %s", results.ErrorMessage().c_str());
return;
}
// No database error returned. See if we can figure out why.
query = StringFormat("SELECT zoneid, version FROM object WHERE id = %u", id);
results = database.QueryDatabase(query);
if (!results.Success())
return;
if (results.RowCount() == 0) {
c->Message(Chat::White, "ERROR: Object %u not found", id);
return;
}
auto row = results.begin();
// Wrong ZoneID?
if (atoi(row[0]) != zone->GetZoneID()) {
c->Message(Chat::White, "ERROR: Object %u is not part of this zone.", id);
return;
}
// Wrong Instance Version?
if (atoi(row[1]) != zone->GetInstanceVersion()) {
c->Message(Chat::White, "ERROR: Object %u is not part of this instance version.", id);
return;
}
// Well, NO clue at this point. Just let 'em know something screwed up.
c->Message(Chat::White, "ERROR: Unknown database error copying Object %u to instance version %u", id,
od.zone_instance);
return;
}
if (strcasecmp(sep->arg[1], "delete") == 0) {
if ((sep->argnum < 2) || ((id = atoi(sep->arg[2])) <= 0)) {
c->Message(Chat::White, "Usage: #object Delete (ObjectID) -- NOTE: Object deletions are permanent and "
"cannot be undone!");
return;
}
o = entity_list.FindObject(id);
if (o) {
// Object found in zone.
app = new EQApplicationPacket();
o->CreateDeSpawnPacket(app);
entity_list.QueueClients(nullptr, app);
entity_list.RemoveObject(o->GetID());
// Verifying ZoneID and Version in case someone else ended up adding an object with our ID
// from a different zone/version. Don't want to delete someone else's work.
std::string query = StringFormat("DELETE FROM object "
"WHERE id = %u AND zoneid = %u "
"AND version = %u LIMIT 1",
id, zone->GetZoneID(), zone->GetInstanceVersion());
auto results = database.QueryDatabase(query);
c->Message(Chat::White, "Object %u deleted", id);
return;
}
// Object not found in zone.
std::string query = StringFormat("SELECT type FROM object "
"WHERE id = %u AND zoneid = %u "
"AND version = %u LIMIT 1",
id, zone->GetZoneID(), zone->GetInstanceVersion());
auto results = database.QueryDatabase(query);
if (!results.Success())
return;
if (results.RowCount() == 0) {
c->Message(Chat::White, "ERROR: Object %u not found in this zone or instance!", id);
return;
}
auto row = results.begin();
switch (atoi(row[0])) {
case 0: // Static Object
query = StringFormat("DELETE FROM object WHERE id = %u "
"AND zoneid = %u AND version = %u LIMIT 1",
id, zone->GetZoneID(), zone->GetInstanceVersion());
results = database.QueryDatabase(query);
c->Message(Chat::White, "Object %u deleted. NOTE: This static object will remain for anyone currently in "
"the zone until they next zone out and in.",
id);
return;
case 1: // Temporary Spawn
c->Message(Chat::White, "ERROR: Object %u is a temporarily spawned ground spawn or dropped item, which "
"is not supported with #object. See the 'ground_spawns' table in the database.",
id);
return;
}
return;
}
if (strcasecmp(sep->arg[1], "undo") == 0) {
// Insufficient or invalid arguments
if ((sep->argnum < 2) || ((id = atoi(sep->arg[2])) == 0)) {
c->Message(Chat::White, "Usage: #object Undo (ObjectID) -- Reload object from database, undoing any "
"changes you have made");
return;
}
o = entity_list.FindObject(id);
if (!o) {
c->Message(Chat::White, "ERROR: Object %u not found in zone in a manipulable form. No changes to undo.",
id);
return;
}
if (o->GetType() == OT_DROPPEDITEM) {
c->Message(Chat::White, "ERROR: Object %u is a temporary spawned item and cannot be manipulated with "
"#object. See the 'ground_spawns' table in the database.",
id);
return;
}
// Despawn current item for reloading from database
app = new EQApplicationPacket();
o->CreateDeSpawnPacket(app);
entity_list.QueueClients(0, app);
entity_list.RemoveObject(o->GetID());
safe_delete(app);
std::string query = StringFormat("SELECT xpos, ypos, zpos, "
"heading, objectname, type, icon, "
"unknown08, unknown10, unknown20 "
"FROM object WHERE id = %u",
id);
auto results = database.QueryDatabase(query);
if (!results.Success() || results.RowCount() == 0) {
c->Message(Chat::White, "Database Error: %s", results.ErrorMessage().c_str());
return;
}
memset(&od, 0, sizeof(od));
auto row = results.begin();
od.x = atof(row[0]);
od.y = atof(row[1]);
od.z = atof(row[2]);
od.heading = atof(row[3]);
strn0cpy(od.object_name, row[4], sizeof(od.object_name));
od.object_type = atoi(row[5]);
icon = atoi(row[6]);
od.size = atoi(row[7]);
od.solidtype = atoi(row[8]);
od.unknown020 = atoi(row[9]);
if (od.object_type == 0)
od.object_type = staticType;
o = new Object(id, od.object_type, icon, od, nullptr);
entity_list.AddObject(o, true);
c->Message(Chat::White, "Object %u reloaded from database.", id);
return;
}
c->Message(Chat::White, usage_string);
}
void command_showspellslist(Client *c, const Seperator *sep)
{
	// Dump the AI spell list of the targeted NPC to the requesting client.
	Mob *target = c->GetTarget();

	if (target == nullptr) {
		c->Message(Chat::White, "Must target an NPC.");
		return;
	}

	if (!target->IsNPC()) {
		c->Message(Chat::White, "%s is not an NPC.", target->GetName());
		return;
	}

	target->CastToNPC()->AISpellsList(c);
}
void command_raidloot(Client *c, const Seperator *sep)
{
	// Change the raid loot type: #raidloot [LEADER/GROUPLEADER/SELECTED/ALL].
	// Only the raid leader may change it.
	if (!sep->arg[1][0]) {
		c->Message(Chat::White, "Usage: #raidloot [LEADER/GROUPLEADER/SELECTED/ALL]");
		return;
	}

	Raid *r = c->GetRaid();
	if (!r) {
		c->Message(Chat::White, "You must be in a raid to use that command.");
		return;
	}

	// Locate this client in the member list and verify raid leadership.
	for (int x = 0; x < 72; ++x) {
		if (r->members[x].member == c) {
			if (r->members[x].IsRaidLeader == 0) {
				c->Message(Chat::White, "You must be the raid leader to use this command.");
				// BUGFIX: the original only printed the message and fell
				// through, letting non-leaders change the loot type anyway.
				return;
			}
			break;
		}
	}

	if (strcasecmp(sep->arg[1], "LEADER") == 0) {
		c->Message(Chat::Yellow, "Loot type changed to: 1");
		r->ChangeLootType(1);
	}
	else if (strcasecmp(sep->arg[1], "GROUPLEADER") == 0) {
		c->Message(Chat::Yellow, "Loot type changed to: 2");
		r->ChangeLootType(2);
	}
	else if (strcasecmp(sep->arg[1], "SELECTED") == 0) {
		c->Message(Chat::Yellow, "Loot type changed to: 3");
		r->ChangeLootType(3);
	}
	else if (strcasecmp(sep->arg[1], "ALL") == 0) {
		c->Message(Chat::Yellow, "Loot type changed to: 4");
		r->ChangeLootType(4);
	}
	else {
		c->Message(Chat::White, "Usage: #raidloot [LEADER/GROUPLEADER/SELECTED/ALL]");
	}
}
void command_emoteview(Client *c, const Seperator *sep)
{
	// List every emote entry attached to the targeted NPC's emote id.
	if (!c->GetTarget() || !c->GetTarget()->IsNPC()) {
		c->Message(Chat::White, "You must target a NPC to view their emotes.");
		return;
	}

	// NOTE: the original re-checked "target is NPC" here even though the
	// guard above already returned in that case — dead code removed.
	int count = 0;
	int emoteid = c->GetTarget()->CastToNPC()->GetEmoteID();

	LinkedListIterator<NPC_Emote_Struct*> iterator(zone->NPCEmoteList);
	iterator.Reset();
	while (iterator.MoreElements()) {
		NPC_Emote_Struct* nes = iterator.GetData();
		if (emoteid == nes->emoteid) {
			c->Message(Chat::White, "EmoteID: %i Event: %i Type: %i Text: %s", nes->emoteid, nes->event_, nes->type, nes->text);
			count++;
		}
		iterator.Advance();
	}

	if (count == 0)
		c->Message(Chat::White, "No emotes found.");
	else
		c->Message(Chat::White, "%i emote(s) found", count);
}
void command_emotesearch(Client *c, const Seperator *sep)
{
	// Search the zone's NPC emote list, either by exact emote id (numeric
	// argument) or by case-insensitive substring match on the emote text.
	if (sep->arg[1][0] == 0)
		c->Message(Chat::White, "Usage: #emotesearch [search string or emoteid]");
	else
	{
		// argplus[1] is the full remainder of the command line, so multi-word
		// search strings work.
		const char *search_criteria=sep->argplus[1];
		int count=0;

		if (Seperator::IsNumber(search_criteria))
		{
			// Numeric path: match the emote id exactly and list every entry.
			uint16 emoteid = atoi(search_criteria);
			LinkedListIterator<NPC_Emote_Struct*> iterator(zone->NPCEmoteList);
			iterator.Reset();
			while(iterator.MoreElements())
			{
				NPC_Emote_Struct* nes = iterator.GetData();
				if(emoteid == nes->emoteid)
				{
					c->Message(Chat::White, "EmoteID: %i Event: %i Type: %i Text: %s", nes->emoteid, nes->event_, nes->type, nes->text);
					count++;
				}
				iterator.Advance();
			}
			if (count == 0)
				c->Message(Chat::White, "No emotes found.");
			else
				c->Message(Chat::White, "%i emote(s) found", count);
		}
		else
		{
			// Text path: upper-case both the emote text and the criteria and
			// do a substring match.  Note sText is 64 bytes, so only the
			// first 63 characters of each emote are considered for matching,
			// although the full text is printed on a hit.
			char sText[64];
			char sCriteria[515];
			strn0cpy(sCriteria, search_criteria, sizeof(sCriteria));
			strupr(sCriteria);
			char* pdest;
			LinkedListIterator<NPC_Emote_Struct*> iterator(zone->NPCEmoteList);
			iterator.Reset();
			while(iterator.MoreElements())
			{
				NPC_Emote_Struct* nes = iterator.GetData();
				strn0cpy(sText, nes->text, sizeof(sText));
				strupr(sText);
				pdest = strstr(sText, sCriteria);
				if (pdest != nullptr)
				{
					c->Message(Chat::White, "EmoteID: %i Event: %i Type: %i Text: %s", nes->emoteid, nes->event_, nes->type, nes->text);
					count++;
				}
				// Hard cap of 50 results to avoid flooding the client.
				if (count == 50)
					break;
				iterator.Advance();
			}
			if (count == 50)
				c->Message(Chat::White, "50 emotes shown...too many results.");
			else
				c->Message(Chat::White, "%i emote(s) found", count);
		}
	}
}
void command_reloademote(Client *c, const Seperator *sep)
{
	// Flush and reload the zone's NPC emote table from the database.
	zone->NPCEmoteList.Clear();
	zone->LoadNPCEmotes(&zone->NPCEmoteList);

	c->Message(Chat::White, "NPC emotes reloaded.");
}
void command_globalview(Client *c, const Seperator *sep)
{
	// List every quest global visible to the client — and, when an NPC is
	// targeted, those visible to that NPC as well.
	//
	// The original duplicated the whole collect-and-print sequence in two
	// branches (NPC target vs. none); the branches differed only in whether
	// an NPC cache/type id was present, so they are merged into one path
	// that produces identical output.
	NPC *npcmob = nullptr;
	if (c->GetTarget() && c->GetTarget()->IsNPC())
		npcmob = c->GetTarget()->CastToNPC();

	// Gather the three qglobal caches; any of them may be absent.
	QGlobalCache *npc_c  = npcmob ? npcmob->GetQGlobals() : nullptr;
	QGlobalCache *char_c = c->GetQGlobals();
	QGlobalCache *zone_c = zone->GetQGlobals();

	// Without an NPC target the npc-type filter is 0, as before.
	uint32 ntype = npcmob ? npcmob->GetNPCTypeID() : 0;

	std::list<QGlobal> globalMap;
	if (npc_c)
		QGlobalCache::Combine(globalMap, npc_c->GetBucket(), ntype, c->CharacterID(), zone->GetZoneID());
	if (char_c)
		QGlobalCache::Combine(globalMap, char_c->GetBucket(), ntype, c->CharacterID(), zone->GetZoneID());
	if (zone_c)
		QGlobalCache::Combine(globalMap, zone_c->GetBucket(), ntype, c->CharacterID(), zone->GetZoneID());

	uint32 gcount = 0;
	c->Message(Chat::White, "Name, Value");
	for (auto iter = globalMap.begin(); iter != globalMap.end(); ++iter) {
		c->Message(Chat::White, "%s %s", (*iter).name.c_str(), (*iter).value.c_str());
		++gcount;
	}
	c->Message(Chat::White, "%u globals loaded.", gcount);
}
void command_distance(Client *c, const Seperator *sep) {
	// Report how far the client's current target is from the client.
	if (!c || !c->GetTarget())
		return;

	Mob *target = c->GetTarget();
	c->Message(Chat::White, "Your target, %s, is %1.1f units from you.", c->GetTarget()->GetName(), Distance(c->GetPosition(), target->GetPosition()));
}
void command_cvs(Client *c, const Seperator *sep)
{
	// Ask the world server for a client-version summary; the reply is routed
	// back to this client by name.
	if (!c)
		return;

	auto pack = new ServerPacket(ServerOP_ClientVersionSummary, sizeof(ServerRequestClientVersionSummary_Struct));
	auto *srcvss = (ServerRequestClientVersionSummary_Struct *)pack->pBuffer;
	strn0cpy(srcvss->Name, c->GetName(), sizeof(srcvss->Name));

	worldserver.SendPacket(pack);
	safe_delete(pack);
}
void command_max_all_skills(Client *c, const Seperator *sep)
{
	// Raise every skill to its class/level cap; the specialization skills
	// (Abjure..Evocation) are set to a flat 50 instead.
	if (!c)
		return;

	for (int skill_num = 0; skill_num <= EQEmu::skills::HIGHEST_SKILL; ++skill_num) {
		auto skill = (EQEmu::skills::SkillType)skill_num;
		bool is_specialization =
			skill_num >= EQEmu::skills::SkillSpecializeAbjure &&
			skill_num <= EQEmu::skills::SkillSpecializeEvocation;

		if (is_specialization)
			c->SetSkill(skill, 50);
		else
			c->SetSkill(skill, database.GetSkillCap(c->GetClass(), skill, c->GetLevel()));
	}
}
void command_showbonusstats(Client *c, const Seperator *sep)
{
	// Show the target's item and/or spell bonus stats.
	// Usage: #showbonusstats [all|item|spell] — no argument behaves as "all".
	if (c->GetTarget() == 0)
		c->Message(Chat::White, "ERROR: No target!");
	else if (!c->GetTarget()->IsMob() && !c->GetTarget()->IsClient())
		c->Message(Chat::White, "ERROR: Target is not a Mob or Player!");
	else {
		bool bAll = false;
		if(sep->arg[1][0] == '\0' || strcasecmp(sep->arg[1], "all") == 0)
			bAll = true;
		if (bAll || (strcasecmp(sep->arg[1], "item")==0)) {
			c->Message(Chat::White, "Target Item Bonuses:");
			c->Message(Chat::White, " Accuracy: %i%% Divine Save: %i%%", c->GetTarget()->GetItemBonuses().Accuracy, c->GetTarget()->GetItemBonuses().DivineSaveChance);
			// NOTE(review): HitChance appears to be stored in 15ths of a
			// percent (hence the / 15) — confirm against the bonus calc code.
			c->Message(Chat::White, " Flurry: %i%% HitChance: %i%%", c->GetTarget()->GetItemBonuses().FlurryChance, c->GetTarget()->GetItemBonuses().HitChance / 15);
		}
		if (bAll || (strcasecmp(sep->arg[1], "spell")==0)) {
			c->Message(Chat::White, " Target Spell Bonuses:");
			c->Message(Chat::White, " Accuracy: %i%% Divine Save: %i%%", c->GetTarget()->GetSpellBonuses().Accuracy, c->GetTarget()->GetSpellBonuses().DivineSaveChance);
			c->Message(Chat::White, " Flurry: %i%% HitChance: %i%% ", c->GetTarget()->GetSpellBonuses().FlurryChance, c->GetTarget()->GetSpellBonuses().HitChance / 15);
		}
		c->Message(Chat::White, " Effective Casting Level: %i", c->GetTarget()->GetCasterLevel(0));
	}
}
void command_reloadallrules(Client *c, const Seperator *sep)
{
	// Tell world to reload the rule set everywhere, world server included.
	if (!c)
		return;

	auto pack = new ServerPacket(ServerOP_ReloadRules, 0);
	worldserver.SendPacket(pack);
	c->Message(Chat::Red, "Successfully sent the packet to world to reload rules globally. (including world)");
	safe_delete(pack);
}
void command_reloadworldrules(Client *c, const Seperator *sep)
{
	// Tell world to reload its rule set (world server only, not zones).
	if (!c)
		return;

	auto pack = new ServerPacket(ServerOP_ReloadRulesWorld, 0);
	worldserver.SendPacket(pack);
	c->Message(Chat::Red, "Successfully sent the packet to world to reload rules. (only world)");
	safe_delete(pack);
}
void command_camerashake(Client *c, const Seperator *sep)
{
	// Broadcast a camera-shake request via world. Args: duration, intensity.
	if (!c)
		return;

	if (!sep->arg[1][0] || !sep->arg[2][0]) {
		c->Message(Chat::Red, "Usage -- #camerashake [duration], [intensity [1-10])");
		return;
	}

	auto pack = new ServerPacket(ServerOP_CameraShake, sizeof(ServerCameraShake_Struct));
	auto *scss = (ServerCameraShake_Struct *)pack->pBuffer;
	scss->duration = atoi(sep->arg[1]);
	scss->intensity = atoi(sep->arg[2]);

	worldserver.SendPacket(pack);
	c->Message(Chat::Red, "Successfully sent the packet to world! Shake it, world, shake it!");
	safe_delete(pack);
}
void command_disarmtrap(Client *c, const Seperator *sep)
{
	// GM hook into the LDoN mechanical-trap disarm handler for the target.
	Mob *target = c->GetTarget();
	if (target == nullptr) {
		c->Message(Chat::Red, "You must have a target.");
		return;
	}

	if (!target->IsNPC())
		return;

	if (!c->HasSkill(EQEmu::skills::SkillDisarmTraps)) {
		c->Message(Chat::Red, "You do not have the disarm trap skill.");
		return;
	}

	if (DistanceSquaredNoZ(c->GetPosition(), target->GetPosition()) > RuleI(Adventure, LDoNTrapDistanceUse)) {
		c->Message(Chat::Red, "%s is too far away.", target->GetCleanName());
		return;
	}

	c->HandleLDoNDisarm(target->CastToNPC(), c->GetSkill(EQEmu::skills::SkillDisarmTraps), LDoNTypeMechanical);
}
void command_sensetrap(Client *c, const Seperator *sep)
{
	// GM hook into the LDoN mechanical-trap sense handler for the target.
	Mob *target = c->GetTarget();
	if (target == nullptr) {
		c->Message(Chat::Red, "You must have a target.");
		return;
	}

	if (!target->IsNPC())
		return;

	if (!c->HasSkill(EQEmu::skills::SkillSenseTraps)) {
		c->Message(Chat::Red, "You do not have the sense traps skill.");
		return;
	}

	if (DistanceSquaredNoZ(c->GetPosition(), target->GetPosition()) > RuleI(Adventure, LDoNTrapDistanceUse)) {
		c->Message(Chat::Red, "%s is too far away.", target->GetCleanName());
		return;
	}

	c->HandleLDoNSenseTraps(target->CastToNPC(), c->GetSkill(EQEmu::skills::SkillSenseTraps), LDoNTypeMechanical);
}
void command_picklock(Client *c, const Seperator *sep)
{
	// GM hook into the LDoN lock-picking handler for the target.
	Mob *target = c->GetTarget();
	if (target == nullptr) {
		c->Message(Chat::Red, "You must have a target.");
		return;
	}

	if (!target->IsNPC())
		return;

	if (!c->HasSkill(EQEmu::skills::SkillPickLock)) {
		c->Message(Chat::Red, "You do not have the pick locks skill.");
		return;
	}

	if (DistanceSquaredNoZ(c->GetPosition(), target->GetPosition()) > RuleI(Adventure, LDoNTrapDistanceUse)) {
		c->Message(Chat::Red, "%s is too far away.", target->GetCleanName());
		return;
	}

	c->HandleLDoNPickLock(target->CastToNPC(), c->GetSkill(EQEmu::skills::SkillPickLock), LDoNTypeMechanical);
}
void command_profanity(Client *c, const Seperator *sep)
{
	// Manage the server-wide profanity (censorship) list.
	// Subcommands: list, clear, add <word>, del <word>, reload.
	//
	// Control-flow note: the while(true) runs at most once.  Every recognized
	// subcommand falls through to the shared "show current list" popup and
	// returns; an unrecognized subcommand breaks out to the usage text below.
	std::string arg1(sep->arg[1]);
	while (true) {
		if (arg1.compare("list") == 0) {
			// do nothing
		}
		else if (arg1.compare("clear") == 0) {
			EQEmu::ProfanityManager::DeleteProfanityList(&database);
			// Tell world so all zones refresh their censorship state.
			auto pack = new ServerPacket(ServerOP_RefreshCensorship);
			worldserver.SendPacket(pack);
			safe_delete(pack);
		}
		else if (arg1.compare("add") == 0) {
			if (!EQEmu::ProfanityManager::AddProfanity(&database, sep->arg[2]))
				c->Message(Chat::Red, "Could not add '%s' to the profanity list.", sep->arg[2]);
			auto pack = new ServerPacket(ServerOP_RefreshCensorship);
			worldserver.SendPacket(pack);
			safe_delete(pack);
		}
		else if (arg1.compare("del") == 0) {
			if (!EQEmu::ProfanityManager::RemoveProfanity(&database, sep->arg[2]))
				c->Message(Chat::Red, "Could not delete '%s' from the profanity list.", sep->arg[2]);
			auto pack = new ServerPacket(ServerOP_RefreshCensorship);
			worldserver.SendPacket(pack);
			safe_delete(pack);
		}
		else if (arg1.compare("reload") == 0) {
			if (!EQEmu::ProfanityManager::UpdateProfanityList(&database))
				c->Message(Chat::Red, "Could not reload the profanity list.");
			auto pack = new ServerPacket(ServerOP_RefreshCensorship);
			worldserver.SendPacket(pack);
			safe_delete(pack);
		}
		else {
			// Unknown subcommand: fall out to the usage text.
			break;
		}

		// Shared tail: show the (possibly just-modified) list in a popup.
		std::string popup;
		const auto &list = EQEmu::ProfanityManager::GetProfanityList();
		for (const auto &iter : list) {
			popup.append(iter);
			popup.append("<br>");
		}
		if (list.empty())
			popup.append("** Censorship Inactive **<br>");
		else
			popup.append("** End of List **<br>");
		c->SendPopupToClient("Profanity List", popup.c_str());
		return;
	}

	c->Message(Chat::White, "Usage: #profanity [list] - shows profanity list");
	c->Message(Chat::White, "Usage: #profanity [clear] - deletes all entries");
	c->Message(Chat::White, "Usage: #profanity [add] [<word>] - adds entry");
	c->Message(Chat::White, "Usage: #profanity [del] [<word>] - deletes entry");
	c->Message(Chat::White, "Usage: #profanity [reload] - reloads profanity list");
}
void command_mysql(Client *c, const Seperator *sep)
{
	// In-game MySQL CLI: #mysql query "..." [-s] [-h].
	if (!sep->arg[1][0] || !sep->arg[2][0]) {
		c->Message(Chat::White, "Usage: #mysql query \"Query here\"");
		return;
	}

	if (strcasecmp(sep->arg[1], "help") == 0) {
		c->Message(Chat::White, "MYSQL In-Game CLI Interface:");
		c->Message(Chat::White, "Example: #mysql query \"Query goes here quoted\" -s -h");
		c->Message(Chat::White, "To use 'like \"%%something%%\" replace the %% with #");
		c->Message(Chat::White, "Example: #mysql query \"select * from table where name like \"#something#\"");
		c->Message(Chat::White, "-s - Spaces select entries apart");
		c->Message(Chat::White, "-h - Colors every other select result");
		return;
	}

	if (strcasecmp(sep->arg[1], "query") == 0) {
		// Parse trailing option switches (-s spacing, -h alternating highlight).
		int argnum = 3;
		bool optionS = false;
		bool optionH = false;
		while (sep->arg[argnum] && strlen(sep->arg[argnum]) > 1) {
			switch (sep->arg[argnum][1]) {
				case 's': optionS = true; break;
				case 'h': optionH = true; break;
				default:
					c->Message(Chat::Yellow, "%s, there is no option '%c'", c->GetName(), sep->arg[argnum][1]);
					return;
			}
			++argnum;
		}

		int highlightTextIndex = 0;
		std::string query(sep->arg[2]);
		// Swap # for % so LIKE queries can be entered without fighting the parser.
		std::replace(query.begin(), query.end(), '#', '%');

		auto results = database.QueryDatabase(query);
		if (!results.Success()) {
			return;
		}

		// Rebuild a display copy: replace # with %% so it survives vsnprintf in Message().
		query = sep->arg[2];
		// BUGFIX: use size_type instead of int — comparing an int against
		// std::string::npos relies on implementation-defined conversions.
		std::string::size_type pos = query.find('#');
		while (pos != std::string::npos) {
			query.erase(pos, 1);
			query.insert(pos, "%%");
			pos = query.find('#');
		}

		c->Message(Chat::Yellow, "---Running query: '%s'", query.c_str());

		for (auto row = results.begin(); row != results.end(); ++row) {
			std::stringstream lineText;
			std::vector<std::string> lineVec;
			// BUGFIX: iterate the columns of each row — the body indexes
			// FieldName(i) and row[i], which are per-column accessors; the
			// original iterated RowCount(), printing the wrong number of
			// fields whenever rows != columns.
			for (int i = 0; i < results.ColumnCount(); i++) {
				// split lines that could overflow the buffer in Client::Message and get cut off
				// This will crash MQ2 @ 4000 since their internal buffer is only 2048.
				// Reducing it to 2000 fixes that but splits more results from tables with a lot of columns.
				if (lineText.str().length() > 4000) {
					lineVec.push_back(lineText.str());
					lineText.str("");
				}
				lineText << results.FieldName(i) << ":" << "[" << (row[i] ? row[i] : "nullptr") << "] ";
			}
			lineVec.push_back(lineText.str());

			if (optionS) // This provides spacing for the space switch
				c->Message(Chat::White, " ");
			if (optionH) // This option will highlight every other row
				highlightTextIndex = 1 - highlightTextIndex;

			// BUGFIX: pass result text as an argument, not as the format
			// string — a literal '%' in the data would be misinterpreted.
			for (size_t lineNum = 0; lineNum < lineVec.size(); ++lineNum)
				c->Message(highlightTextIndex, "%s", lineVec[lineNum].c_str());
		}
	}
}
void command_xtargets(Client *c, const Seperator *sep)
{
	// View or resize the extended-target window of the targeted client
	// (or of yourself when no client is targeted).
	Client *t = c;
	if (c->GetTarget() && c->GetTarget()->IsClient())
		t = c->GetTarget()->CastToClient();

	if (!sep->arg[1][0]) {
		t->ShowXTargets(c);
		return;
	}

	uint8 NewMax = atoi(sep->arg[1]);
	if (NewMax < 5 || NewMax > XTARGET_HARDCAP) {
		c->Message(Chat::Red, "Number of XTargets must be between 5 and %i", XTARGET_HARDCAP);
		return;
	}

	t->SetMaxXTargets(NewMax);
	c->Message(Chat::White, "Max number of XTargets set to %i", NewMax);
}
void command_zopp(Client *c, const Seperator *sep)
{ // - Owner only command..non-targetable to eliminate malicious or mischievious activities.
	// Send a fake item packet (trade or summon/limbo) to the client for testing.
	if (!c)
		return;
	else if (sep->argnum < 3 || sep->argnum > 4)
		c->Message(Chat::White, "Usage: #zopp [trade/summon] [slot id] [item id] [*charges]");
	// CLEANUP: the original spelled this as `!strcasecmp(...) == 0`, which
	// only works via operator-precedence accident; written explicitly here.
	else if (strcasecmp(sep->arg[1], "trade") != 0 && strcasecmp(sep->arg[1], "t") != 0 &&
		 strcasecmp(sep->arg[1], "summon") != 0 && strcasecmp(sep->arg[1], "s") != 0)
		c->Message(Chat::White, "Usage: #zopp [trade/summon] [slot id] [item id] [*charges]");
	else if (!sep->IsNumber(2) || !sep->IsNumber(3) || (sep->argnum == 4 && !sep->IsNumber(4)))
		c->Message(Chat::White, "Usage: #zopp [trade/summon] [slot id] [item id] [*charges]");
	else {
		ItemPacketType packettype;
		if (strcasecmp(sep->arg[1], "trade") == 0 || strcasecmp(sep->arg[1], "t") == 0) {
			packettype = ItemPacketTrade;
		}
		else {
			packettype = ItemPacketLimbo;
		}

		int16 slotid = atoi(sep->arg[2]);
		uint32 itemid = atoi(sep->arg[3]);
		int16 charges = sep->argnum == 4 ? atoi(sep->arg[4]) : 1; // defaults to 1 charge if not specified

		const EQEmu::ItemData* FakeItem = database.GetItem(itemid);
		if (!FakeItem) {
			c->Message(Chat::Red, "Error: Item [%u] is not a valid item id.", itemid);
			return;
		}

		// BUGFIX: the original looked the same item id up a second time just
		// to read MinStatus; reuse the record we already have.
		if (static_cast<int16>(FakeItem->MinStatus) > c->Admin()) {
			c->Message(Chat::Red, "Error: Insufficient status to use this command.");
			return;
		}

		if (charges < 0 || charges > FakeItem->StackSize) {
			c->Message(Chat::Red, "Warning: The specified charge count does not meet expected criteria!");
			c->Message(Chat::White, "Processing request..results may cause unpredictable behavior.");
		}

		EQEmu::ItemInstance* FakeItemInst = database.CreateItem(FakeItem, charges);
		c->SendItemPacket(slotid, FakeItemInst, packettype);
		// CLEANUP: was `std::abs(charges == 1)` — abs() of a bool is the bool.
		c->Message(Chat::White, "Sending zephyr op packet to client - [%s] %s (%u) with %i %s to slot %i.",
			packettype == ItemPacketTrade ? "Trade" : "Summon", FakeItem->Name, itemid, charges,
			charges == 1 ? "charge" : "charges", slotid);
		safe_delete(FakeItemInst);
	}
}
void command_augmentitem(Client *c, const Seperator *sep)
{
	// Open an augmentation request against the client's current tradeskill object.
	if (!c)
		return;

	// BUGFIX: the original allocated sizeof(AugmentItem_Struct) array
	// elements (`new AugmentItem_Struct[sizeof(AugmentItem_Struct)]`) when
	// only a single struct is ever used; allocate one element instead.
	// Kept as a one-element array so safe_delete_array stays correct.
	auto in_augment = new AugmentItem_Struct[1];
	in_augment->container_slot = 1000; // <watch>
	in_augment->augment_slot = -1;
	if (c->GetTradeskillObject() != nullptr)
		Object::HandleAugmentation(c, in_augment, c->GetTradeskillObject());
	safe_delete_array(in_augment);
}
void command_questerrors(Client *c, const Seperator *sep)
{
	// Show up to 30 of the quest parser's accumulated script errors.
	std::list<std::string> err;
	parse->GetErrors(err);
	c->Message(Chat::White, "Current Quest Errors:");

	int shown = 0;
	for (auto iter = err.begin(); iter != err.end(); ++iter) {
		if (shown >= 30) {
			c->Message(Chat::White, "Maximum of 30 Errors shown...");
			break;
		}
		// BUGFIX: pass the error text as a "%s" argument rather than as the
		// format string — a '%' inside a quest error would previously be
		// interpreted by the printf machinery in Message().
		c->Message(Chat::White, "%s", iter->c_str());
		++shown;
	}
}
void command_enablerecipe(Client *c, const Seperator *sep)
{
	// Enable a tradeskill recipe by id: #enablerecipe recipe_id
	if (!c)
		return;

	if (sep->argnum != 1) {
		c->Message(Chat::White, "Invalid number of arguments.\nUsage: #enablerecipe recipe_id");
		return;
	}

	uint32 recipe_id = atoi(sep->arg[1]);
	if (recipe_id == 0) {
		c->Message(Chat::White, "Invalid recipe id.\nUsage: #enablerecipe recipe_id");
		return;
	}

	if (database.EnableRecipe(recipe_id))
		c->Message(Chat::White, "Recipe enabled.");
	else
		c->Message(Chat::White, "Recipe not enabled.");
}
void command_disablerecipe(Client *c, const Seperator *sep)
{
	// Disable a tradeskill recipe by id: #disablerecipe recipe_id
	if (!c)
		return;

	if (sep->argnum != 1) {
		c->Message(Chat::White, "Invalid number of arguments.\nUsage: #disablerecipe recipe_id");
		return;
	}

	uint32 recipe_id = atoi(sep->arg[1]);
	if (recipe_id == 0) {
		c->Message(Chat::White, "Invalid recipe id.\nUsage: #disablerecipe recipe_id");
		return;
	}

	if (database.DisableRecipe(recipe_id))
		c->Message(Chat::White, "Recipe disabled.");
	else
		c->Message(Chat::White, "Recipe not disabled.");
}
void command_npctype_cache(Client *c, const Seperator *sep)
{
	// Clear cached npc_type records, either individual ids or all of them.
	// Usage: #npctype_cache [npctype_id ...] | #npctype_cache all
	if (sep->argnum > 0) {
		for (int i = 0; i < sep->argnum; ++i) {
			if (strcasecmp(sep->arg[i + 1], "all") == 0) {
				c->Message(Chat::White, "Clearing all npc types from the cache.");
				// -1 is the sentinel for "clear everything".
				zone->ClearNPCTypeCache(-1);
			}
			else {
				int id = atoi(sep->arg[i + 1]);
				if (id > 0) {
					c->Message(Chat::White, "Clearing npc type %d from the cache.", id);
					zone->ClearNPCTypeCache(id);
					// NOTE(review): returns after the first valid numeric id,
					// so any further ids on the command line are ignored,
					// while "all" continues scanning — confirm whether
					// multiple ids per invocation were intended.
					return;
				}
			}
		}
	}
	else {
		c->Message(Chat::White, "Usage:");
		c->Message(Chat::White, "#npctype_cache [npctype_id] ...");
		c->Message(Chat::White, "#npctype_cache all");
	}
}
void command_merchantopenshop(Client *c, const Seperator *sep)
{
	// Force the targeted merchant NPC to open its shop.
	Mob *merchant = c->GetTarget();
	if (merchant == nullptr || merchant->GetClass() != MERCHANT) {
		c->Message(Chat::White, "You must target a merchant to open their shop.");
		return;
	}

	merchant->CastToNPC()->MerchantOpenShop();
}
void command_merchantcloseshop(Client *c, const Seperator *sep)
{
	// Force the targeted merchant NPC to close its shop.
	Mob *merchant = c->GetTarget();
	if (merchant == nullptr || merchant->GetClass() != MERCHANT) {
		c->Message(Chat::White, "You must target a merchant to close their shop.");
		return;
	}

	merchant->CastToNPC()->MerchantCloseShop();
}
void command_shownumhits(Client *c, const Seperator *sep)
{
	// Display the client's current numhits (limited-use effect) counters.
	c->ShowNumHits();
}
void command_shownpcgloballoot(Client *c, const Seperator *sep)
{
	// List the global loot entries applicable to the targeted NPC.
	Mob *tar = c->GetTarget();
	if (tar == nullptr || !tar->IsNPC()) {
		c->Message(Chat::White, "You must target an NPC to use this command.");
		return;
	}

	NPC *npc = tar->CastToNPC();
	c->Message(Chat::White, "GlobalLoot for %s (%d)", npc->GetName(), npc->GetNPCTypeID());
	zone->ShowNPCGlobalLoot(c, npc);
}
void command_tune(Client *c, const Seperator *sep)
{
//Work in progress - Kayen
if(sep->arg[1][0] == '\0' || !strcasecmp(sep->arg[1], "help")) {
c->Message(Chat::White, "Syntax: #tune [subcommand].");
c->Message(Chat::White, "-- Tune System Commands --");
c->Message(Chat::White, "-- Usage: Returning recommended combat statistical values based on a desired outcome.");
c->Message(Chat::White, "-- Note: If targeted mob does not have a target (ie not engaged in combat), YOU will be considered the target.");
c->Message(Chat::White, "-- Warning: The calculations done in this process are intense and can potentially cause zone crashes depending on parameters set, use with caution!");
c->Message(Chat::White, "-- Below are OPTIONAL parameters.");
c->Message(Chat::White, "-- Note: [interval] Determines how fast the stat being checked increases/decreases till it finds the best result. Default [ATK/AC 50][Acc/Avoid 10] ");
c->Message(Chat::White, "-- Note: [loop_max] Determines how many iterations are done to increases/decreases the stat till it finds the best result. Default [ATK/AC 100][Acc/Avoid 1000]");
c->Message(Chat::White, "-- Note: [Stat Override] Will override that stat on mob being checkd with the specified value. Default=0");
c->Message(Chat::White, "-- Note: [Info Level] How much statistical detail is displayed[0 - 3]. Default=0 ");
c->Message(Chat::White, "-- Note: Results are only approximations usually accurate to +/- 2 intervals.");
c->Message(Chat::White, "... ");
c->Message(Chat::White, "...### Category A ### Target = ATTACKER ### YOU or Target's Target = DEFENDER ###");
c->Message(Chat::White, "...### Category B ### Target = DEFENDER ### YOU or Target's Target = ATTACKER ###");
c->Message(Chat::White, "... ");
c->Message(Chat::White, "...#Returns recommended ATK adjustment +/- on ATTACKER that will result in an average mitigation pct on DEFENDER. ");
c->Message(Chat::White, "...tune FindATK [A/B] [pct mitigation] [interval][loop_max][AC Overwride][Info Level]");
c->Message(Chat::White, "... ");
c->Message(Chat::White, "...#Returns recommended AC adjustment +/- on DEFENDER for an average mitigation pct from ATTACKER. ");
c->Message(Chat::White, "...tune FindAC [A/B] [pct mitigation] [interval][loop_max][ATK Overwride][Info Level] ");
c->Message(Chat::White, "... ");
c->Message(Chat::White, "...#Returns recommended Accuracy adjustment +/- on ATTACKER that will result in a hit chance pct on DEFENDER. ");
c->Message(Chat::White, "...tune FindAccuracy [A/B] [hit chance] [interval][loop_max][Avoidance Overwride][Info Level]");
c->Message(Chat::White, "... ");
c->Message(Chat::White, "...#Returns recommended Avoidance adjustment +/- on DEFENDER for in a hit chance pct from ATTACKER. ");
c->Message(Chat::White, "...tune FindAvoidance [A/B] [pct mitigation] [interval][loop_max][Accuracy Overwride][Info Level] ");
return;
}
//Default is category A for attacker/defender settings, which then are swapped under category B.
Mob* defender = c;
Mob* attacker = c->GetTarget();
if (!attacker)
{
c->Message(Chat::White, "#Tune - Error no target selected. [#Tune help]");
return;
}
Mob* ttarget = attacker->GetTarget();
if (ttarget)
defender = ttarget;
if(!strcasecmp(sep->arg[1], "FindATK"))
{
float pct_mitigation = atof(sep->arg[3]);
int interval = atoi(sep->arg[4]);
int max_loop = atoi(sep->arg[5]);
int ac_override = atoi(sep->arg[6]);
int info_level = atoi(sep->arg[7]);
if (!pct_mitigation)
{
c->Message(Chat::Red, "#Tune - Error must enter the desired percent mitigation on defender. Ie. Defender to mitigate on average 20 pct of max damage.");
return;
}
if (!interval)
interval = 50;
if (!max_loop)
max_loop = 100;
if(!ac_override)
ac_override = 0;
if (!info_level)
info_level = 1;
if(!strcasecmp(sep->arg[2], "A"))
c->Tune_FindATKByPctMitigation(defender, attacker, pct_mitigation, interval, max_loop,ac_override,info_level);
else if(!strcasecmp(sep->arg[2], "B"))
c->Tune_FindATKByPctMitigation(attacker,defender, pct_mitigation, interval, max_loop,ac_override,info_level);
else {
c->Message(Chat::White, "#Tune - Error no category selcted. [#Tune help]");
c->Message(Chat::White, "Usage #tune FindATK [A/B] [pct mitigation] [interval][loop_max][AC Overwride][Info Level] ");
c->Message(Chat::White, "Example #tune FindATK A 60");
}
return;
}
if(!strcasecmp(sep->arg[1], "FindAC"))
{
float pct_mitigation = atof(sep->arg[3]);
int interval = atoi(sep->arg[4]);
int max_loop = atoi(sep->arg[5]);
int atk_override = atoi(sep->arg[6]);
int info_level = atoi(sep->arg[7]);
if (!pct_mitigation)
{
c->Message(Chat::Red, "#Tune - Error must enter the desired percent mitigation on defender. Ie. Defender to mitigate on average 20 pct of max damage.");
return;
}
if (!interval)
interval = 50;
if (!max_loop)
max_loop = 100;
if(!atk_override)
atk_override = 0;
if (!info_level)
info_level = 1;
if(!strcasecmp(sep->arg[2], "A"))
c->Tune_FindACByPctMitigation(defender, attacker, pct_mitigation, interval, max_loop,atk_override,info_level);
else if(!strcasecmp(sep->arg[2], "B"))
c->Tune_FindACByPctMitigation(attacker, defender, pct_mitigation, interval, max_loop,atk_override,info_level);
else {
c->Message(Chat::White, "#Tune - Error no category selcted. [#Tune help]");
c->Message(Chat::White, "Usage #tune FindAC [A/B] [pct mitigation] [interval][loop_max][ATK Overwride][Info Level] ");
c->Message(Chat::White, "Example #tune FindAC A 60");
}
return;
}
if(!strcasecmp(sep->arg[1], "FindAccuracy"))
{
float hit_chance = atof(sep->arg[3]);
int interval = atoi(sep->arg[4]);
int max_loop = atoi(sep->arg[5]);
int avoid_override = atoi(sep->arg[6]);
int info_level = atoi(sep->arg[7]);
if (!hit_chance)
{
c->Message(Chat::NPCQuestSay, "#Tune - Error must enter the desired percent mitigation on defender. Ie. Defender to mitigate on average 20 pct of max damage.");
return;
}
if (!interval)
interval = 10;
if (!max_loop)
max_loop = 1000;
if(!avoid_override)
avoid_override = 0;
if (!info_level)
info_level = 1;
if (hit_chance > RuleR(Combat,MaxChancetoHit) || hit_chance < RuleR(Combat,MinChancetoHit))
{
c->Message(Chat::NPCQuestSay, "#Tune - Error hit chance out of bounds. [Max %.2f Min .2f]", RuleR(Combat,MaxChancetoHit),RuleR(Combat,MinChancetoHit));
return;
}
if(!strcasecmp(sep->arg[2], "A"))
c->Tune_FindAccuaryByHitChance(defender, attacker, hit_chance, interval, max_loop,avoid_override,info_level);
else if(!strcasecmp(sep->arg[2], "B"))
c->Tune_FindAccuaryByHitChance(attacker, defender, hit_chance, interval, max_loop,avoid_override,info_level);
else {
c->Message(Chat::White, "#Tune - Error no category selcted. [#Tune help]");
c->Message(Chat::White, "Usage #tune FindAcccuracy [A/B] [hit chance] [interval][loop_max][Avoidance Overwride][Info Level]");
c->Message(Chat::White, "Exampled #tune FindAccuracy B 30");
}
return;
}
if(!strcasecmp(sep->arg[1], "FindAvoidance"))
{
float hit_chance = atof(sep->arg[3]);
int interval = atoi(sep->arg[4]);
int max_loop = atoi(sep->arg[5]);
int acc_override = atoi(sep->arg[6]);
int info_level = atoi(sep->arg[7]);
if (!hit_chance)
{
c->Message(Chat::White, "#Tune - Error must enter the desired hit chance on defender. Ie. Defender to have hit chance of 40 pct.");
return;
}
if (!interval)
interval = 10;
if (!max_loop)
max_loop = 1000;
if(!acc_override)
acc_override = 0;
if (!info_level)
info_level = 1;
if (hit_chance > RuleR(Combat,MaxChancetoHit) || hit_chance < RuleR(Combat,MinChancetoHit))
{
c->Message(Chat::NPCQuestSay, "#Tune - Error hit chance out of bounds. [Max %.2f Min .2f]", RuleR(Combat,MaxChancetoHit),RuleR(Combat,MinChancetoHit));
return;
}
if(!strcasecmp(sep->arg[2], "A"))
c->Tune_FindAvoidanceByHitChance(defender, attacker, hit_chance, interval, max_loop,acc_override, info_level);
else if(!strcasecmp(sep->arg[2], "B"))
c->Tune_FindAvoidanceByHitChance(attacker, defender, hit_chance, interval, max_loop,acc_override, info_level);
else {
c->Message(Chat::White, "#Tune - Error no category selcted. [#Tune help]");
c->Message(Chat::White, "Usage #tune FindAvoidance [A/B] [hit chance] [interval][loop_max][Accuracy Overwride][Info Level]");
c->Message(Chat::White, "Exampled #tune FindAvoidance B 30");
}
return;
}
return;
}
// #logtest <count> - GM debug command: emits <count> LogDebug lines, each
// stamped with the CPU time elapsed since the loop started. Used to measure
// logging throughput. Does nothing if the first argument is not numeric.
void command_logtest(Client *c, const Seperator *sep){
	if (sep->IsNumber(1)){
		// Hoist the parse out of the loop condition; previously atoi() was
		// re-evaluated on every iteration.
		const uint32 iterations = static_cast<uint32>(atoi(sep->arg[1]));
		clock_t t = std::clock(); /* Function timer start */
		for (uint32 i = 0; i < iterations; i++){
			LogDebug("[[{}]] Test #2 Took [{}] seconds", i, ((float)(std::clock() - t)) / CLOCKS_PER_SEC);
		}
	}
}
// #crashtest - intentionally crashes the zone process by memcpy()ing into a
// null pointer, to exercise crash logging / core-dump handling. The client
// message is sent before the deliberate fault. Do not "fix" this function.
void command_crashtest(Client *c, const Seperator *sep)
{
c->Message(Chat::White, "Alright, now we get an GPF ;) ");
char* gpf = 0;
memcpy(gpf, "Ready to crash", 30);
}
// #logs - GM command to inspect and adjust the in-memory log settings of this
// zone process.
//   #logs reload_all    - tells world to broadcast a settings reload to all zones
//   #logs list_settings - dumps per-category console/file/gmsay levels
//   #logs set [console|file|gmsay] <category_id> <level> - live override
// Changes made via "set" last only for the lifetime of the zone process.
void command_logs(Client *c, const Seperator *sep){
int logs_set = 0;
if (sep->argnum > 0) {
/* #logs reload_all */
if (strcasecmp(sep->arg[1], "reload_all") == 0){
auto pack = new ServerPacket(ServerOP_ReloadLogs, 0);
worldserver.SendPacket(pack);
c->Message(Chat::Red, "Successfully sent the packet to world to reload log settings from the database for all zones");
safe_delete(pack);
}
/* #logs list_settings */
// Also shown when "set" is issued without a category id (arg 3 empty).
if (strcasecmp(sep->arg[1], "list_settings") == 0 ||
(strcasecmp(sep->arg[1], "set") == 0 && strcasecmp(sep->arg[3], "") == 0)) {
c->Message(Chat::White, "[Category ID | console | file | gmsay | Category Description]");
int redisplay_columns = 0;
for (int i = 0; i < Logs::LogCategory::MaxCategoryID; i++) {
// Re-print the header every 10 rows so it stays visible while scrolling.
if (redisplay_columns == 10) {
c->Message(Chat::White, "[Category ID | console | file | gmsay | Category Description]");
redisplay_columns = 0;
}
c->Message(
0,
StringFormat(
"--- %i | %u | %u | %u | %s",
i,
LogSys.log_settings[i].log_to_console,
LogSys.log_settings[i].log_to_file,
LogSys.log_settings[i].log_to_gmsay,
Logs::LogCategoryName[i]
).c_str());
redisplay_columns++;
}
}
/* #logs set */
if (strcasecmp(sep->arg[1], "set") == 0){
// arg[2] = output sink, arg[3] = category id, arg[4] = debug level.
if (strcasecmp(sep->arg[2], "console") == 0){
LogSys.log_settings[atoi(sep->arg[3])].log_to_console = atoi(sep->arg[4]);
logs_set = 1;
}
else if (strcasecmp(sep->arg[2], "file") == 0){
LogSys.log_settings[atoi(sep->arg[3])].log_to_file = atoi(sep->arg[4]);
logs_set = 1;
}
else if (strcasecmp(sep->arg[2], "gmsay") == 0){
LogSys.log_settings[atoi(sep->arg[3])].log_to_gmsay = atoi(sep->arg[4]);
logs_set = 1;
}
else{
c->Message(Chat::White, "--- #logs set [console|file|gmsay] <category_id> <debug_level (1-3)> - Sets log settings during the lifetime of the zone");
c->Message(Chat::White, "--- #logs set gmsay 20 1 - Would output Quest errors to gmsay");
}
if (logs_set == 1){
c->Message(Chat::Yellow, "Your Log Settings have been applied");
c->Message(Chat::Yellow, "Output Method: %s :: Debug Level: %i - Category: %s", sep->arg[2], atoi(sep->arg[4]), Logs::LogCategoryName[atoi(sep->arg[3])]);
}
/* We use a general 'is_category_enabled' now, let's update when we update any output settings
This is used in hot places of code to check if its enabled in any way before triggering logs
*/
// NOTE: this runs even when the sink name was invalid (logs_set == 0), so a
// bad "set" still toggles is_category_enabled for the given category.
if (atoi(sep->arg[4]) > 0){
LogSys.log_settings[atoi(sep->arg[3])].is_category_enabled = 1;
}
else{
LogSys.log_settings[atoi(sep->arg[3])].is_category_enabled = 0;
}
}
}
else {
c->Message(Chat::White, "#logs usage:");
c->Message(Chat::White, "--- #logs reload_all - Reload all settings in world and all zone processes with what is defined in the database");
c->Message(Chat::White, "--- #logs list_settings - Shows current log settings and categories loaded into the current process' memory");
c->Message(Chat::White, "--- #logs set [console|file|gmsay] <category_id> <debug_level (1-3)> - Sets log settings during the lifetime of the zone");
}
}
// #mysqltest <count> - GM benchmark command: runs "SELECT * FROM `zone`"
// <count> times against the database and logs the total elapsed CPU time.
// The query results are intentionally discarded.
void command_mysqltest(Client *c, const Seperator *sep)
{
	clock_t t = std::clock(); /* Function timer start */
	if (sep->IsNumber(1)){
		// Parse once; previously atoi() ran on every loop-condition check,
		// skewing the very thing being benchmarked.
		const uint32 iterations = static_cast<uint32>(atoi(sep->arg[1]));
		t = std::clock();
		for (uint32 i = 0; i < iterations; i++){
			std::string query = "SELECT * FROM `zone`";
			auto results = database.QueryDatabase(query); // result deliberately unused
		}
	}
	LogDebug("MySQL Test Took [{}] seconds", ((float)(std::clock() - t)) / CLOCKS_PER_SEC);
}
// #resetaa_timer [all | timer_id] - resets one or all Alternate Advancement
// ability reuse timers. Applies to the targeted client when one is selected,
// otherwise to the GM issuing the command.
void command_resetaa_timer(Client *c, const Seperator *sep) {
	Client *target = nullptr;
	if(!c->GetTarget() || !c->GetTarget()->IsClient()) {
		target = c;
	} else {
		target = c->GetTarget()->CastToClient();
	}
	if(sep->IsNumber(1))
	{
		int timer_id = atoi(sep->arg[1]);
		// Bug fix: the resolved target was previously ignored and the reset
		// (and the reported name) always applied to the issuing GM `c`.
		c->Message(Chat::White, "Reset of timer %i for %s", timer_id, target->GetName());
		target->ResetAlternateAdvancementTimer(timer_id);
	}
	else if(!strcasecmp(sep->arg[1], "all"))
	{
		c->Message(Chat::White, "Reset all timers for %s", target->GetName());
		target->ResetAlternateAdvancementTimers();
	}
	else
	{
		c->Message(Chat::White, "usage: #resetaa_timer [all | timer_id]");
	}
}
// #reloadaa - reloads Alternate Advancement data for this zone from the
// database, then pushes the refreshed AA stats to every connected client.
void command_reloadaa(Client *c, const Seperator *sep) {
c->Message(Chat::White, "Reloading Alternate Advancement Data...");
zone->LoadAlternateAdvancement();
c->Message(Chat::White, "Alternate Advancement Data Reloaded");
entity_list.SendAlternateAdvancementStats();
}
// Return true when *name* names a file that can currently be opened for
// reading (used below to locate the shared_memory binary).
inline bool file_exists(const std::string& name) {
	return std::ifstream(name.c_str()).good();
}
// #hotfix - rebuilds shared memory into the *other* segment and switches all
// processes over to it. The segment name alternates between "" and "hotfix_"
// on each invocation so the live segment is never rebuilt in place. The
// rebuild runs `shared_memory` via system() on a detached background thread,
// then stores the new name and notifies world (ServerOP_ChangeSharedMem).
void command_hotfix(Client *c, const Seperator *sep)
{
std::string hotfix;
database.GetVariable("hotfix_name", hotfix);
std::string hotfix_name;
// Flip to the segment name not currently in use.
if (!strcasecmp(hotfix.c_str(), "hotfix_")) {
hotfix_name = "";
}
else {
hotfix_name = "hotfix_";
}
c->Message(Chat::White, "Creating and applying hotfix");
// NOTE(review): `c` is captured by raw pointer into a detached thread; if the
// client disconnects before the rebuild finishes this looks like a dangling
// dereference at the final Message() — confirm client lifetime guarantees.
std::thread t1(
[c, hotfix_name]() {
#ifdef WIN32
// The empty `if(...)...;` / `if(...){}` wrappers exist only to silence
// warn-unused-result on system(); the exit code is deliberately ignored.
if(hotfix_name.length() > 0) {
if(system(StringFormat("shared_memory -hotfix=%s", hotfix_name.c_str()).c_str()));
} else {
if(system(StringFormat("shared_memory").c_str()));
}
#else
// Prefer the binary under ./bin when present (installed layout).
std::string shared_memory_path = "./shared_memory";
if (file_exists("./bin/shared_memory")) {
shared_memory_path = "./bin/shared_memory";
}
if (hotfix_name.length() > 0) {
if (system(StringFormat("%s -hotfix=%s", shared_memory_path.c_str(), hotfix_name.c_str()).c_str())) {}
}
else {
if (system(StringFormat("%s", shared_memory_path.c_str()).c_str())) {}
}
#endif
database.SetVariable("hotfix_name", hotfix_name);
ServerPacket pack(ServerOP_ChangeSharedMem, hotfix_name.length() + 1);
if (hotfix_name.length() > 0) {
strcpy((char *) pack.pBuffer, hotfix_name.c_str());
}
worldserver.SendPacket(&pack);
if (c) { c->Message(Chat::White, "Hotfix applied"); }
}
);
t1.detach();
}
// #load_shared_memory <segment> - rebuilds the named shared memory segment on
// a detached background thread (via the external `shared_memory` binary)
// WITHOUT switching to it; use #apply_shared_memory to activate it. Refuses
// to rebuild the segment that is currently active.
void command_load_shared_memory(Client *c, const Seperator *sep) {
std::string hotfix;
database.GetVariable("hotfix_name", hotfix);
std::string hotfix_name;
if(strcasecmp(hotfix.c_str(), sep->arg[1]) == 0) {
c->Message(Chat::White, "Cannot attempt to load this shared memory segment as it is already loaded.");
return;
}
hotfix_name = sep->arg[1];
c->Message(Chat::White, "Loading shared memory segment %s", hotfix_name.c_str());
// NOTE(review): `c` is captured by raw pointer into a detached thread; if the
// client disconnects before the rebuild finishes, the Message() at the end
// looks like a dangling dereference — confirm client lifetime guarantees.
std::thread t1([c,hotfix_name]() {
#ifdef WIN32
// `if(system(...));` silences warn-unused-result; exit codes are ignored.
if(hotfix_name.length() > 0) {
if(system(StringFormat("shared_memory -hotfix=%s", hotfix_name.c_str()).c_str()));
} else {
if(system(StringFormat("shared_memory").c_str()));
}
#else
if(hotfix_name.length() > 0) {
if(system(StringFormat("./shared_memory -hotfix=%s", hotfix_name.c_str()).c_str()));
}
else {
if(system(StringFormat("./shared_memory").c_str()));
}
#endif
c->Message(Chat::White, "Shared memory segment finished loading.");
});
t1.detach();
}
// #apply_shared_memory <segment> - records <segment> as the active hotfix
// name in the database and tells world (ServerOP_ChangeSharedMem) to switch
// every process over to that shared memory segment.
void command_apply_shared_memory(Client *c, const Seperator *sep) {
	std::string current_segment;
	database.GetVariable("hotfix_name", current_segment);

	const std::string segment_name = sep->arg[1];
	c->Message(Chat::White, "Applying shared memory segment %s", segment_name.c_str());
	database.SetVariable("hotfix_name", segment_name);

	ServerPacket pack(ServerOP_ChangeSharedMem, segment_name.length() + 1);
	if (!segment_name.empty()) {
		strcpy((char *) pack.pBuffer, segment_name.c_str());
	}
	worldserver.SendPacket(&pack);
}
// #reloadperlexportsettings - asks world to broadcast a reload of the Perl
// event-export settings to all zone processes.
void command_reloadperlexportsettings(Client *c, const Seperator *sep)
{
	if (!c) {
		return;
	}
	auto pack = new ServerPacket(ServerOP_ReloadPerlExportSettings, 0);
	worldserver.SendPacket(pack);
	c->Message(Chat::Red, "Successfully sent the packet to world to reload Perl Export settings");
	safe_delete(pack);
}
// #trapinfo - reports information about the traps in this zone to the GM.
void command_trapinfo(Client *c, const Seperator *sep)
{
entity_list.GetTrapInfo(c);
}
// #reloadtraps - resets all traps in the current zone (both bool arguments
// are passed as true; see EntityList::UpdateAllTraps for their meaning).
void command_reloadtraps(Client *c, const Seperator *sep)
{
entity_list.UpdateAllTraps(true, true);
c->Message(Chat::Default, "Traps reloaded for %s.", zone->GetShortName());
}
// #scale - applies global base NPC scaling either to the targeted NPC
// (one argument: static/dynamic) or zone-wide by name search / "all"
// (two arguments, plus a hidden third "apply" argument that the generated
// saylink uses to confirm the zone-wide change).
void command_scale(Client *c, const Seperator *sep)
{
if (sep->argnum == 0) {
c->Message(Chat::Yellow, "# Usage # ");
c->Message(Chat::Yellow, "#scale [static/dynamic] (With targeted NPC)");
c->Message(Chat::Yellow, "#scale [npc_name_search] [static/dynamic] (To make zone-wide changes)");
c->Message(Chat::Yellow, "#scale all [static/dynamic]");
return;
}
/**
* Targeted changes
*/
if (c->GetTarget() && c->GetTarget()->IsNPC() && sep->argnum < 2) {
NPC * npc = c->GetTarget()->CastToNPC();
bool apply_status = false;
if (strcasecmp(sep->arg[1], "dynamic") == 0) {
c->Message(Chat::Yellow, "Applying global base scaling to npc dynamically (All stats set to zeroes)...");
apply_status = npc_scale_manager->ApplyGlobalBaseScalingToNPCDynamically(npc);
}
else if (strcasecmp(sep->arg[1], "static") == 0) {
c->Message(Chat::Yellow, "Applying global base scaling to npc statically (Copying base stats onto NPC)...");
apply_status = npc_scale_manager->ApplyGlobalBaseScalingToNPCStatically(npc);
}
else {
// Unrecognized mode with a target selected: silently do nothing.
return;
}
if (apply_status) {
c->Message(Chat::Yellow, "Applied to NPC '%s' successfully!", npc->GetName());
}
else {
c->Message(Chat::Yellow, "Failed to load scaling data from the database "
"for this npc / type, see 'NPCScaling' log for more info");
}
}
else if (c->GetTarget() && sep->argnum < 2) {
c->Message(Chat::Yellow, "Target must be an npc!");
}
/**
* Zonewide
*/
if (sep->argnum > 1) {
std::string scale_type;
if (strcasecmp(sep->arg[2], "dynamic") == 0) {
scale_type = "dynamic";
}
else if (strcasecmp(sep->arg[2], "static") == 0) {
scale_type = "static";
}
if (scale_type.length() <= 0) {
c->Message(Chat::Yellow, "You must first set if you intend on using static versus dynamic for these changes");
c->Message(Chat::Yellow, "#scale [npc_name_search] [static/dynamic]");
c->Message(Chat::Yellow, "#scale all [static/dynamic]");
return;
}
std::string search_string = sep->arg[1];
auto &entity_list_search = entity_list.GetNPCList();
int found_count = 0;
for (auto &itr : entity_list_search) {
NPC *entity = itr.second;
std::string entity_name = entity->GetName();
/**
* Filter by name
*/
if (search_string.length() > 0 && entity_name.find(search_string) == std::string::npos && strcasecmp(sep->arg[1], "all") != 0) {
continue;
}
// "(Searching)" = dry run listing; "(Applying)" only when the third
// argument is "apply" (set by the confirmation saylink below).
std::string status = "(Searching)";
if (strcasecmp(sep->arg[3], "apply") == 0) {
status = "(Applying)";
if (strcasecmp(sep->arg[2], "dynamic") == 0) {
npc_scale_manager->ApplyGlobalBaseScalingToNPCDynamically(entity);
}
if (strcasecmp(sep->arg[2], "static") == 0) {
npc_scale_manager->ApplyGlobalBaseScalingToNPCStatically(entity);
}
}
// NOTE: "y %0.f" is an unusual spelling of "%.0f" (zero precision).
c->Message(
15,
"| ID %5d | %s | x %.0f | y %0.f | z %.0f | DBID %u %s",
entity->GetID(),
entity->GetName(),
entity->GetX(),
entity->GetY(),
entity->GetZ(),
entity->GetNPCTypeID(),
status.c_str()
);
found_count++;
}
if (strcasecmp(sep->arg[3], "apply") == 0) {
c->Message(Chat::Yellow, "%s scaling applied against (%i) NPC's", sep->arg[2], found_count);
}
else {
std::string saylink = StringFormat(
"#scale %s %s apply",
sep->arg[1],
sep->arg[2]
);
c->Message(Chat::Yellow, "Found (%i) NPC's that match this search...", found_count);
c->Message(
Chat::Yellow, "To apply these changes, click <%s> or type %s",
EQEmu::SayLinkEngine::GenerateQuestSaylink(saylink, false, "Apply").c_str(),
saylink.c_str()
);
}
}
}
// #databuckets view [partial key] [limit <n>] | #databuckets delete <key>
// GM inspection tool for the data_buckets table: "view" lists up to <n>
// (default 50) buckets matching an optional key substring, both in chat
// (with a Delete saylink per row) and in a popup table; "delete" removes a
// single bucket by exact key.
void command_databuckets(Client *c, const Seperator *sep)
{
	if (sep->arg[1][0] == 0) {
		c->Message(Chat::Yellow, "Usage: #databuckets view (partial key)|(limit) OR #databuckets delete (key)");
		return;
	}
	if (strcasecmp(sep->arg[1], "view") == 0) {
		std::string key_filter;
		uint8 limit = 50;
		// Scan args 2..3 for a "limit" keyword followed by the numeric value.
		for (int i = 2; i < 4; i++) {
			if (sep->arg[i][0] == '\0')
				break;
			if (strcasecmp(sep->arg[i], "limit") == 0) {
				limit = (uint8)atoi(sep->arg[i + 1]);
				continue;
			}
		}
		// NOTE: when arg 2 is the "limit" keyword itself it also becomes the
		// key filter — pre-existing quirk, kept for compatibility.
		if (sep->arg[2]) {
			key_filter = str_tolower(sep->arg[2]);
		}
		// NOTE(review): key_filter is interpolated straight into the LIKE
		// clause; GM input is trusted here but this is not injection-safe.
		std::string query = "SELECT `id`, `key`, `value`, `expires` FROM data_buckets";
		if (!key_filter.empty()) query += StringFormat(" WHERE `key` LIKE '%%%s%%'", key_filter.c_str());
		query += StringFormat(" LIMIT %u", limit);
		auto results = database.QueryDatabase(query);
		if (!results.Success())
			return;
		if (results.RowCount() == 0) {
			c->Message(Chat::Yellow, "No data_buckets found");
			return;
		}
		int _ctr = 0;
		// put in window for easier readability in case want command line for something else
		std::string window_title = "Data Buckets";
		std::string window_text =
			"<table>"
			"<tr>"
			"<td>ID</td>"
			"<td>Expires</td>"
			"<td>Key</td>"
			"<td>Value</td>"
			"</tr>";
		for (auto row = results.begin(); row != results.end(); ++row) {
			auto id = static_cast<uint32>(atoi(row[0]));
			std::string key = row[1];
			std::string value = row[2];
			std::string expires = row[3];
			window_text.append(StringFormat(
				"<tr>"
				"<td>%u</td>"
				"<td>%s</td>"
				"<td>%s</td>"
				"<td>%s</td>"
				"</tr>",
				id,
				expires.c_str(),
				key.c_str(),
				value.c_str()
			));
			_ctr++;
			std::string del_saylink = StringFormat("#databuckets delete %s", key.c_str());
			// Bug fix: the format string previously only consumed two of the
			// four arguments ("%s : %s"), so the value was never shown in chat.
			c->Message(Chat::White, "%s : %s%s%s",
				EQEmu::SayLinkEngine::GenerateQuestSaylink(del_saylink, false, "Delete").c_str(), key.c_str(), " Value: ", value.c_str());
		}
		window_text.append("</table>");
		c->SendPopupToClient(window_title.c_str(), window_text.c_str());
		std::string response = _ctr > 0 ? StringFormat("Found %i matching data buckets", _ctr).c_str() : "No Databuckets found.";
		c->Message(Chat::Yellow, response.c_str());
	}
	else if (strcasecmp(sep->arg[1], "delete") == 0)
	{
		if (DataBucket::DeleteData(sep->argplus[2]))
			c->Message(Chat::Yellow, "data bucket %s deleted.", sep->argplus[2]);
		else
			c->Message(Chat::Red, "An error occurred deleting data bucket %s", sep->argplus[2]);
		return;
	}
}
// #who [filter] - lists characters logged in within the last 10 minutes
// (last_login within 600 seconds), with clickable saylinks that re-run #who
// filtered by class, race, zone, guild, account name, or account IP, plus a
// Goto link per player. The optional argument is a case-insensitive
// substring matched against all of those fields.
void command_who(Client *c, const Seperator *sep)
{
std::string query =
"SELECT\n"
" character_data.account_id,\n"
" character_data.name,\n"
" character_data.zone_id,\n"
" COALESCE((select zone.short_name from zone where zoneidnumber = character_data.zone_id LIMIT 1), \"Not Found\") as zone_name,\n"
" character_data.zone_instance,\n"
" COALESCE((select guilds.name from guilds where id = ((select guild_id from guild_members where char_id = character_data.id))), \"\") as guild_name,\n"
" character_data.level,\n"
" character_data.race,\n"
" character_data.class,\n"
" COALESCE((select account.status from account where account.id = character_data.account_id LIMIT 1), 0) as account_status,\n"
" COALESCE((select account.name from account where account.id = character_data.account_id LIMIT 1), \"\") as account_name,\n"
" COALESCE((select account_ip.ip from account_ip where account_ip.accid = character_data.account_id ORDER BY account_ip.lastused DESC LIMIT 1), \"\") as account_ip\n"
"FROM\n"
" character_data\n"
"WHERE\n"
" last_login > (UNIX_TIMESTAMP() - 600)\n"
"ORDER BY character_data.name;";
auto results = database.QueryDatabase(query);
if (!results.Success())
return;
if (results.RowCount() == 0) {
c->Message(Chat::Yellow, "No results found");
return;
}
std::string search_string;
if (sep->arg[1]) {
search_string = str_tolower(sep->arg[1]);
}
int found_count = 0;
c->Message(Chat::Magenta, "Players in EverQuest");
c->Message(Chat::Magenta, "--------------------");
for (auto row = results.begin(); row != results.end(); ++row) {
auto account_id = static_cast<uint32>(atoi(row[0]));
std::string player_name = row[1];
auto zone_id = static_cast<uint32>(atoi(row[2]));
std::string zone_short_name = row[3];
auto zone_instance = static_cast<uint32>(atoi(row[4]));
std::string guild_name = row[5];
auto player_level = static_cast<uint32>(atoi(row[6]));
auto player_race = static_cast<uint32>(atoi(row[7]));
auto player_class = static_cast<uint32>(atoi(row[8]));
auto account_status = static_cast<uint32>(atoi(row[9]));
std::string account_name = row[10];
std::string account_ip = row[11];
// Base (level-1) class name is used for filtering/saylinks so that
// level-based titles still match a plain class-name search.
std::string base_class_name = GetClassIDName(static_cast<uint8>(player_class), 1);
std::string displayed_race_name = GetRaceIDName(static_cast<uint16>(player_race));
if (search_string.length() > 0) {
bool found_search_term =
(
str_tolower(player_name).find(search_string) != std::string::npos ||
str_tolower(zone_short_name).find(search_string) != std::string::npos ||
str_tolower(displayed_race_name).find(search_string) != std::string::npos ||
str_tolower(base_class_name).find(search_string) != std::string::npos ||
str_tolower(guild_name).find(search_string) != std::string::npos ||
str_tolower(account_name).find(search_string) != std::string::npos ||
str_tolower(account_ip).find(search_string) != std::string::npos
);
if (!found_search_term) {
continue;
}
}
std::string displayed_guild_name;
if (guild_name.length() > 0) {
// Quote the guild name so multi-word guilds survive as one argument.
displayed_guild_name = EQEmu::SayLinkEngine::GenerateQuestSaylink(
StringFormat(
"#who \"%s\"",
guild_name.c_str()),
false,
StringFormat("<%s>", guild_name.c_str())
);
}
std::string goto_saylink = EQEmu::SayLinkEngine::GenerateQuestSaylink(
StringFormat("#goto %s", player_name.c_str()), false, "Goto"
);
std::string display_class_name = GetClassIDName(static_cast<uint8>(player_class), static_cast<uint8>(player_level));
c->Message(
5, "%s[%u %s] %s (%s) %s ZONE: %s (%u) (%s) (%s) (%s)",
(account_status > 0 ? "* GM * " : ""),
player_level,
EQEmu::SayLinkEngine::GenerateQuestSaylink(StringFormat("#who %s", base_class_name.c_str()), false, display_class_name).c_str(),
player_name.c_str(),
EQEmu::SayLinkEngine::GenerateQuestSaylink(StringFormat("#who %s", displayed_race_name.c_str()), false, displayed_race_name).c_str(),
displayed_guild_name.c_str(),
EQEmu::SayLinkEngine::GenerateQuestSaylink(StringFormat("#who %s", zone_short_name.c_str()), false, zone_short_name).c_str(),
zone_instance,
goto_saylink.c_str(),
EQEmu::SayLinkEngine::GenerateQuestSaylink(StringFormat("#who %s", account_name.c_str()), false, account_name).c_str(),
EQEmu::SayLinkEngine::GenerateQuestSaylink(StringFormat("#who %s", account_ip.c_str()), false, account_ip).c_str()
);
found_count++;
}
std::string message = (
found_count > 0 ?
StringFormat("There is %i player(s) in EverQuest", found_count).c_str() :
"There are no players in EverQuest that match those who filters."
);
c->Message(Chat::Magenta, message.c_str());
}
// #network getopt all            - dump every daybreak connection option
// #network setopt <name> <value> - live-override one option on this client's
//                                  connection manager.
// Improvements over the previous version: SetOptions() is applied once after
// the matched assignment instead of being duplicated in every branch, and
// non-numeric values no longer let std::stoull/std::stod exceptions escape
// the command handler.
void command_network(Client *c, const Seperator *sep)
{
	if (!strcasecmp(sep->arg[1], "getopt"))
	{
		auto eqsi = c->Connection();
		auto manager = eqsi->GetManager();
		auto opts = manager->GetOptions();
		if (!strcasecmp(sep->arg[2], "all"))
		{
			c->Message(Chat::White, "max_packet_size: %llu", (uint64_t)opts.daybreak_options.max_packet_size);
			c->Message(Chat::White, "max_connection_count: %llu", (uint64_t)opts.daybreak_options.max_connection_count);
			c->Message(Chat::White, "keepalive_delay_ms: %llu", (uint64_t)opts.daybreak_options.keepalive_delay_ms);
			c->Message(Chat::White, "resend_delay_factor: %.2f", opts.daybreak_options.resend_delay_factor);
			c->Message(Chat::White, "resend_delay_ms: %llu", (uint64_t)opts.daybreak_options.resend_delay_ms);
			c->Message(Chat::White, "resend_delay_min: %llu", (uint64_t)opts.daybreak_options.resend_delay_min);
			c->Message(Chat::White, "resend_delay_max: %llu", (uint64_t)opts.daybreak_options.resend_delay_max);
			c->Message(Chat::White, "connect_delay_ms: %llu", (uint64_t)opts.daybreak_options.connect_delay_ms);
			c->Message(Chat::White, "connect_stale_ms: %llu", (uint64_t)opts.daybreak_options.connect_stale_ms);
			c->Message(Chat::White, "stale_connection_ms: %llu", (uint64_t)opts.daybreak_options.stale_connection_ms);
			c->Message(Chat::White, "crc_length: %llu", (uint64_t)opts.daybreak_options.crc_length);
			c->Message(Chat::White, "hold_size: %llu", (uint64_t)opts.daybreak_options.hold_size);
			c->Message(Chat::White, "hold_length_ms: %llu", (uint64_t)opts.daybreak_options.hold_length_ms);
			c->Message(Chat::White, "simulated_in_packet_loss: %llu", (uint64_t)opts.daybreak_options.simulated_in_packet_loss);
			c->Message(Chat::White, "simulated_out_packet_loss: %llu", (uint64_t)opts.daybreak_options.simulated_out_packet_loss);
			c->Message(Chat::White, "tic_rate_hertz: %.2f", opts.daybreak_options.tic_rate_hertz);
			c->Message(Chat::White, "resend_timeout: %llu", (uint64_t)opts.daybreak_options.resend_timeout);
			c->Message(Chat::White, "connection_close_time: %llu", (uint64_t)opts.daybreak_options.connection_close_time);
			c->Message(Chat::White, "encode_passes[0]: %llu", (uint64_t)opts.daybreak_options.encode_passes[0]);
			c->Message(Chat::White, "encode_passes[1]: %llu", (uint64_t)opts.daybreak_options.encode_passes[1]);
			c->Message(Chat::White, "port: %llu", (uint64_t)opts.daybreak_options.port);
		}
		else {
			c->Message(Chat::White, "Unknown get option: %s", sep->arg[2]);
			c->Message(Chat::White, "Available options:");
			// TODO: per-option getters; only the aggregate dump is supported.
			c->Message(Chat::White, "all");
		}
	}
	else if (!strcasecmp(sep->arg[1], "setopt"))
	{
		auto eqsi = c->Connection();
		auto manager = eqsi->GetManager();
		auto opts = manager->GetOptions();
		if (!strcasecmp(sep->arg[3], ""))
		{
			c->Message(Chat::White, "Missing value for set");
			return;
		}
		std::string value = sep->arg[3];
		bool option_found = true;
		try {
			if (!strcasecmp(sep->arg[2], "max_connection_count")) {
				opts.daybreak_options.max_connection_count = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "keepalive_delay_ms")) {
				opts.daybreak_options.keepalive_delay_ms = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "resend_delay_factor")) {
				opts.daybreak_options.resend_delay_factor = std::stod(value);
			}
			else if (!strcasecmp(sep->arg[2], "resend_delay_ms")) {
				opts.daybreak_options.resend_delay_ms = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "resend_delay_min")) {
				opts.daybreak_options.resend_delay_min = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "resend_delay_max")) {
				opts.daybreak_options.resend_delay_max = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "connect_delay_ms")) {
				opts.daybreak_options.connect_delay_ms = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "connect_stale_ms")) {
				opts.daybreak_options.connect_stale_ms = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "stale_connection_ms")) {
				opts.daybreak_options.stale_connection_ms = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "hold_size")) {
				opts.daybreak_options.hold_size = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "hold_length_ms")) {
				opts.daybreak_options.hold_length_ms = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "simulated_in_packet_loss")) {
				opts.daybreak_options.simulated_in_packet_loss = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "simulated_out_packet_loss")) {
				opts.daybreak_options.simulated_out_packet_loss = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "resend_timeout")) {
				opts.daybreak_options.resend_timeout = std::stoull(value);
			}
			else if (!strcasecmp(sep->arg[2], "connection_close_time")) {
				opts.daybreak_options.connection_close_time = std::stoull(value);
			}
			else {
				option_found = false;
			}
		}
		catch (const std::exception &) {
			// std::stoull/std::stod throw std::invalid_argument/out_of_range
			// on bad input; previously this propagated out of the handler.
			c->Message(Chat::White, "Invalid value for option %s: %s", sep->arg[2], value.c_str());
			return;
		}
		if (option_found) {
			// Apply once rather than repeating SetOptions() in every branch.
			manager->SetOptions(opts);
		}
		else {
			c->Message(Chat::White, "Unknown set option: %s", sep->arg[2]);
			c->Message(Chat::White, "Available options:");
			c->Message(Chat::White, "max_connection_count");
			c->Message(Chat::White, "keepalive_delay_ms");
			c->Message(Chat::White, "resend_delay_factor");
			c->Message(Chat::White, "resend_delay_ms");
			c->Message(Chat::White, "resend_delay_min");
			c->Message(Chat::White, "resend_delay_max");
			c->Message(Chat::White, "connect_delay_ms");
			c->Message(Chat::White, "connect_stale_ms");
			c->Message(Chat::White, "stale_connection_ms");
			c->Message(Chat::White, "hold_size");
			c->Message(Chat::White, "hold_length_ms");
			c->Message(Chat::White, "simulated_in_packet_loss");
			c->Message(Chat::White, "simulated_out_packet_loss");
			c->Message(Chat::White, "resend_timeout");
			c->Message(Chat::White, "connection_close_time");
		}
	}
	else {
		c->Message(Chat::White, "Unknown command: %s", sep->arg[1]);
		c->Message(Chat::White, "Network commands avail:");
		c->Message(Chat::White, "getopt optname - Retrieve the current option value set.");
		c->Message(Chat::White, "setopt optname - Set the current option allowed.");
	}
}
// All new code added to command.cpp should be BEFORE this comment line. Do not append code to this file below the BOTS code block.
#ifdef BOTS
#include "bot_command.h"
// Function delegate to support the command interface for Bots with the client.
// Function delegate to support the command interface for Bots with the client.
// Rewrites the incoming "#bot ..." message so it begins with BOT_COMMAND_CHAR
// and hands it to the bot command dispatcher; unrecognized commands fall back
// to the quest EVENT_COMMAND handler before reporting an error.
void command_bot(Client *c, const Seperator *sep)
{
std::string bot_message = sep->msg;
if (bot_message.compare("#bot") == 0) {
bot_message[0] = BOT_COMMAND_CHAR;
}
else {
// NOTE(review): find_first_not_of("#bot") treats "#bot" as a *set of
// characters* {#, b, o, t}, not a prefix. For "#bot <cmd>" this stops at
// the space after "#bot", which is then overwritten with BOT_COMMAND_CHAR,
// so it works — but any message whose fifth character is #, b, o or t
// (e.g. a hypothetical "#bottle") would be truncated differently. Confirm
// only "#bot"-prefixed messages are routed here.
bot_message = bot_message.substr(bot_message.find_first_not_of("#bot"));
bot_message[0] = BOT_COMMAND_CHAR;
}
// -2 means the dispatcher did not recognize the command.
if (bot_command_dispatch(c, bot_message.c_str()) == -2) {
if (parse->PlayerHasQuestSub(EVENT_COMMAND)) {
int i = parse->EventPlayer(EVENT_COMMAND, c, bot_message, 0);
if (i == 0 && !RuleB(Chat, SuppressCommandErrors)) {
c->Message(Chat::Red, "Bot command '%s' not recognized.", bot_message.c_str());
}
}
else {
if (!RuleB(Chat, SuppressCommandErrors))
c->Message(Chat::Red, "Bot command '%s' not recognized.", bot_message.c_str());
}
}
}
#endif
| 1 | 9,807 | I know this is legal, but I prefer we put brackets on our calls | EQEmu-Server | cpp |
@@ -1,4 +1,4 @@
-//snippet-sourcedescription:[DeleteServerCertificate.java demonstrates how to delete an AWS Identity and Access Management (IAM) server certificate.]
+//snippet-sourcedescription:[DeleteServerCertificate.java demonstrates how to delete an AWS Identity and Access Management (AWS IAM) server certificate.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM] | 1 | //snippet-sourcedescription:[DeleteServerCertificate.java demonstrates how to delete an AWS Identity and Access Management (IAM) server certificate.]
//snippet-keyword:[AWS SDK for Java v2]
//snippet-keyword:[Code Sample]
//snippet-service:[AWS IAM]
//snippet-sourcetype:[full-example]
//snippet-sourcedate:[11/02/2020]
//snippet-sourceauthor:[scmacdon-aws]
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package com.example.iam;
// snippet-start:[iam.java2.delete_server_certificate.import]
import software.amazon.awssdk.services.iam.model.DeleteServerCertificateRequest;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.iam.IamClient;
import software.amazon.awssdk.services.iam.model.IamException;
// snippet-end:[iam.java2.delete_server_certificate.import]
public class DeleteServerCertificate {
public static void main(String[] args) {
final String USAGE = "\n" +
"Usage:\n" +
" DeleteServerCertificate <certName> \n\n" +
"Where:\n" +
" certName - a certificate name to delete. \n\n" ;
if (args.length != 1) {
System.out.println(USAGE);
System.exit(1);
}
// Read the command line argument
String certName = args[0];
Region region = Region.AWS_GLOBAL;
IamClient iam = IamClient.builder()
.region(region)
.build();
deleteCert(iam, certName) ;
System.out.println("Done");
iam.close();
}
// snippet-start:[iam.java2.delete_server_certificate.main]
public static void deleteCert(IamClient iam,String certName ) {
try {
DeleteServerCertificateRequest request =
DeleteServerCertificateRequest.builder()
.serverCertificateName(certName)
.build();
iam.deleteServerCertificate(request);
System.out.println("Successfully deleted server certificate " +
certName);
} catch (IamException e) {
System.err.println(e.awsErrorDetails().errorMessage());
System.exit(1);
}
}
// snippet-end:[iam.java2.delete_server_certificate.main]
}
| 1 | 18,237 | AWS Identity and Access Management (IAM) | awsdocs-aws-doc-sdk-examples | rb |
@@ -0,0 +1,18 @@
+package org.openqa.selenium;
+
+/**
+ * Created by James Reed on 11/04/2016.
+ * Thrown to indicate that a click was attempted on an element but was intercepted by another
+ * element on top of it
+ */
+public class InterceptingElementException extends InvalidElementStateException {
+
+ public InterceptingElementException(String message) {
+ super(message);
+ }
+
+ public InterceptingElementException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+} | 1 | 1 | 13,182 | We keep who wrote the code anonymous. | SeleniumHQ-selenium | py |
|
@@ -135,10 +135,14 @@ export const readableLargeNumber = ( number, currencyCode = false ) => {
if ( false !== currencyCode && '' !== readableNumber ) {
const formatedParts = new Intl.NumberFormat( navigator.language, { style: 'currency', currency: currencyCode } ).formatToParts( number );
- const decimal = formatedParts.find( part => 'decimal' === part.type ).value;
+ let decimal = formatedParts.find( part => 'decimal' === part.type );
+ if ( ! isUndefined( decimal ) ) {
+ decimal = decimal.value;
+ }
+
const currency = formatedParts.find( part => 'currency' === part.type ).value;
- if ( 1000 > number ) {
+ if ( 1000 > number && ! isUndefined( decimal ) ) {
readableNumber = Number.isInteger( number ) ? number : number.replace( '.', decimal );
}
| 1 | /**
* Utility functions.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import data from 'GoogleComponents/data';
import SvgIcon from 'GoogleUtil/svg-icon';
const {
addAction,
addFilter,
applyFilters,
} = wp.hooks;
const {
map,
isNull,
isUndefined,
indexOf,
unescape,
deburr,
toLower,
trim,
} = lodash;
const {
_n,
sprintf,
} = wp.i18n;
const { addQueryArgs, getQueryString } = wp.url;
const { __ } = wp.i18n;
/**
* Remove a parameter from a URL string.
*
* Fallback for when URL is unable to handle this.
*
* @param {string} url The URL to process.
* @param {string} parameter The URL parameter to remove.
*/
const removeURLFallBack = ( url, parameter ) => {
const urlparts = url.split( '?' );
if ( 2 <= urlparts.length ) {
const prefix = encodeURIComponent( parameter ) + '=';
const pars = urlparts[1].split( /[&;]/g );
//reverse iteration as may be destructive
const newPars = pars.filter( param => {
return -1 === param.lastIndexOf( prefix, 0 );
} );
url = urlparts[0] + '/' + ( 0 < newPars.length ? '?' + newPars.join( '&' ) : '' );
return url;
} else {
return url;
}
};
/**
* Remove a parameter from a URL string.
*
* Leverages the URL object internally.
*
* @param {string} url The URL to process.
* @param {string} parameter The URL parameter to remove.
*/
export const removeURLParameter = ( url, parameter ) => {
const parsedUrl = new URL( url );
// If the URL implementation doesn't support ! parsedUrl.searchParams, use the fallback handler.
if ( ! parsedUrl.searchParams || ! parsedUrl.searchParams.delete ) {
return removeURLFallBack( url, parameter );
}
parsedUrl.searchParams.delete( parameter );
return parsedUrl.href;
};
/**
* Format a large number for shortened display.
*
* @param {number} number The large number to format.
* @param {string|boolean} currencyCode Optional currency code to format as amount.
*
* @returns {string} The formatted number.
*/
export const readableLargeNumber = ( number, currencyCode = false ) => {
let readableNumber;
// Handle passed data undefined.
if ( isUndefined( number ) ) {
readableNumber = 0;
} else if ( 1000000 < number ) {
number = number / 1000000;
readableNumber = number.toFixed( 1 ) + 'M';
} else if ( 1000 < number ) {
number = number / 1000;
if ( 99 < number ) {
readableNumber = Math.round( number ) + 'K';
} else {
readableNumber = number.toFixed( 1 ) + 'K';
}
} else {
readableNumber = number;
}
// Handle errors after calculations.
if ( isNull( number ) || isUndefined( number ) || isNaN( number ) ) {
readableNumber = '';
number = 0;
}
if ( 0 === number ) {
readableNumber = '0.00';
return currencyCode ?
new Intl.NumberFormat( navigator.language, { style: 'currency', currency: currencyCode } ).format( number ) :
number;
}
// Format as amount if currencyCode is passed.
if ( false !== currencyCode && '' !== readableNumber ) {
const formatedParts = new Intl.NumberFormat( navigator.language, { style: 'currency', currency: currencyCode } ).formatToParts( number );
const decimal = formatedParts.find( part => 'decimal' === part.type ).value;
const currency = formatedParts.find( part => 'currency' === part.type ).value;
if ( 1000 > number ) {
readableNumber = Number.isInteger( number ) ? number : number.replace( '.', decimal );
}
return `${currency}${readableNumber}`;
}
return readableNumber;
};
/**
* Internationalization Number Format.
*
* @param {number} number The number to format.
* @param {string} locale Optional, locale to format as amount, default to Browser's locale.
*
* @returns {string} The formatted number.
*/
export const numberFormat = ( number, locale = '' ) => {
if ( ! locale ) {
locale = navigator.language;
}
// This line to make sure we use lower case local format, ex: en-us.
locale = locale.replace( '_', '-' ).toLocaleLowerCase();
return new Intl.NumberFormat( locale ).format( number );
};
/**
* Transform a period string into a number of seconds.
*
* @param {string} period The period to transform.
*
* @return {number} The number of seconds
*/
export const getTimeInSeconds = ( period ) => {
const minute = 60;
const hour = minute * 60;
const day = hour * 24;
const week = day * 7;
const month = day * 30;
const year = day * 365;
switch ( period ) {
case 'minute':
return minute;
case 'hour':
return hour;
case 'day':
return day;
case 'week':
return week;
case 'month':
return month;
case 'year':
return year;
}
};
/**
* Converts seconds to a display ready string indicating
* the number of hours, minutes and seconds that have elapsed.
*
* For example, passing 65 returns '1m 5s'.
*
* @param {int} seconds The number of seconds.
*/
export const prepareSecondsForDisplay = ( seconds ) => {
seconds = parseInt( seconds, 10 );
if ( isNaN( seconds ) || 0 === seconds ) {
return '0.0s';
}
const results = {};
results.hours = Math.floor( seconds / 60 / 60 );
results.minutes = Math.floor( ( seconds / 60 ) % 60 );
results.seconds = Math.floor( seconds % 60 );
const returnString =
( results.hours ? results.hours + 'h ' : '' ) +
( results.minutes ? results.minutes + 'm ' : '' ) +
( results.seconds ? results.seconds + 's ' : '' );
return returnString.trim();
};
/**
* Retrieve number of days between 2 dates.
*
* @param {object} dateStart
* @param {object} dateEnd
*
* @return {number} The number of days.
*/
export const getDaysBetweenDates = ( dateStart, dateEnd ) => {
const dayMs = 1000 * getTimeInSeconds( 'day' );
const dateStartMs = dateStart.getTime();
const dateEndMs = dateEnd.getTime();
return Math.round( Math.abs( dateStartMs - dateEndMs ) / dayMs );
};
/**
* Calculate the percent change between two values.
*
* @param {int} previous The previous value.
* @param {int} current The current value.
*
* @returns {int|string} The percent change.
*/
export const changeToPercent = ( previous, current ) => {
// Prevent divide by zero errors.
if ( '0' === previous || 0 === previous || isNaN( previous ) ) {
return '';
}
const change = ( ( current - previous ) / previous * 100 ).toFixed( 1 );
// Avoid NaN at all costs.
if ( isNaN( change ) || 'Infinity' === change ) {
return '';
}
return change;
};
export function addPerformanceMonitoring() {
let googlesitekitPerformance = window.googlesitekitPerformance || {};
addAction( 'googlesitekit.moduleLoaded', 'googlesitekit.PerformanceMetrics.moduleLoaded', function( context ) {
googlesitekitPerformance.loadedActionTriggered = ( new Date() ).getTime();
const elapsed = ( googlesitekitPerformance.loadedActionTriggered - googlesitekitPerformance.domReady ) + 'ms';
googlesitekitPerformance._timeToLoadedActionTriggered = elapsed;
googlesitekitPerformance.loadedActionContext = context;
console.log( 'Performance Metrics: App loaded', elapsed ); // eslint-disable-line no-console
} );
addAction( 'googlesitekit.dataReceived', 'googlesitekit.PerformanceMetrics.dataReceived', function( datapoint ) {
const currentlyAt = ( new Date() ).getTime();
googlesitekitPerformance.dataReceived = googlesitekitPerformance.dataReceived || [];
googlesitekitPerformance._timeToDataReceived = googlesitekitPerformance._timeToDataReceived || [];
googlesitekitPerformance.dataReceived.push( currentlyAt );
const elapsed = ( currentlyAt - googlesitekitPerformance.domReady ) + 'ms';
googlesitekitPerformance._timeToDataReceived.push( elapsed );
console.log( 'Performance Metrics: Async Data loaded: ' + datapoint, elapsed ); // eslint-disable-line no-console
} );
addAction( 'googlesitekit.cachedDataUsed', 'googlesitekit.PerformanceMetrics.cachedDataUsed', function( datapoint ) {
const currentlyAt = ( new Date() ).getTime();
googlesitekitPerformance.cachedDataUsed = googlesitekitPerformance.cachedDataUsed || [];
googlesitekitPerformance._timeToCachedDataUsed = googlesitekitPerformance._timeToCachedDataUsed || [];
googlesitekitPerformance.cachedDataUsed.push( currentlyAt );
const elapsed = ( currentlyAt - googlesitekitPerformance.domReady ) + 'ms';
googlesitekitPerformance._timeToCachedDataUsed.push( elapsed );
console.log( 'Performance Metrics: Cached Data loaded: ' + datapoint, elapsed ); // eslint-disable-line no-console
} );
addAction( 'googlesitekit.rootAppDidMount', 'googlesitekit.PerformanceMetrics.rootAppDidMount', function() {
googlesitekitPerformance.rootAppMounted = ( new Date() ).getTime();
const elapsed = ( googlesitekitPerformance.rootAppMounted - googlesitekitPerformance.domReady ) + 'ms';
googlesitekitPerformance._timeToAppMounted = elapsed;
console.log( 'Performance Metrics: App mounted', elapsed ); // eslint-disable-line no-console
} );
}
/**
* Fallback helper to get a query parameter from the current URL.
*
* Used when URL.searchParams is unavailable.
*
* @param {string} name Query param to search for.
* @returns {string}
*/
const fallbackGetQueryParamater = ( name ) => {
var queryDict = {},
i,
queries = location.search.substr( 1 ).split( '&' );
for ( i = 0; i < queries.length; i++ ) {
queryDict[queries[ i ].split( '=' )[ 0 ] ] = decodeURIComponent( queries[ i ].split( '=' )[ 1 ] );
}
// If the name is specified, return that specific get parameter
if ( name ) {
return queryDict.hasOwnProperty( name ) ? decodeURIComponent( queryDict[ name ].replace( /\+/g, ' ' ) ) : '';
}
return queryDict;
};
/**
* Get query parameter from the current URL.
*
* @param {string} name Query param to search for.
* @returns {string}
*/
export const getQueryParameter = ( name ) => {
const url = new URL( location.href );
if ( name ) {
if ( ! url.searchParams || ! url.searchParams.get ) {
return fallbackGetQueryParamater( name );
}
return url.searchParams.get( name );
}
const query = {};
for ( const [ key, value ] of url.searchParams.entries() ) {
query[ key ] = value;
}
return query;
};
/**
* Extract a single column of data for a sparkline from a dataset prepared for google charts.
*
* @param {array} data An array of google charts row data.
* @param {Number} column The column to extract for the sparkline.
*/
export const extractForSparkline = ( data, column ) => {
return map( data, ( row, i ) => {
return [
row[0], // row[0] always contains the x axis value (typically date).
row[ column ] ? row[ column ] : ( 0 === i ? '' : 0 ), // the data for the sparkline.
];
} );
};
export const refreshAuthentication = async() => {
try {
const response = await data.get( 'core', 'user', 'authentication' );
const requiredAndGrantedScopes = response.grantedScopes.filter( scope => {
return -1 !== response.requiredScopes.indexOf( scope );
} );
// We should really be using state management. This is terrible.
window.googlesitekit.setup = window.googlesitekit.setup || {};
window.googlesitekit.setup.isAuthenticated = response.isAuthenticated;
window.googlesitekit.setup.requiredScopes = response.requiredScopes;
window.googlesitekit.setup.grantedScopes = response.grantedScopes;
window.googlesitekit.setup.needReauthenticate = requiredAndGrantedScopes.length < response.requiredScopes.length;
} catch ( e ) { // eslint-disable-line no-empty
}
};
/**
* Get the URL needed to initiate a reAuth flow.
*
* @param {string} slug The module slug. If included redirect URL will include page: page={ `googlesitekit-${slug}`}.
* @param {boolean} status The module activation status.
*/
export const getReAuthUrl = ( slug, status ) => {
const {
connectUrl,
adminRoot,
apikey,
} = googlesitekit.admin;
const { needReauthenticate } = window.googlesitekit.setup;
let { screenId } = googlesitekit.modules[ slug ];
// For PageSpeedInsights, there is no setup needed if an API key already exists.
const reAuth = ( 'pagespeed-insights' === slug && apikey && apikey.length ) ? false : status;
let redirect = addQueryArgs(
adminRoot, {
// If the module has a submenu page, and is being activated, redirect back to the module page.
page: ( slug && status && screenId ) ? screenId : 'googlesitekit-dashboard',
reAuth,
slug,
}
);
if ( ! needReauthenticate ) {
return redirect;
}
// Encodes the query string to ensure the redirect url is not messing up with the main url.
const queryString = encodeURIComponent( getQueryString( redirect ) );
// Rebuild the redirect url.
redirect = adminRoot + '?' + queryString;
return addQueryArgs(
connectUrl, {
redirect,
status,
}
);
};
/**
* Replace a filtered component with the passed component and merge their props.
*
* Components wrapped in the 'withFilters' higher order component have a filter applied to them (wp.hooks.applyFilters).
* This helper is used to replace (or "Fill") a filtered component with a passed component. To use, pass as the third
* argument to an addFilter call, eg:
*
* addFilter( `googlesitekit.ModuleSettingsDetails-${slug}`,
* 'googlesitekit.AdSenseModuleSettingsDetails',
* fillFilterWithComponent( AdSenseSettings, {
* onSettingsPage: true,
* } ) );
*
* @param {Component} NewComponent The component to render in place of the filtered component.
* @param {object} newProps The props to pass down to the new component.
*/
export const fillFilterWithComponent = ( NewComponent, newProps ) => {
return ( OriginalComponent ) => {
return function InnerComponent( props ) {
return (
<NewComponent { ...props } { ...newProps } OriginalComponent={ OriginalComponent } />
);
};
};
};
/**
* Get Site Kit Admin URL Helper
*
* @param { string } page The page slug. Optional. Default is 'googlesitekit-dashboard'.
* @param { object } args Optional. Object of argiments to add to the URL.
*
* @returns string
*/
export const getSiteKitAdminURL = ( page, args ) => {
const { adminRoot } = googlesitekit.admin;
if ( ! page ) {
page = 'googlesitekit-dashboard';
}
args = { page, ...args };
return addQueryArgs( adminRoot, args );
};
/**
* Verifies if the Front End site has been loaded in the iframe to check for tag presence.
*
* @return mixed Returns the iframe if it's loaded, false if not loaded.
*/
export const isFrontendIframeLoaded = () => {
const iframe = document.getElementById( 'sitekit_fe_load_check' );
if ( iframe ) {
return iframe;
}
return false;
};
/**
* Verifies whether JSON is valid.
*
* @param { string } stringToValidate The string to validate.
*
* @returns boolean Whether JSON is valid.
*/
export const validateJSON = ( stringToValidate ) => {
try {
return ( JSON.parse( stringToValidate ) && !! stringToValidate );
} catch ( e ) {
return false;
}
};
/**
* Verifies Optimize ID
*
* @param { string } stringToValidate The string to validate.
*
* @returns boolean
*/
export const validateOptimizeID = ( stringToValidate ) => {
return ( stringToValidate.match( /^GTM-[a-zA-Z\d]{7}$/ ) );
};
/**
* Appends a notification count icon to the Site Kit dashboard menu/admin bar when
* user is outside the Site Kit app.
*
* Retrieves the number from local storage previously stored by NotificationCounter
* used in googlesitekit-admin.js
*/
export const appendNotificationsCount = ( count = 0 ) => {
let menuSelector = null;
let adminbarSelector = null;
const counterMenu = document.querySelector( '#toplevel_page_googlesitekit-dashboard #googlesitekit-notifications-counter' );
const counterAdminbar = document.querySelector( '#wp-admin-bar-google-site-kit #googlesitekit-notifications-counter' );
if ( counterMenu && counterAdminbar ) {
return false;
}
menuSelector = document.querySelector( '#toplevel_page_googlesitekit-dashboard .wp-menu-name' );
adminbarSelector = document.querySelector( '#wp-admin-bar-google-site-kit .ab-item' );
if ( null === menuSelector && null === adminbarSelector ) {
return false;
}
const wrapper = document.createElement( 'span' );
wrapper.setAttribute( 'class', `googlesitekit-notifications-counter update-plugins count-${count}` );
wrapper.setAttribute( 'id', 'googlesitekit-notifications-counter' );
const pluginCount = document.createElement( 'span' );
pluginCount.setAttribute( 'class', 'plugin-count' );
pluginCount.setAttribute( 'aria-hidden', 'true' );
pluginCount.textContent = count;
const screenReader = document.createElement( 'span' );
screenReader.setAttribute( 'class', 'screen-reader-text' );
screenReader.textContent = sprintf(
_n(
'%d notification',
'%d notifications',
count,
'google-site-kit'
),
count
);
wrapper.appendChild( pluginCount );
wrapper.appendChild( screenReader );
if ( menuSelector && null === counterMenu ) {
menuSelector.appendChild( wrapper );
}
if ( adminbarSelector && null === counterAdminbar ) {
adminbarSelector.appendChild( wrapper );
}
return wrapper;
};
/**
* Send an analytics tracking event.
*
* @param {string} eventCategory The event category. Required.
* @param {string} eventName The event category. Required.
* @param {string} eventLabel The event category. Optional.
* @param {string} eventValue The event category. Optional.
*
*/
export const sendAnalyticsTrackingEvent = ( eventCategory, eventName, eventLabel = '', eventValue = '' ) => {
if ( 'undefined' === typeof gtag ) {
return;
}
const {
siteURL,
siteUserId,
} = googlesitekit.admin;
const { isFirstAdmin } = googlesitekit.setup;
const { trimEnd } = lodash;
if ( googlesitekit.admin.trackingOptin ) {
return gtag( 'event', eventName, {
send_to: googlesitekit.admin.trackingID, /*eslint camelcase: 0*/
event_category: eventCategory, /*eslint camelcase: 0*/
event_label: eventLabel, /*eslint camelcase: 0*/
event_value: eventValue, /*eslint camelcase: 0*/
dimension1: trimEnd( siteURL, '/' ), // Domain.
dimension2: isFirstAdmin ? 'true' : 'false', // First Admin?
dimension3: siteUserId, // Identifier.
} );
}
};
/**
* Detect whether browser storage is both supported and available.
*
* @param {string} type Browser storage to test. ex localStorage or sessionStorage.
* @returns {boolean}
*/
export const storageAvailable = ( type ) => {
const storage = window[type];
if ( ! storage ) {
return false;
}
try {
const x = '__storage_test__';
storage.setItem( x, x );
storage.removeItem( x );
return true;
} catch ( e ) {
return e instanceof DOMException && (
// everything except Firefox
22 === e.code ||
// Firefox
1014 === e.code ||
// test name field too, because code might not be present
// everything except Firefox
'QuotaExceededError' === e.name ||
// Firefox
'NS_ERROR_DOM_QUOTA_REACHED' === e.name ) &&
// acknowledge QuotaExceededError only if there's something already stored
0 !== storage.length;
}
};
/**
* Set Cache to Browser Storage.
*
* @param {string} cacheType Browser storage.
* @param {string} cacheKey Cache key.
* @param {*} data Cache data to store.
* @returns {boolean}
*/
export const setCache = ( cacheType, cacheKey, data ) => {
if ( 0 > indexOf( [ 'localStorage', 'sessionStorage' ], cacheType ) ) {
return;
}
if ( ! storageAvailable( cacheType ) ) {
return;
}
window[ cacheType ].setItem( cacheKey, data );
return true;
};
/**
* Get Cache from Browser Storage.
*
* @param {string} cacheType Browser storage.
* @param {string} cacheKey Cache key.
* @returns {*}
*/
export const getCache = ( cacheType, cacheKey ) => {
if ( 0 > indexOf( [ 'localStorage', 'sessionStorage' ], cacheType ) ) {
return;
}
if ( ! storageAvailable( cacheType ) ) {
return;
}
return window[ cacheType ].getItem( cacheKey );
};
/**
* Delete Cache from Browser Storage.
*
* @param {string} cacheType Browser storage.
* @param {string} cacheKey Cache key.
* @returns {*}
*/
export const deleteCache = ( cacheType, cacheKey ) => {
if ( 0 > indexOf( [ 'localStorage', 'sessionStorage' ], cacheType ) ) {
return;
}
if ( ! storageAvailable( cacheType ) ) {
return;
}
window[ cacheType ].removeItem( cacheKey );
return true;
};
export const findTagInHtmlContent = ( html, module ) => {
let existingTag = false;
if ( ! html ) {
return false;
}
existingTag = extractTag( html, module );
return existingTag;
};
/**
* Looks for existing tag requesting front end html, if no existing tag was found on server side
* while requesting list of accounts.
*
* @param {string} module Module slug.
*/
export const getExistingTag = async( module ) => {
try {
let tagFound = data.getCache( module, 'existingTag', 300 );
if ( false === tagFound ) {
const html = await fetch( `${googlesitekit.admin.homeURL}?tagverify=1×tamp=${Date.now()}` ).then( res => {
return res.text();
} );
tagFound = findTagInHtmlContent( html, module );
if ( ! tagFound ) {
tagFound = '';
}
}
data.setCache( module, 'existingTag', tagFound );
return new Promise( ( resolve ) => {
resolve( tagFound );
} );
} catch ( err ) {
// nothing.
}
};
/**
* Extracts the tag related to a module from the given string.
*
* @param {string} string The string from where to find the tag.
* @param {string} tag The tag to search for, one of 'adsense' or 'analytics'
*
* @return string|bool The tag id if found, otherwise false.
*/
export const extractTag = ( string, tag ) => {
let result = false;
let reg = null;
switch ( tag ) {
case 'analytics':
// Detect analytics tag variations.
reg = new RegExp( /<script async(?:="")? src=['|"]https:\/\/www.googletagmanager.com\/gtag\/js\?id=(.*?)['|"]><\/script>/gm );
result = reg.exec( string );
result = result ? result[1] : false;
if ( ! result ) {
reg = new RegExp( /__gaTracker\( ?['|"]create['|"], ?['|"](.*?)['|"], ?['|"]auto['|"] ?\)/gm );
result = reg.exec( string );
result = result ? result[1] : false;
}
if ( ! result ) {
reg = new RegExp( /ga\( ?['|"]create['|"], ?['|"](.*?)['|"], ?['|"]auto['|"] ?\)/gm );
result = reg.exec( string );
result = result ? result[1] : false;
}
if ( ! result ) {
reg = new RegExp( /_gaq.push\( ?\[ ?['|"]_setAccount['|"], ?['|"](.*?)['|"] ?] ?\)/gm );
result = reg.exec( string );
result = result ? result[1] : false;
}
break;
case 'adsense':
reg = new RegExp( /google_ad_client: ?["|'](.*?)["|']/gm );
result = reg.exec( string );
result = result ? result[1] : false;
break;
}
return result;
};
/**
* Activate or Deactivate a Module.
*
* @param {object} restApiClient Rest API client from data module, this needed so we don't need to import data module in helper.
* @param {string} moduleSlug Module slug to activate or deactivate.
* @param {boolean} status True if module should be activated, false if it should be deactivated.
* @returns {Promise}
*/
export const activateOrDeactivateModule = ( restApiClient, moduleSlug, status ) => {
return restApiClient.setModuleData( moduleSlug, 'active', status ).then( ( responseData ) => {
sendAnalyticsTrackingEvent(
`${moduleSlug}_setup`,
! responseData.active ? 'module_deactivate' : 'module_activate',
moduleSlug,
);
return new Promise( ( resolve ) => {
resolve( responseData );
} );
} );
};
/**
* Helper to toggle confirm changes button disable/enable
* depending on the module changed settings.
*
* @param {string} moduleSlug The module slug being edited.
* @param {object} settingsMapping The mapping between form settings names and saved settings.
* @param {object} settingsState The changed settings component state to compare with.
* @param {object} skipDOM Skip DOm checks/modifications, used for testing.
*/
export const toggleConfirmModuleSettings = ( moduleSlug, settingsMapping, settingsState, skipDOM = false ) => {
const { settings, setupComplete } = googlesitekit.modules[ moduleSlug ];
const confirm = skipDOM || document.getElementById( `confirm-changes-${moduleSlug}` );
if ( ! setupComplete || ! confirm ) {
return;
}
const currentSettings = [];
Object.keys( settingsState ).forEach( key => {
if ( -1 < Object.keys( settingsMapping ).indexOf( key ) ) {
currentSettings[ settingsMapping[key] ] = settingsState[ key ];
}
} );
const savedSettings = [];
Object.keys( settings ).forEach( key => {
if ( -1 < Object.values( settingsMapping ).indexOf( key ) ) {
savedSettings[ key ] = settings[ key ];
}
} );
const changed = Object.keys( savedSettings ).filter( key => {
if ( savedSettings[key] !== currentSettings[ key ] ) {
return true;
}
} );
if ( 0 < changed.length ) {
if ( skipDOM ) {
return true;
}
confirm.removeAttribute( 'disabled' );
} else {
if ( skipDOM ) {
return false;
}
confirm.setAttribute( 'disabled', 'disabled' );
}
};
/**
* Trigger error notification on top of the page.
*
* @param {Component} ErrorComponent The error component to render in place.
* @param {object} props The props to pass down to the error component. Optional.
*/
export const showErrorNotification = ( ErrorComponent, props = {} ) => {
addFilter( 'googlesitekit.ErrorNotification',
'googlesitekit.ErrorNotification',
fillFilterWithComponent( ErrorComponent, props ), 1 );
};
/**
* HTML text into HTML entity.
*
* _.unescape doesn't seem to decode some entities for admin bar titles.
* adding combination in this helper as a workaround.
*
* @param {string} str The string to decode.
*
* @return {string}
*/
export const decodeHtmlEntity = ( str ) => {
const decoded = str.replace( /&#(\d+);/g, function( match, dec ) {
return String.fromCharCode( dec );
} );
return unescape( decoded );
};
/**
* Performs some basic cleanup of a string for use as a post slug
*
* Emnulates santize_title() from WordPress core.
*
* @return {string} Processed string
*/
export function stringToSlug( string ) {
return toLower( deburr( trim( string.replace( /[\s./_]+/g, '-' ), '-' ) ) );
}
/**
* Return the currently selected date range as a string that fits in the sentence:
* "Data for the last [date range]", eg "Date for the last 28 days".
*/
export function getDateRangeFrom() {
const currentDateRange = applyFilters( 'googlesitekit.dateRange', __( 'Last 28 days', 'google-site-kit' ) );
return currentDateRange.replace( 'Last ', '' );
}
/**
* Get the icon for a module.
*
* @param {string} module The module slug.
* @param {boolean} blockedByParentModule Whether the module is blocked by a parent module.
* @param {string} width The icon width.
* @param {string} height The icon height.
* @param {string} class Class string to use for icon.
*/
export function moduleIcon( module, blockedByParentModule, width = '33', height = '33', useClass = '' ) {
if ( ! googlesitekit ) {
return;
}
/* Set module icons. Page Speed Insights is a special case because only a .png is available. */
let moduleIcon = <SvgIcon id={ module } width={ width } height={ height } className={ useClass }/>;
if ( blockedByParentModule ) {
moduleIcon = <SvgIcon id={ `${ module }-disabled` } width={ width } height={ height } className={ useClass }/>;
} else if ( 'pagespeed-insights' === module ) {
moduleIcon = <img src={ googlesitekit.admin.assetsRoot + 'images/icon-pagespeed.png' } width={ width } alt="" className={ useClass }/>;
}
return moduleIcon;
}
/**
* Clear App localstorage.
*/
export function clearAppLocalStorage() {
if ( localStorage ) {
localStorage.clear();
}
if ( sessionStorage ) {
sessionStorage.clear();
}
}
| 1 | 24,671 | Two things to make the code more error-proof and simplify it: 1. To be safe, this should be: `! isUndefined( decimal ) && ! isUndefined( decimal.value )` 2. The other clause that you changed below can be combined with that since `decimal` isn't used anywhere else, and so there's no point to re-check whether it's not undefined. In other words, this should become: `! isUndefined( decimal ) && ! isUndefined( decimal.value ) && 1000 > number`, and then you can remove the `decimal = decimal.value` assignment and replace it with line 146 (assignment of `readableNumber`). | google-site-kit-wp | js |
@@ -26,6 +26,15 @@ import "context"
// Outbound is the common interface for all outbounds
type Outbound interface {
+	// Transports returns the transports that are used by this outbound, so they
+ // can be collected for lifecycle management, typically by a Dispatcher.
+ //
+ // Though most outbounds only use a single transport, composite outbounds
+ // may use multiple transport protocols, particularly for shadowing traffic
+ // across multiple transport protocols during a transport protocol
+ // migration.
+ Transports() []Transport
+
// Sets up the outbound to start making calls.
//
// This MUST block until the outbound is ready to start sending requests. | 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package transport
import "context"
//go:generate mockgen -destination=transporttest/outbound.go -package=transporttest go.uber.org/yarpc/transport UnaryOutbound,OnewayOutbound
// Outbound is the common interface for all outbounds
type Outbound interface {
// Sets up the outbound to start making calls.
//
// This MUST block until the outbound is ready to start sending requests.
// This MUST be idempotent and thread-safe. If called multiple times, only
// the first call's dependencies are used
Start() error
// Stops the outbound, cleaning up any resources held by the Outbound.
//
// This MUST be idempotent and thread-safe. This MAY be called more than once
Stop() error
}
// UnaryOutbound is a transport that knows how to send unary requests for procedure
// calls.
type UnaryOutbound interface {
Outbound
// Call sends the given request through this transport and returns its
// response.
//
// This MUST NOT be called before Start() has been called successfully. This
// MAY panic if called without calling Start(). This MUST be safe to call
// concurrently.
Call(ctx context.Context, request *Request) (*Response, error)
}
// OnewayOutbound is a transport that knows how to send oneway requests for
// procedure calls.
type OnewayOutbound interface {
Outbound
// CallOneway sends the given request through this transport and returns an
// ack.
//
// This MUST NOT be called before Start() has been called successfully. This
// MAY panic if called without calling Start(). This MUST be safe to call
// concurrently.
CallOneway(ctx context.Context, request *Request) (Ack, error)
}
// Outbounds encapsulates outbound types for a service
type Outbounds struct {
Unary UnaryOutbound
Oneway OnewayOutbound
}
| 1 | 11,565 | Wouldn't composite outbounds compose the transport, such that it would still be represented as a single transport? | yarpc-yarpc-go | go |
@@ -28,6 +28,7 @@ import (
func testAlertmanagerInstanceNamespacesAllNs(t *testing.T) {
ctx := framework.NewTestCtx(t)
+
defer ctx.Cleanup(t)
// create 3 namespaces: | 1 | // Copyright 2019 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"fmt"
"strings"
"testing"
api_errors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
testFramework "github.com/prometheus-operator/prometheus-operator/test/framework"
)
func testAlertmanagerInstanceNamespacesAllNs(t *testing.T) {
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup(t)
// create 3 namespaces:
//
// 1. "operator" ns:
// - hosts the prometheus operator deployment
//
// 2. "instance" ns:
// - will be configured on prometheus operator as --alertmanager-instance-namespaces="instance"
//
// 3. "nonInstance" ns:
// - hosts an Alertmanager CR which must not be reconciled
operatorNs := ctx.CreateNamespace(t, framework.KubeClient)
instanceNs := ctx.CreateNamespace(t, framework.KubeClient)
nonInstanceNs := ctx.CreateNamespace(t, framework.KubeClient)
ctx.SetupPrometheusRBACGlobal(t, instanceNs, framework.KubeClient)
_, err := framework.CreatePrometheusOperator(operatorNs, *opImage, nil, nil, nil, []string{instanceNs}, false, true)
if err != nil {
t.Fatal(err)
}
am := framework.MakeBasicAlertmanager("non-instance", 3)
am.Namespace = nonInstanceNs
_, err = framework.MonClientV1.Alertmanagers(nonInstanceNs).Create(framework.Ctx, am, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
am = framework.MakeBasicAlertmanager("instance", 3)
am.Namespace = instanceNs
if _, err := framework.CreateAlertmanagerAndWaitUntilReady(instanceNs, am); err != nil {
t.Fatal(err)
}
sts, err := framework.KubeClient.AppsV1().StatefulSets(nonInstanceNs).Get(framework.Ctx, "alertmanager-instance", metav1.GetOptions{})
if !api_errors.IsNotFound(err) {
t.Fatalf("expected not to find an Alertmanager statefulset, but did: %v/%v", sts.Namespace, sts.Name)
}
}
func testAlertmanagerInstanceNamespacesDenyNs(t *testing.T) {
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup(t)
// create two namespaces:
//
// 1. "operator" ns:
// - hosts the prometheus operator deployment
//
// 2. "instance" ns:
// - will be configured on prometheus operator as --alertmanager-instance-namespaces="instance"
// - will additionally be configured on prometheus operator as --deny-namespaces="instance"
// - hosts an alertmanager CR which must be reconciled.
operatorNs := ctx.CreateNamespace(t, framework.KubeClient)
instanceNs := ctx.CreateNamespace(t, framework.KubeClient)
ctx.SetupPrometheusRBACGlobal(t, instanceNs, framework.KubeClient)
_, err := framework.CreatePrometheusOperator(operatorNs, *opImage, nil, []string{instanceNs}, nil, []string{instanceNs}, false, true)
if err != nil {
t.Fatal(err)
}
am := framework.MakeBasicAlertmanager("instance", 3)
am.Namespace = instanceNs
if _, err := framework.CreateAlertmanagerAndWaitUntilReady(instanceNs, am); err != nil {
t.Fatal(err)
}
}
func testAlertmanagerInstanceNamespacesAllowList(t *testing.T) {
ctx := framework.NewTestCtx(t)
defer ctx.Cleanup(t)
// create 3 namespaces:
//
// 1. "operator" ns:
// - hosts the prometheus operator deployment
//
// 2. "instance" ns:
// - will be configured on prometheus operator as --alertmanager-instance-namespaces="instance"
// - hosts an Alertmanager CR which will select AlertmanagerConfig resources in all "allowed" namespaces.
// - hosts an AlertmanagerConfig CR which must not be reconciled.
//
// 3. "allowed" ns:
// - will be configured on prometheus operator as --namespaces="allowed"
// - hosts an AlertmanagerConfig CR which must be reconciled
// - hosts an Alertmanager CR which must not reconciled.
operatorNs := ctx.CreateNamespace(t, framework.KubeClient)
instanceNs := ctx.CreateNamespace(t, framework.KubeClient)
allowedNs := ctx.CreateNamespace(t, framework.KubeClient)
ctx.SetupPrometheusRBACGlobal(t, instanceNs, framework.KubeClient)
for _, ns := range []string{allowedNs, instanceNs} {
err := testFramework.AddLabelsToNamespace(framework.KubeClient, ns, map[string]string{
"monitored": "true",
})
if err != nil {
t.Fatal(err)
}
}
// Configure the operator to watch also a non-existing namespace (e.g. "notfound").
_, err := framework.CreatePrometheusOperator(operatorNs, *opImage, []string{"notfound", allowedNs}, nil, nil, []string{"notfound", instanceNs}, false, true)
if err != nil {
t.Fatal(err)
}
// Create the Alertmanager resource in the "allowed" namespace. We will check later that it is NOT reconciled.
am := framework.MakeBasicAlertmanager("instance", 3)
am.Spec.AlertmanagerConfigSelector = &metav1.LabelSelector{
MatchLabels: map[string]string{
"group": "monitored",
},
}
am.Spec.AlertmanagerConfigNamespaceSelector = &metav1.LabelSelector{
MatchLabels: map[string]string{
"monitored": "true",
},
}
// Create an Alertmanager resource in the "allowedNs" namespace which must *not* be reconciled.
_, err = framework.MonClientV1.Alertmanagers(allowedNs).Create(framework.Ctx, am.DeepCopy(), metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
// Create an Alertmanager resource in the "instance" namespace which must be reconciled.
if _, err := framework.CreateAlertmanagerAndWaitUntilReady(instanceNs, am); err != nil {
t.Fatal(err)
}
// Check that the Alertmanager resource created in the "allowed" namespace hasn't been reconciled.
sts, err := framework.KubeClient.AppsV1().StatefulSets(allowedNs).Get(framework.Ctx, "alertmanager-instance", metav1.GetOptions{})
if !api_errors.IsNotFound(err) {
t.Fatalf("expected not to find an Alertmanager statefulset, but did: %v/%v", sts.Namespace, sts.Name)
}
// Create the AlertmanagerConfig resources in the "instance" and "allowed" namespaces.
amConfig := &monitoringv1alpha1.AlertmanagerConfig{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-test-amconfig-multi-namespace",
Labels: map[string]string{
"group": "monitored",
},
},
Spec: monitoringv1alpha1.AlertmanagerConfigSpec{
Route: &monitoringv1alpha1.Route{
Receiver: "void",
},
Receivers: []monitoringv1alpha1.Receiver{{
Name: "void",
}},
},
}
if _, err = framework.MonClientV1alpha1.AlertmanagerConfigs(instanceNs).Create(framework.Ctx, amConfig, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if _, err = framework.MonClientV1alpha1.AlertmanagerConfigs(allowedNs).Create(framework.Ctx, amConfig, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
// Check that the AlertmanagerConfig resource in the "allowed" namespace is reconciled but not the one in "instance".
err = framework.PollAlertmanagerConfiguration(instanceNs, "instance",
func(config string) error {
if !strings.Contains(config, "void") {
return fmt.Errorf("expected generated configuration to contain %q but got %q", "void", config)
}
return nil
},
func(config string) error {
if strings.Contains(config, instanceNs) {
return fmt.Errorf("expected generated configuration to not contain %q but got %q", instanceNs, config)
}
return nil
},
)
if err != nil {
t.Fatalf("failed to wait for alertmanager config: %v", err)
}
// FIXME(simonpasquier): the unprivileged namespace lister/watcher
// isn't notified of updates properly so the code below fails.
// Uncomment the test once the lister/watcher is fixed.
//
// Remove the selecting label on the "allowed" namespace and check that
// the alertmanager configuration is updated.
// See https://github.com/prometheus-operator/prometheus-operator/issues/3847
//if err := testFramework.RemoveLabelsFromNamespace(framework.KubeClient, allowedNs, "monitored"); err != nil {
// t.Fatal(err)
//}
//err = framework.PollAlertmanagerConfiguration(instanceNs, "instance",
// func(config string) error {
// if strings.Contains(config, "void") {
// return fmt.Errorf("expected generated configuration to not contain %q but got %q", "void", config)
// }
// return nil
// },
//)
//if err != nil {
// t.Fatalf("failed to wait for alertmanager config: %v", err)
//}
}
| 1 | 16,319 | nit: there is no need for a new line here, usually we tend to put an object creation and its deferred deletion next to each other. It helps not forgetting about the cleanup. | prometheus-operator-prometheus-operator | go |
@@ -7,6 +7,12 @@ use JavierEguiluz\Bundle\EasyAdminBundle\Exception\EntityNotFoundException;
use Symfony\Component\EventDispatcher\GenericEvent;
use Symfony\Component\HttpFoundation\Request;
+/**
+ * Adds some custom attributes to the request object to store information
+ * related to EasyAdmin.
+ *
+ * @author Maxime Steinhausser
+ */
class RequestPostInitializeListener
{
/** @var Request|null */ | 1 | <?php
namespace JavierEguiluz\Bundle\EasyAdminBundle\EventListener;
use Doctrine\Bundle\DoctrineBundle\Registry;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\EntityNotFoundException;
use Symfony\Component\EventDispatcher\GenericEvent;
use Symfony\Component\HttpFoundation\Request;
class RequestPostInitializeListener
{
/** @var Request|null */
private $request;
/** @var Registry */
private $doctrine;
/**
* @param Registry $doctrine
*/
public function __construct(Registry $doctrine)
{
$this->doctrine = $doctrine;
}
/**
* BC for SF < 2.4.
* To be replaced by the usage of the request stack when 2.3 support is dropped.
*
* @param Request|null $request
*
* @return $this
*/
public function setRequest(Request $request = null)
{
$this->request = $request;
}
public function initializeRequest(GenericEvent $event)
{
if (null === $this->request) {
return;
}
$this->request->attributes->set('easyadmin', array(
'entity' => $entity = $event->getArgument('entity'),
'view' => $this->request->query->get('action', 'list'),
'item' => ($id = $this->request->query->get('id')) ? $this->findCurrentItem($entity, $id) : null,
));
}
/**
* Looks for the object that corresponds to the selected 'id' of the current entity.
*
* @param array $entity
* @param mixed $id
*
* @return object The entity
*
* @throws EntityNotFoundException
*/
private function findCurrentItem(array $entity, $id)
{
if (!$entity = $this->doctrine->getRepository($entity['class'])->find($id)) {
throw new EntityNotFoundException(array('entity' => $entity, 'entity_id' => $id));
}
return $entity;
}
}
| 1 | 9,211 | Feel free to use the full notation with my email address: `Maxime Steinhausser <[email protected]>` :smile: | EasyCorp-EasyAdminBundle | php |
@@ -44,7 +44,6 @@ const FCGINullRequestID uint8 = 0
// FCGIKeepConn describes keep connection mode.
const FCGIKeepConn uint8 = 1
-const doubleCRLF = "\r\n\r\n"
const (
// BeginRequest is the begin request flag. | 1 | // Forked Jan. 2015 from http://bitbucket.org/PinIdea/fcgi_client
// (which is forked from https://code.google.com/p/go-fastcgi-client/)
// This fork contains several fixes and improvements by Matt Holt and
// other contributors to this project.
// Copyright 2012 Junqing Tan <[email protected]> and The Go Authors
// Use of this source code is governed by a BSD-style
// Part of source code is from Go fcgi package
package fastcgi
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"io"
"io/ioutil"
"mime/multipart"
"net"
"net/http"
"net/http/httputil"
"net/textproto"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
)
// FCGIListenSockFileno describes listen socket file number.
const FCGIListenSockFileno uint8 = 0
// FCGIHeaderLen describes header length.
const FCGIHeaderLen uint8 = 8
// Version1 describes the version.
const Version1 uint8 = 1
// FCGINullRequestID describes the null request ID.
const FCGINullRequestID uint8 = 0
// FCGIKeepConn describes keep connection mode.
const FCGIKeepConn uint8 = 1
const doubleCRLF = "\r\n\r\n"
const (
// BeginRequest is the begin request flag.
BeginRequest uint8 = iota + 1
// AbortRequest is the abort request flag.
AbortRequest
// EndRequest is the end request flag.
EndRequest
// Params is the parameters flag.
Params
// Stdin is the standard input flag.
Stdin
// Stdout is the standard output flag.
Stdout
// Stderr is the standard error flag.
Stderr
// Data is the data flag.
Data
// GetValues is the get values flag.
GetValues
// GetValuesResult is the get values result flag.
GetValuesResult
// UnknownType is the unknown type flag.
UnknownType
// MaxType is the maximum type flag.
MaxType = UnknownType
)
const (
// Responder is the responder flag.
Responder uint8 = iota + 1
// Authorizer is the authorizer flag.
Authorizer
// Filter is the filter flag.
Filter
)
const (
// RequestComplete is the completed request flag.
RequestComplete uint8 = iota
// CantMultiplexConns is the multiplexed connections flag.
CantMultiplexConns
// Overloaded is the overloaded flag.
Overloaded
// UnknownRole is the unknown role flag.
UnknownRole
)
const (
// MaxConns is the maximum connections flag.
MaxConns string = "MAX_CONNS"
// MaxRequests is the maximum requests flag.
MaxRequests string = "MAX_REQS"
// MultiplexConns is the multiplex connections flag.
MultiplexConns string = "MPXS_CONNS"
)
const (
maxWrite = 65500 // 65530 may work, but for compatibility
maxPad = 255
)
type header struct {
Version uint8
Type uint8
ID uint16
ContentLength uint16
PaddingLength uint8
Reserved uint8
}
// for padding so we don't have to allocate all the time
// not synchronized because we don't care what the contents are
var pad [maxPad]byte
func (h *header) init(recType uint8, reqID uint16, contentLength int) {
h.Version = 1
h.Type = recType
h.ID = reqID
h.ContentLength = uint16(contentLength)
h.PaddingLength = uint8(-contentLength & 7)
}
type record struct {
h header
rbuf []byte
}
func (rec *record) read(r io.Reader) (buf []byte, err error) {
if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil {
return
}
if rec.h.Version != 1 {
err = errInvalidHeaderVersion
return
}
if rec.h.Type == EndRequest {
err = io.EOF
return
}
n := int(rec.h.ContentLength) + int(rec.h.PaddingLength)
if len(rec.rbuf) < n {
rec.rbuf = make([]byte, n)
}
if _, err = io.ReadFull(r, rec.rbuf[:n]); err != nil {
return
}
buf = rec.rbuf[:int(rec.h.ContentLength)]
return
}
// FCGIClient implements a FastCGI client, which is a standard for
// interfacing external applications with Web servers.
type FCGIClient struct {
mutex sync.Mutex
rwc io.ReadWriteCloser
h header
buf bytes.Buffer
stderr bytes.Buffer
keepAlive bool
reqID uint16
}
// DialWithDialer connects to the fcgi responder at the specified network address, using custom net.Dialer.
// See func net.Dial for a description of the network and address parameters.
func DialWithDialer(network, address string, dialer net.Dialer) (fcgi *FCGIClient, err error) {
var conn net.Conn
conn, err = dialer.Dial(network, address)
if err != nil {
return
}
fcgi = &FCGIClient{
rwc: conn,
keepAlive: false,
reqID: 1,
}
return
}
// Dial connects to the fcgi responder at the specified network address, using default net.Dialer.
// See func net.Dial for a description of the network and address parameters.
func Dial(network, address string) (fcgi *FCGIClient, err error) {
return DialWithDialer(network, address, net.Dialer{})
}
// Close closes fcgi connnection.
func (c *FCGIClient) Close() error {
return c.rwc.Close()
}
func (c *FCGIClient) writeRecord(recType uint8, content []byte) (err error) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.buf.Reset()
c.h.init(recType, c.reqID, len(content))
if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil {
return err
}
if _, err := c.buf.Write(content); err != nil {
return err
}
if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil {
return err
}
_, err = c.rwc.Write(c.buf.Bytes())
return err
}
func (c *FCGIClient) writeBeginRequest(role uint16, flags uint8) error {
b := [8]byte{byte(role >> 8), byte(role), flags}
return c.writeRecord(BeginRequest, b[:])
}
func (c *FCGIClient) writeEndRequest(appStatus int, protocolStatus uint8) error {
b := make([]byte, 8)
binary.BigEndian.PutUint32(b, uint32(appStatus))
b[4] = protocolStatus
return c.writeRecord(EndRequest, b)
}
func (c *FCGIClient) writePairs(recType uint8, pairs map[string]string) error {
w := newWriter(c, recType)
b := make([]byte, 8)
nn := 0
for k, v := range pairs {
m := 8 + len(k) + len(v)
if m > maxWrite {
// param data size exceed 65535 bytes"
vl := maxWrite - 8 - len(k)
v = v[:vl]
}
n := encodeSize(b, uint32(len(k)))
n += encodeSize(b[n:], uint32(len(v)))
m = n + len(k) + len(v)
if (nn + m) > maxWrite {
w.Flush()
nn = 0
}
nn += m
if _, err := w.Write(b[:n]); err != nil {
return err
}
if _, err := w.WriteString(k); err != nil {
return err
}
if _, err := w.WriteString(v); err != nil {
return err
}
}
w.Close()
return nil
}
func readSize(s []byte) (uint32, int) {
if len(s) == 0 {
return 0, 0
}
size, n := uint32(s[0]), 1
if size&(1<<7) != 0 {
if len(s) < 4 {
return 0, 0
}
n = 4
size = binary.BigEndian.Uint32(s)
size &^= 1 << 31
}
return size, n
}
func readString(s []byte, size uint32) string {
if size > uint32(len(s)) {
return ""
}
return string(s[:size])
}
func encodeSize(b []byte, size uint32) int {
if size > 127 {
size |= 1 << 31
binary.BigEndian.PutUint32(b, size)
return 4
}
b[0] = byte(size)
return 1
}
// bufWriter encapsulates bufio.Writer but also closes the underlying stream when
// Closed.
type bufWriter struct {
closer io.Closer
*bufio.Writer
}
func (w *bufWriter) Close() error {
if err := w.Writer.Flush(); err != nil {
w.closer.Close()
return err
}
return w.closer.Close()
}
func newWriter(c *FCGIClient, recType uint8) *bufWriter {
s := &streamWriter{c: c, recType: recType}
w := bufio.NewWriterSize(s, maxWrite)
return &bufWriter{s, w}
}
// streamWriter abstracts out the separation of a stream into discrete records.
// It only writes maxWrite bytes at a time.
type streamWriter struct {
c *FCGIClient
recType uint8
}
func (w *streamWriter) Write(p []byte) (int, error) {
nn := 0
for len(p) > 0 {
n := len(p)
if n > maxWrite {
n = maxWrite
}
if err := w.c.writeRecord(w.recType, p[:n]); err != nil {
return nn, err
}
nn += n
p = p[n:]
}
return nn, nil
}
func (w *streamWriter) Close() error {
// send empty record to close the stream
return w.c.writeRecord(w.recType, nil)
}
type streamReader struct {
c *FCGIClient
buf []byte
}
func (w *streamReader) Read(p []byte) (n int, err error) {
if len(p) > 0 {
if len(w.buf) == 0 {
// filter outputs for error log
for {
rec := &record{}
var buf []byte
buf, err = rec.read(w.c.rwc)
if err == errInvalidHeaderVersion {
continue
} else if err != nil {
return
}
// standard error output
if rec.h.Type == Stderr {
w.c.stderr.Write(buf)
continue
}
w.buf = buf
break
}
}
n = len(p)
if n > len(w.buf) {
n = len(w.buf)
}
copy(p, w.buf[:n])
w.buf = w.buf[n:]
}
return
}
// Do made the request and returns a io.Reader that translates the data read
// from fcgi responder out of fcgi packet before returning it.
func (c *FCGIClient) Do(p map[string]string, req io.Reader) (r io.Reader, err error) {
err = c.writeBeginRequest(uint16(Responder), FCGIKeepConn)
if err != nil {
return
}
err = c.writePairs(Params, p)
if err != nil {
return
}
body := newWriter(c, Stdin)
if req != nil {
io.Copy(body, req)
}
body.Close()
r = &streamReader{c: c}
return
}
// clientCloser is a io.ReadCloser. It wraps a io.Reader with a Closer
// that closes FCGIClient connection.
type clientCloser struct {
f *FCGIClient
io.Reader
}
func (c clientCloser) Close() error { return c.f.Close() }
// Request returns a HTTP Response with Header and Body
// from fcgi responder
func (c *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Response, err error) {
r, err := c.Do(p, req)
if err != nil {
return
}
rb := bufio.NewReader(r)
tp := textproto.NewReader(rb)
resp = new(http.Response)
// Parse the response headers.
mimeHeader, err := tp.ReadMIMEHeader()
if err != nil && err != io.EOF {
return
}
resp.Header = http.Header(mimeHeader)
if resp.Header.Get("Status") != "" {
statusParts := strings.SplitN(resp.Header.Get("Status"), " ", 2)
resp.StatusCode, err = strconv.Atoi(statusParts[0])
if err != nil {
return
}
if len(statusParts) > 1 {
resp.Status = statusParts[1]
}
} else {
resp.StatusCode = http.StatusOK
}
// TODO: fixTransferEncoding ?
resp.TransferEncoding = resp.Header["Transfer-Encoding"]
resp.ContentLength, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if chunked(resp.TransferEncoding) {
resp.Body = clientCloser{c, httputil.NewChunkedReader(rb)}
} else {
resp.Body = clientCloser{c, ioutil.NopCloser(rb)}
}
return
}
// Get issues a GET request to the fcgi responder.
func (c *FCGIClient) Get(p map[string]string) (resp *http.Response, err error) {
p["REQUEST_METHOD"] = "GET"
p["CONTENT_LENGTH"] = "0"
return c.Request(p, nil)
}
// Head issues a HEAD request to the fcgi responder.
func (c *FCGIClient) Head(p map[string]string) (resp *http.Response, err error) {
p["REQUEST_METHOD"] = "HEAD"
p["CONTENT_LENGTH"] = "0"
return c.Request(p, nil)
}
// Options issues an OPTIONS request to the fcgi responder.
func (c *FCGIClient) Options(p map[string]string) (resp *http.Response, err error) {
p["REQUEST_METHOD"] = "OPTIONS"
p["CONTENT_LENGTH"] = "0"
return c.Request(p, nil)
}
// Post issues a POST request to the fcgi responder. with request body
// in the format that bodyType specified
func (c *FCGIClient) Post(p map[string]string, method string, bodyType string, body io.Reader, l int) (resp *http.Response, err error) {
if p == nil {
p = make(map[string]string)
}
p["REQUEST_METHOD"] = strings.ToUpper(method)
if len(p["REQUEST_METHOD"]) == 0 || p["REQUEST_METHOD"] == "GET" {
p["REQUEST_METHOD"] = "POST"
}
p["CONTENT_LENGTH"] = strconv.Itoa(l)
if len(bodyType) > 0 {
p["CONTENT_TYPE"] = bodyType
} else {
p["CONTENT_TYPE"] = "application/x-www-form-urlencoded"
}
return c.Request(p, body)
}
// PostForm issues a POST to the fcgi responder, with form
// as a string key to a list values (url.Values)
func (c *FCGIClient) PostForm(p map[string]string, data url.Values) (resp *http.Response, err error) {
body := bytes.NewReader([]byte(data.Encode()))
return c.Post(p, "POST", "application/x-www-form-urlencoded", body, body.Len())
}
// PostFile issues a POST to the fcgi responder in multipart(RFC 2046) standard,
// with form as a string key to a list values (url.Values),
// and/or with file as a string key to a list file path.
func (c *FCGIClient) PostFile(p map[string]string, data url.Values, file map[string]string) (resp *http.Response, err error) {
buf := &bytes.Buffer{}
writer := multipart.NewWriter(buf)
bodyType := writer.FormDataContentType()
for key, val := range data {
for _, v0 := range val {
err = writer.WriteField(key, v0)
if err != nil {
return
}
}
}
for key, val := range file {
fd, e := os.Open(val)
if e != nil {
return nil, e
}
defer fd.Close()
part, e := writer.CreateFormFile(key, filepath.Base(val))
if e != nil {
return nil, e
}
_, err = io.Copy(part, fd)
if err != nil {
return
}
}
err = writer.Close()
if err != nil {
return
}
return c.Post(p, "POST", bodyType, buf, buf.Len())
}
// Checks whether chunked is part of the encodings stack
func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
var errInvalidHeaderVersion = errors.New("fcgi: invalid header version")
| 1 | 9,353 | This was unused across the codebase | caddyserver-caddy | go |
@@ -2278,7 +2278,8 @@ func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) {
acc, err := s.LookupAccount(ca.Client.serviceAccount())
if err != nil {
- // TODO(dlc) - log error
+ js.mu.Unlock()
+ s.Warnf("Account lookup for consumer create failed: %v", err)
return
}
| 1 | // Copyright 2020-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"math/rand"
"path"
"reflect"
"sort"
"strings"
"sync/atomic"
"time"
"github.com/klauspost/compress/s2"
"github.com/nats-io/nuid"
)
// jetStreamCluster holds information about the meta group and stream
// assignments for clustered JetStream.
// NOTE(review): access appears to be guarded by the owning jetStream's
// mutex — confirm before relying on unguarded reads.
type jetStreamCluster struct {
	// The metacontroller raftNode.
	meta RaftNode
	// For stream and consumer assignments. All servers will have this be the same.
	// ACC -> STREAM -> Stream Assignment -> Consumers
	streams map[string]map[string]*streamAssignment
	// Server.
	s *Server
	// Internal client.
	c *client
	// Processing assignment results.
	streamResults   *subscription
	consumerResults *subscription
	// System level request to have the leader stepdown.
	stepdown *subscription
	// System level requests to remove a peer.
	peerRemove *subscription
}
// Placement is used to guide placement of streams and meta controllers in
// clustered JetStream.
type Placement struct {
	// Cluster is the name of the cluster the asset should be placed in.
	Cluster string `json:"cluster"`
	// Tags, if set, further constrain placement (presumably matched
	// against server tags — confirm against the placement logic).
	Tags []string `json:"tags,omitempty"`
}
// Define types of the entry.
// NOTE(review): these values are derived via iota and are written into raft
// entries; appending new ops at the end is safe, but reordering existing
// ones would change their encoded values — confirm before touching.
type entryOp uint8

const (
	// Meta ops.
	assignStreamOp entryOp = iota
	assignConsumerOp
	removeStreamOp
	removeConsumerOp
	// Stream ops.
	streamMsgOp
	purgeStreamOp
	deleteMsgOp
	// Consumer ops
	updateDeliveredOp
	updateAcksOp
	// Compressed consumer assignments.
	assignCompressedConsumerOp
	// Filtered Consumer skip.
	updateSkipOp
	// Update Stream
	updateStreamOp
)
// raftGroups are controlled by the metagroup controller.
// The raftGroups will house streams and consumers.
type raftGroup struct {
	// Name is the unique name of this raft group.
	Name string `json:"name"`
	// Peers holds the peer IDs participating in this group.
	Peers []string `json:"peers"`
	// Storage is the storage backend type for the group.
	Storage StorageType `json:"store"`
	// Preferred, if set, is the preferred leader for the group.
	Preferred string `json:"preferred,omitempty"`
	// Internal
	node RaftNode
}
// streamAssignment is what the meta controller uses to assign streams to peers.
type streamAssignment struct {
	Client  *ClientInfo   `json:"client,omitempty"`
	Created time.Time     `json:"created"`
	Config  *StreamConfig `json:"stream"`
	Group   *raftGroup    `json:"group"`
	Sync    string        `json:"sync"`
	// Subject and Reply carry the original API request so the leader can
	// respond once the assignment is processed.
	Subject string       `json:"subject"`
	Reply   string       `json:"reply"`
	Restore *StreamState `json:"restore_state,omitempty"`
	// Internal
	// consumers tracks consumer assignments for this stream by name.
	consumers map[string]*consumerAssignment
	// responded records whether a response was already sent for this request.
	responded bool
	// err holds any error encountered while processing the assignment.
	err error
}
// consumerAssignment is what the meta controller uses to assign consumers to streams.
type consumerAssignment struct {
	Client  *ClientInfo     `json:"client,omitempty"`
	Created time.Time       `json:"created"`
	Name    string          `json:"name"`
	Stream  string          `json:"stream"`
	Config  *ConsumerConfig `json:"consumer"`
	Group   *raftGroup      `json:"group"`
	// Subject and Reply carry the original API request so the leader can
	// respond once the assignment is processed.
	Subject string         `json:"subject"`
	Reply   string         `json:"reply"`
	State   *ConsumerState `json:"state,omitempty"`
	// Internal
	// responded records whether a response was already sent for this request.
	responded bool
	// deleted marks the assignment as removed.
	deleted bool
	// err holds any error encountered while processing the assignment.
	err error
}
// streamPurge is what the stream leader will replicate when purging a stream.
type streamPurge struct {
	Client *ClientInfo `json:"client,omitempty"`
	Stream string      `json:"stream"`
	// LastSeq is the last sequence at the time of the purge.
	LastSeq uint64 `json:"last_seq"`
	// Subject and Reply carry the original API request so the leader can
	// respond once the purge is applied.
	Subject string `json:"subject"`
	Reply   string `json:"reply"`
}
// streamMsgDelete is what the stream leader will replicate when deleting a message.
type streamMsgDelete struct {
	Client *ClientInfo `json:"client,omitempty"`
	Stream string      `json:"stream"`
	// Seq is the stream sequence of the message to delete.
	Seq uint64 `json:"seq"`
	// NoErase, when true, skips securely overwriting the message data.
	NoErase bool `json:"no_erase,omitempty"`
	// Subject and Reply carry the original API request so the leader can
	// respond once the delete is applied.
	Subject string `json:"subject"`
	Reply   string `json:"reply"`
}
const (
	// Directory name component used for JetStream storage.
	defaultStoreDirName = "_js_"
	// Raft group name for the JetStream metadata controller.
	defaultMetaGroupName = "_meta_"
	// Filestore block size for the metadata group (1 MiB).
	defaultMetaFSBlkSize = 1024 * 1024
)
// validateJetStreamOptions validates the cluster-related JetStream options.
// It only applies when JetStream is enabled and clustering is configured;
// otherwise no checks are performed.
func validateJetStreamOptions(o *Options) error {
	// Nothing to validate for standalone or JetStream-disabled setups.
	if !o.JetStream || o.Cluster.Port == 0 {
		return nil
	}
	// Clustered JetStream requires both a server name and a cluster name.
	switch {
	case o.ServerName == _EMPTY_:
		return fmt.Errorf("jetstream cluster requires `server_name` to be set")
	case o.Cluster.Name == _EMPTY_:
		return fmt.Errorf("jetstream cluster requires `cluster.name` to be set")
	}
	return nil
}
// getJetStreamCluster returns the jetStream context and its cluster state.
// Either return value is nil when the server is shutting down, JetStream is
// not enabled, or the server is not running in clustered mode.
func (s *Server) getJetStreamCluster() (*jetStream, *jetStreamCluster) {
	// Snapshot the relevant server state under the server lock.
	s.mu.Lock()
	js, shuttingDown := s.js, s.shutdown
	s.mu.Unlock()

	if js == nil || shuttingDown {
		return nil, nil
	}

	// Grab the cluster state under the jetStream lock.
	js.mu.RLock()
	cc := js.cluster
	js.mu.RUnlock()

	if cc == nil {
		return nil, nil
	}
	return js, cc
}
// JetStreamIsClustered reports whether this server is running JetStream in
// clustered mode.
func (s *Server) JetStreamIsClustered() bool {
	if js := s.getJetStream(); js != nil {
		js.mu.RLock()
		defer js.mu.RUnlock()
		return js.cluster != nil
	}
	return false
}
// JetStreamIsLeader reports whether this server is the JetStream meta leader.
// Non-clustered mode counts as leader (see jetStreamCluster.isLeader).
func (s *Server) JetStreamIsLeader() bool {
	js := s.getJetStream()
	if js == nil {
		return false
	}
	js.mu.RLock()
	leader := js.cluster.isLeader()
	js.mu.RUnlock()
	return leader
}
// JetStreamIsCurrent reports whether this server's meta group is the leader
// or an up-to-date follower.
func (s *Server) JetStreamIsCurrent() bool {
	js := s.getJetStream()
	if js == nil {
		return false
	}
	js.mu.RLock()
	current := js.cluster.isCurrent()
	js.mu.RUnlock()
	return current
}
// JetStreamSnapshotMeta installs a snapshot of the current meta state into
// the meta raft group. Only the meta leader may do this; followers receive
// errNotLeader.
func (s *Server) JetStreamSnapshotMeta() error {
	js := s.getJetStream()
	if js == nil {
		return ErrJetStreamNotEnabled
	}
	js.mu.RLock()
	defer js.mu.RUnlock()
	// Compute and install the snapshot while holding the read lock.
	if cc := js.cluster; cc.isLeader() {
		return cc.meta.InstallSnapshot(js.metaSnapshot())
	}
	return errNotLeader
}
// JetStreamStepdownStream asks the given stream's raft node to step down as
// leader, if this server currently leads it. It is a no-op when this server
// is not the stream leader or the stream has no raft node.
func (s *Server) JetStreamStepdownStream(account, stream string) error {
	js, cc := s.getJetStreamCluster()
	if js == nil {
		return ErrJetStreamNotEnabled
	}
	if cc == nil {
		return ErrJetStreamNotClustered
	}
	// Resolve the account, then the stream within it.
	acc, err := s.LookupAccount(account)
	if err != nil {
		return err
	}
	mset, err := acc.lookupStream(stream)
	if err != nil {
		return err
	}
	// Only step down when we actually lead the group.
	node := mset.raftNode()
	if node == nil || !node.Leader() {
		return nil
	}
	node.StepDown()
	return nil
}
// JetStreamSnapshotStream installs a snapshot of the stream's current state
// into its raft group's WAL. The stream must have a raft node and this
// server must be its leader.
func (s *Server) JetStreamSnapshotStream(account, stream string) error {
	js, cc := s.getJetStreamCluster()
	if js == nil {
		return ErrJetStreamNotEnabled
	}
	if cc == nil {
		return ErrJetStreamNotClustered
	}
	// Grab account
	acc, err := s.LookupAccount(account)
	if err != nil {
		return err
	}
	// Grab stream
	mset, err := acc.lookupStream(stream)
	if err != nil {
		return err
	}
	mset.mu.RLock()
	n := mset.node
	mset.mu.RUnlock()
	// Guard against streams with no raft node (e.g. single-replica streams):
	// calling Leader() on a nil node would panic.
	if n == nil {
		return ErrJetStreamNotClustered
	}
	if !n.Leader() {
		return ErrJetStreamNotLeader
	}
	return n.InstallSnapshot(mset.stateSnapshot())
}
// JetStreamClusterPeers returns the names of the known online peers of the
// metadata group. Only the metadata leader answers; others return nil.
func (s *Server) JetStreamClusterPeers() []string {
	js := s.getJetStream()
	if js == nil {
		return nil
	}
	js.mu.RLock()
	defer js.mu.RUnlock()
	cc := js.cluster
	if !cc.isLeader() {
		return nil
	}
	var nodes []string
	for _, p := range cc.meta.Peers() {
		if si, ok := s.nodeToInfo.Load(p.ID); ok {
			if ni := si.(nodeInfo); !ni.offline {
				nodes = append(nodes, ni.name)
			}
		}
	}
	return nodes
}
// Read lock should be held.
func (cc *jetStreamCluster) isLeader() bool {
	// A nil cluster means non-clustered mode, where this node is always
	// considered the leader.
	if cc == nil {
		return true
	}
	return cc.meta.Leader()
}
// isCurrent will determine if this node is a leader or an up to date follower.
// Read lock should be held.
func (cc *jetStreamCluster) isCurrent() bool {
	// Non-clustered mode is always current.
	if cc == nil {
		return true
	}
	// Without a meta node we cannot be current.
	if cc.meta == nil {
		return false
	}
	return cc.meta.Current()
}
// isStreamCurrent will determine if this node is a participant for the stream and if its up to date.
// Read lock should be held.
func (cc *jetStreamCluster) isStreamCurrent(account, stream string) bool {
	if cc == nil {
		// Non-clustered mode
		return true
	}
	// Resolve the stream assignment for this account/stream.
	as := cc.streams[account]
	if as == nil {
		return false
	}
	sa := as[stream]
	if sa == nil {
		return false
	}
	// We need an assigned raft group with a local node to answer.
	rg := sa.Group
	if rg == nil || rg.node == nil {
		return false
	}
	isCurrent := rg.node.Current()
	if isCurrent {
		// Check if we are processing a snapshot and are catching up.
		acc, err := cc.s.LookupAccount(account)
		if err != nil {
			return false
		}
		mset, err := acc.lookupStream(stream)
		if err != nil {
			return false
		}
		// Even with a current raft log we are not "current" while a
		// catch-up from a snapshot is still in progress.
		if mset.isCatchingUp() {
			return false
		}
	}
	return isCurrent
}
// getJetStreamFromAccount walks the account's JetStream linkage and returns
// the owning server, the jetStream context and the per-account state.
// Returns all nils if any link in the chain is missing. Each structure is
// read under its own lock, released before taking the next.
func (a *Account) getJetStreamFromAccount() (*Server, *jetStream, *jsAccount) {
	a.mu.RLock()
	jsa := a.js
	a.mu.RUnlock()
	if jsa == nil {
		return nil, nil, nil
	}
	jsa.mu.RLock()
	js := jsa.js
	jsa.mu.RUnlock()
	if js == nil {
		return nil, nil, nil
	}
	js.mu.RLock()
	s := js.srv
	js.mu.RUnlock()
	return s, js, jsa
}
// JetStreamIsStreamLeader reports whether this server leads the raft group
// for the given account's stream.
func (s *Server) JetStreamIsStreamLeader(account, stream string) bool {
	if js, cc := s.getJetStreamCluster(); js != nil && cc != nil {
		js.mu.RLock()
		defer js.mu.RUnlock()
		return cc.isStreamLeader(account, stream)
	}
	return false
}
// JetStreamIsStreamLeader reports whether this account's server leads the
// raft group for the given stream.
func (a *Account) JetStreamIsStreamLeader(stream string) bool {
	s, js, jsa := a.getJetStreamFromAccount()
	if s == nil || js == nil || jsa == nil {
		return false
	}
	js.mu.RLock()
	leader := js.cluster.isStreamLeader(a.Name, stream)
	js.mu.RUnlock()
	return leader
}
// JetStreamIsStreamCurrent reports whether this node participates in the
// stream's group and its raft state is up to date.
func (s *Server) JetStreamIsStreamCurrent(account, stream string) bool {
	js, cc := s.getJetStreamCluster()
	if js == nil {
		return false
	}
	js.mu.RLock()
	current := cc.isStreamCurrent(account, stream)
	js.mu.RUnlock()
	return current
}
// JetStreamIsConsumerLeader reports whether this account's server leads the
// raft group for the given consumer.
func (a *Account) JetStreamIsConsumerLeader(stream, consumer string) bool {
	s, js, jsa := a.getJetStreamFromAccount()
	if s == nil || js == nil || jsa == nil {
		return false
	}
	js.mu.RLock()
	leader := js.cluster.isConsumerLeader(a.Name, stream, consumer)
	js.mu.RUnlock()
	return leader
}
// JetStreamIsConsumerLeader reports whether this server leads the raft group
// for the given account's consumer.
func (s *Server) JetStreamIsConsumerLeader(account, stream, consumer string) bool {
	if js, cc := s.getJetStreamCluster(); js != nil && cc != nil {
		js.mu.RLock()
		defer js.mu.RUnlock()
		return cc.isConsumerLeader(account, stream, consumer)
	}
	return false
}
// enableJetStreamClustering bootstraps the cluster side of JetStream for a
// running server. It is a no-op if the server is not running or the cluster
// is already set up, and requires a static cluster name plus explicit routes.
func (s *Server) enableJetStreamClustering() error {
	if !s.isRunning() {
		return nil
	}
	js := s.getJetStream()
	if js == nil {
		return ErrJetStreamNotEnabled
	}
	// Already set.
	if js.cluster != nil {
		return nil
	}
	s.Noticef("Starting JetStream cluster")
	// We need to determine if we have a stable cluster name and expected number of servers.
	s.Debugf("JetStream cluster checking for stable cluster name and peers")
	if s.isClusterNameDynamic() || s.configuredRoutes() == 0 {
		return errors.New("JetStream cluster requires cluster name and explicit routes")
	}
	return js.setupMetaGroup()
}
// setupMetaGroup creates (or recovers) the metadata raft group, starts its
// node, and spins up the cluster monitor goroutine.
func (js *jetStream) setupMetaGroup() error {
	s := js.srv
	s.Noticef("Creating JetStream metadata controller")
	// Setup our WAL for the metagroup.
	sysAcc := s.SystemAccount()
	storeDir := path.Join(js.config.StoreDir, sysAcc.Name, defaultStoreDirName, defaultMetaGroupName)
	fs, err := newFileStore(
		FileStoreConfig{StoreDir: storeDir, BlockSize: defaultMetaFSBlkSize, AsyncFlush: false},
		StreamConfig{Name: defaultMetaGroupName, Storage: FileStorage},
	)
	if err != nil {
		s.Errorf("Error creating filestore: %v", err)
		return err
	}
	cfg := &RaftConfig{Name: defaultMetaGroupName, Store: storeDir, Log: fs}
	var bootstrap bool
	// No persisted peer state means this is a fresh cluster bootstrap;
	// otherwise we recover from what is on disk.
	if _, err := readPeerState(storeDir); err != nil {
		s.Noticef("JetStream cluster bootstrapping")
		bootstrap = true
		peers := s.ActivePeers()
		s.Debugf("JetStream cluster initial peers: %+v", peers)
		if err := s.bootstrapRaftNode(cfg, peers, false); err != nil {
			return err
		}
	} else {
		s.Noticef("JetStream cluster recovering state")
	}
	// Start up our meta node.
	n, err := s.startRaftNode(cfg)
	if err != nil {
		s.Warnf("Could not start metadata controller: %v", err)
		return err
	}
	// If we are bootstrapped with no state, start campaign early.
	if bootstrap {
		n.Campaign()
	}
	// Internal client used for cluster traffic, registered to the system account.
	c := s.createInternalJetStreamClient()
	sacc := s.SystemAccount()
	js.mu.Lock()
	defer js.mu.Unlock()
	js.cluster = &jetStreamCluster{
		meta:    n,
		streams: make(map[string]map[string]*streamAssignment),
		s:       s,
		c:       c,
	}
	c.registerWithAccount(sacc)
	js.srv.startGoRoutine(js.monitorCluster)
	return nil
}
// getMetaGroup returns the metadata raft node, or nil when not clustered.
func (js *jetStream) getMetaGroup() RaftNode {
	js.mu.RLock()
	defer js.mu.RUnlock()
	if cc := js.cluster; cc != nil {
		return cc.meta
	}
	return nil
}
// server returns the owning Server, read under the jetStream lock.
func (js *jetStream) server() *Server {
	js.mu.RLock()
	defer js.mu.RUnlock()
	return js.srv
}
// Will respond if we do not think we have a metacontroller leader.
func (js *jetStream) isLeaderless() bool {
	js.mu.RLock()
	defer js.mu.RUnlock()
	cc := js.cluster
	if cc == nil || cc.meta == nil {
		return false
	}
	// Leaderless only when no leader is known and the group has existed long
	// enough that an election should already have completed.
	return cc.meta.GroupLeader() == _EMPTY_ && time.Since(cc.meta.Created()) > lostQuorumInterval
}
// Will respond iff we are a member and we know we have no leader.
func (js *jetStream) isGroupLeaderless(rg *raftGroup) bool {
	if rg == nil {
		return false
	}
	js.mu.RLock()
	defer js.mu.RUnlock()
	cc := js.cluster
	// If we are not a member we can not say..
	if !rg.isMember(cc.meta.ID()) {
		return false
	}
	// Single peer groups always have a leader if we are here.
	if rg.node == nil {
		return false
	}
	// If we don't have a leader.
	if rg.node.GroupLeader() == _EMPTY_ {
		// If a leader was previously known and is now gone, report leaderless
		// immediately without waiting out the quorum interval.
		if rg.node.HadPreviousLeader() {
			return true
		}
		// Make sure we have been running for enough time.
		if time.Since(rg.node.Created()) > lostQuorumInterval {
			return true
		}
	}
	return false
}
// JetStreamIsStreamAssigned reports whether this server is assigned to the
// raft group of the given account's stream.
func (s *Server) JetStreamIsStreamAssigned(account, stream string) bool {
	if js, cc := s.getJetStreamCluster(); js != nil && cc != nil {
		if acc, _ := s.LookupAccount(account); acc != nil {
			return cc.isStreamAssigned(acc, stream)
		}
	}
	return false
}
// streamAssigned informs us if this server has this stream assigned.
func (jsa *jsAccount) streamAssigned(stream string) bool {
	jsa.mu.RLock()
	js, acc := jsa.js, jsa.account
	jsa.mu.RUnlock()
	if js == nil {
		return false
	}
	js.mu.RLock()
	defer js.mu.RUnlock()
	return js.cluster.isStreamAssigned(acc, stream)
}
// Read lock should be held.
func (cc *jetStreamCluster) isStreamAssigned(a *Account, stream string) bool {
	// Non-clustered mode always return true.
	if cc == nil {
		return true
	}
	// Locate the assignment for this account/stream.
	var sa *streamAssignment
	if as := cc.streams[a.Name]; as != nil {
		sa = as[stream]
	}
	if sa == nil || sa.Group == nil {
		return false
	}
	// Assigned means our node id appears in the raft group's peer set.
	ourID := cc.meta.ID()
	for _, peer := range sa.Group.Peers {
		if peer == ourID {
			return true
		}
	}
	return false
}
// Read lock should be held.
func (cc *jetStreamCluster) isStreamLeader(account, stream string) bool {
	// Non-clustered mode always return true.
	if cc == nil {
		return true
	}
	if cc.meta == nil {
		return false
	}
	var sa *streamAssignment
	if as := cc.streams[account]; as != nil {
		sa = as[stream]
	}
	if sa == nil {
		return false
	}
	rg := sa.Group
	if rg == nil {
		return false
	}
	// We lead the stream if we are a member of its raft group and either the
	// group is a singleton or our local raft node reports leadership.
	ourID := cc.meta.ID()
	for _, peer := range rg.Peers {
		if peer != ourID {
			continue
		}
		if len(rg.Peers) == 1 || (rg.node != nil && rg.node.Leader()) {
			return true
		}
	}
	return false
}
// Read lock should be held.
func (cc *jetStreamCluster) isConsumerLeader(account, stream, consumer string) bool {
	// Non-clustered mode always return true.
	if cc == nil {
		return true
	}
	if cc.meta == nil {
		return false
	}
	var sa *streamAssignment
	if as := cc.streams[account]; as != nil {
		sa = as[stream]
	}
	if sa == nil {
		return false
	}
	// Check if we are the leader of this raftGroup assigned to this consumer.
	ca := sa.consumers[consumer]
	if ca == nil {
		return false
	}
	rg := ca.Group
	ourID := cc.meta.ID()
	for _, peer := range rg.Peers {
		if peer != ourID {
			continue
		}
		if len(rg.Peers) == 1 || (rg.node != nil && rg.node.Leader()) {
			return true
		}
	}
	return false
}
// monitorCluster is the long-running goroutine that drives the metadata raft
// group: it applies committed entries, reacts to leader changes, and installs
// periodic snapshots to keep the WAL compact.
func (js *jetStream) monitorCluster() {
	s, n := js.server(), js.getMetaGroup()
	qch, lch, ach := n.QuitC(), n.LeadChangeC(), n.ApplyC()
	defer s.grWG.Done()
	s.Debugf("Starting metadata monitor")
	defer s.Debugf("Exiting metadata monitor")
	// Periodic snapshot/compaction cadence.
	const compactInterval = 2 * time.Minute
	t := time.NewTicker(compactInterval)
	defer t.Stop()
	var (
		isLeader     bool
		lastSnap     []byte
		lastSnapTime time.Time
	)
	// doSnapshot installs a new snapshot only when the meta state actually
	// changed since the last one we installed.
	doSnapshot := func() {
		if snap := js.metaSnapshot(); !bytes.Equal(lastSnap, snap) {
			if err := n.InstallSnapshot(snap); err == nil {
				lastSnap = snap
				lastSnapTime = time.Now()
			}
		}
	}
	// We are "recovering" until the raft layer signals replay completion
	// with a nil committed entry.
	isRecovering := true
	for {
		select {
		case <-s.quitCh:
			return
		case <-qch:
			return
		case ce := <-ach:
			if ce == nil {
				// Signals we have replayed all of our metadata.
				isRecovering = false
				s.Debugf("Recovered JetStream cluster metadata")
				continue
			}
			// FIXME(dlc) - Deal with errors.
			if didSnap, didRemoval, err := js.applyMetaEntries(ce.Entries, isRecovering); err == nil {
				_, nb := n.Applied(ce.Index)
				if js.hasPeerEntries(ce.Entries) || didSnap || (didRemoval && time.Since(lastSnapTime) > 2*time.Second) {
					// Since we received one make sure we have our own since we do not store
					// our meta state outside of raft.
					doSnapshot()
				} else if nb > uint64(len(lastSnap)*4) {
					// WAL has grown well past the last snapshot size; compact.
					doSnapshot()
				}
			}
		case isLeader = <-lch:
			js.processLeaderChange(isLeader)
		case <-t.C:
			doSnapshot()
		}
	}
}
// Represents our stable meta state that we can write out.
// This is the JSON-serializable form of a streamAssignment used when
// encoding meta snapshots (see metaSnapshot/applyMetaSnapshot).
type writeableStreamAssignment struct {
	Client  *ClientInfo   `json:"client,omitempty"`
	Created time.Time     `json:"created"`
	Config  *StreamConfig `json:"stream"`
	Group   *raftGroup    `json:"group"`
	Sync    string        `json:"sync"`
	// Consumers holds the stream's consumer assignments, flattened from the
	// streamAssignment's internal map.
	Consumers []*consumerAssignment
}
// metaSnapshot encodes the full set of stream (and consumer) assignments as
// an s2-compressed JSON blob, suitable for InstallSnapshot on the meta group.
// Returns nil when there are no assignments.
func (js *jetStream) metaSnapshot() []byte {
	var streams []writeableStreamAssignment
	js.mu.RLock()
	cc := js.cluster
	for _, asa := range cc.streams {
		for _, sa := range asa {
			wsa := writeableStreamAssignment{
				Client:  sa.Client,
				Created: sa.Created,
				Config:  sa.Config,
				Group:   sa.Group,
				Sync:    sa.Sync,
			}
			// Flatten the consumer map into the serializable slice.
			for _, ca := range sa.consumers {
				wsa.Consumers = append(wsa.Consumers, ca)
			}
			streams = append(streams, wsa)
		}
	}
	if len(streams) == 0 {
		js.mu.RUnlock()
		return nil
	}
	// NOTE: the lock is intentionally held through Marshal and released before
	// compression; manual unlocks here mirror the two return paths.
	b, _ := json.Marshal(streams)
	js.mu.RUnlock()
	return s2.EncodeBetter(nil, b)
}
// applyMetaSnapshot decodes an s2-compressed snapshot (see metaSnapshot) and
// reconciles it against our current assignments: streams/consumers missing
// from the snapshot are removed, new ones are added, and surviving streams
// have their consumers diffed. When isRecovering, assignments are marked so
// replay does not re-trigger client responses.
func (js *jetStream) applyMetaSnapshot(buf []byte, isRecovering bool) error {
	if len(buf) == 0 {
		return nil
	}
	jse, err := s2.Decode(nil, buf)
	if err != nil {
		return err
	}
	var wsas []writeableStreamAssignment
	if err = json.Unmarshal(jse, &wsas); err != nil {
		return err
	}
	// Build our new version here outside of js.
	streams := make(map[string]map[string]*streamAssignment)
	for _, wsa := range wsas {
		as := streams[wsa.Client.serviceAccount()]
		if as == nil {
			as = make(map[string]*streamAssignment)
			streams[wsa.Client.serviceAccount()] = as
		}
		sa := &streamAssignment{Client: wsa.Client, Created: wsa.Created, Config: wsa.Config, Group: wsa.Group, Sync: wsa.Sync}
		if len(wsa.Consumers) > 0 {
			sa.consumers = make(map[string]*consumerAssignment)
			for _, ca := range wsa.Consumers {
				sa.consumers[ca.Name] = ca
			}
		}
		as[wsa.Config.Name] = sa
	}
	js.mu.Lock()
	cc := js.cluster
	// saAdd: new streams; saDel: streams gone from snapshot; saChk: present
	// in both, whose consumers still need diffing.
	var saAdd, saDel, saChk []*streamAssignment
	// Walk through the old list to generate the delete list.
	for account, asa := range cc.streams {
		nasa := streams[account]
		for sn, sa := range asa {
			if nsa := nasa[sn]; nsa == nil {
				saDel = append(saDel, sa)
			} else {
				saChk = append(saChk, nsa)
			}
		}
	}
	// Walk through the new list to generate the add list.
	for account, nasa := range streams {
		asa := cc.streams[account]
		for sn, sa := range nasa {
			if asa[sn] == nil {
				saAdd = append(saAdd, sa)
			}
		}
	}
	// Now walk the ones to check and process consumers.
	var caAdd, caDel []*consumerAssignment
	for _, sa := range saChk {
		if osa := js.streamAssignment(sa.Client.serviceAccount(), sa.Config.Name); osa != nil {
			for _, ca := range osa.consumers {
				if sa.consumers[ca.Name] == nil {
					caDel = append(caDel, ca)
				} else {
					caAdd = append(caAdd, ca)
				}
			}
		}
	}
	js.mu.Unlock()
	// Do removals first.
	for _, sa := range saDel {
		if isRecovering {
			js.setStreamAssignmentRecovering(sa)
		}
		js.processStreamRemoval(sa)
	}
	// Now do add for the streams. Also add in all consumers.
	for _, sa := range saAdd {
		if isRecovering {
			js.setStreamAssignmentRecovering(sa)
		}
		js.processStreamAssignment(sa)
		// We can simply add the consumers.
		for _, ca := range sa.consumers {
			if isRecovering {
				js.setConsumerAssignmentRecovering(ca)
			}
			js.processConsumerAssignment(ca)
		}
	}
	// Now do the deltas for existing stream's consumers.
	for _, ca := range caDel {
		if isRecovering {
			js.setConsumerAssignmentRecovering(ca)
		}
		js.processConsumerRemoval(ca)
	}
	for _, ca := range caAdd {
		if isRecovering {
			js.setConsumerAssignmentRecovering(ca)
		}
		js.processConsumerAssignment(ca)
	}
	return nil
}
// Called on recovery to make sure we do not process like original.
func (js *jetStream) setStreamAssignmentRecovering(sa *streamAssignment) {
	js.mu.Lock()
	defer js.mu.Unlock()
	// Mark as already responded and clear restore/preferred-leader state so
	// replaying this assignment does not answer clients or force placement.
	sa.responded = true
	sa.Restore = nil
	if sa.Group != nil {
		sa.Group.Preferred = _EMPTY_
	}
}
// Called on recovery to make sure we do not process like original.
func (js *jetStream) setConsumerAssignmentRecovering(ca *consumerAssignment) {
	js.mu.Lock()
	defer js.mu.Unlock()
	// Mark as already responded and clear the preferred leader so replay does
	// not answer clients or force placement.
	ca.responded = true
	if ca.Group != nil {
		ca.Group.Preferred = _EMPTY_
	}
}
// Just copied over and changes out the group so it can be encoded.
// Lock should be held.
func (sa *streamAssignment) copyGroup() *streamAssignment {
	csa := *sa
	cg := *sa.Group
	// Copy the peer list so mutating the copy never aliases the original.
	cg.Peers = append([]string(nil), sa.Group.Peers...)
	csa.Group = &cg
	return &csa
}
// processRemovePeer reacts to an EntryRemovePeer committed on the meta group.
// If the removed peer is this server, JetStream is disabled locally and a
// server-removed advisory is published.
func (js *jetStream) processRemovePeer(peer string) {
	js.mu.Lock()
	s, cc := js.srv, js.cluster
	// All nodes will check if this is them.
	isUs := cc.meta.ID() == peer
	disabled := js.disabled
	js.mu.Unlock()
	// We may be already disabled.
	if disabled {
		return
	}
	if isUs {
		s.Errorf("JetStream being DISABLED, our server was removed from the cluster")
		adv := &JSServerRemovedAdvisory{
			TypedEvent: TypedEvent{
				Type: JSServerRemovedAdvisoryType,
				ID:   nuid.Next(),
				Time: time.Now().UTC(),
			},
			Server:   s.Name(),
			ServerID: s.ID(),
			Cluster:  s.cachedClusterName(),
		}
		s.publishAdvisory(nil, JSAdvisoryServerRemoved, adv)
		// Disable asynchronously; DisableJetStream takes server locks.
		go s.DisableJetStream()
	}
}
// Assumes all checks have already been done.
// Lock should be held.
// removePeerFromStream remaps the stream assignment off the given peer and
// proposes the updated assignment (and its consumers, sharing the same peer
// set) through the meta group.
func (js *jetStream) removePeerFromStream(sa *streamAssignment, peer string) {
	s, cc := js.srv, js.cluster
	// Work on a copy so we do not mutate the live assignment directly.
	csa := sa.copyGroup()
	if !cc.remapStreamAssignment(csa, peer) {
		s.Warnf("JetStream cluster could not remap stream '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
	}
	// Send our proposal for this csa. Also use same group definition for all the consumers as well.
	cc.meta.Propose(encodeAddStreamAssignment(csa))
	rg := csa.Group
	for _, ca := range sa.consumers {
		cca := *ca
		cca.Group.Peers = rg.Peers
		cc.meta.Propose(encodeAddConsumerAssignment(&cca))
	}
}
// Check if we have peer related entries.
func (js *jetStream) hasPeerEntries(entries []*Entry) bool {
	for _, e := range entries {
		switch e.Type {
		case EntryRemovePeer, EntryAddPeer:
			return true
		}
	}
	return false
}
// applyMetaEntries applies a batch of committed meta-group entries.
// Returns (didSnap, didRemove, err): whether a snapshot entry was applied and
// whether any removal (stream or consumer) happened, which callers use to
// decide on compaction. When isRecovering, assignments are marked so replay
// does not re-trigger client responses.
func (js *jetStream) applyMetaEntries(entries []*Entry, isRecovering bool) (bool, bool, error) {
	var didSnap, didRemove bool
	for _, e := range entries {
		if e.Type == EntrySnapshot {
			js.applyMetaSnapshot(e.Data, isRecovering)
			didSnap = true
		} else if e.Type == EntryRemovePeer {
			// Peer removals are only acted on live, not during replay.
			if !isRecovering {
				js.processRemovePeer(string(e.Data))
			}
		} else {
			// First byte is the operation code, remainder is the payload.
			buf := e.Data
			switch entryOp(buf[0]) {
			case assignStreamOp:
				sa, err := decodeStreamAssignment(buf[1:])
				if err != nil {
					js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:])
					return didSnap, didRemove, err
				}
				if isRecovering {
					js.setStreamAssignmentRecovering(sa)
				}
				// NOTE(review): processStreamAssignment's bool is folded into
				// didRemove here — presumably it reports a removal-triggering
				// condition; confirm against its definition.
				didRemove = js.processStreamAssignment(sa)
			case removeStreamOp:
				sa, err := decodeStreamAssignment(buf[1:])
				if err != nil {
					js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:])
					return didSnap, didRemove, err
				}
				if isRecovering {
					js.setStreamAssignmentRecovering(sa)
				}
				js.processStreamRemoval(sa)
				didRemove = true
			case assignConsumerOp:
				ca, err := decodeConsumerAssignment(buf[1:])
				if err != nil {
					js.srv.Errorf("JetStream cluster failed to decode consumer assigment: %q", buf[1:])
					return didSnap, didRemove, err
				}
				if isRecovering {
					js.setConsumerAssignmentRecovering(ca)
				}
				js.processConsumerAssignment(ca)
			case assignCompressedConsumerOp:
				ca, err := decodeConsumerAssignmentCompressed(buf[1:])
				if err != nil {
					js.srv.Errorf("JetStream cluster failed to decode compressed consumer assigment: %q", buf[1:])
					return didSnap, didRemove, err
				}
				if isRecovering {
					js.setConsumerAssignmentRecovering(ca)
				}
				js.processConsumerAssignment(ca)
			case removeConsumerOp:
				ca, err := decodeConsumerAssignment(buf[1:])
				if err != nil {
					js.srv.Errorf("JetStream cluster failed to decode consumer assigment: %q", buf[1:])
					return didSnap, didRemove, err
				}
				if isRecovering {
					js.setConsumerAssignmentRecovering(ca)
				}
				js.processConsumerRemoval(ca)
				didRemove = true
			case updateStreamOp:
				sa, err := decodeStreamAssignment(buf[1:])
				if err != nil {
					js.srv.Errorf("JetStream cluster failed to decode stream assignment: %q", buf[1:])
					return didSnap, didRemove, err
				}
				if isRecovering {
					js.setStreamAssignmentRecovering(sa)
				}
				js.processUpdateStreamAssignment(sa)
			default:
				panic("JetStream Cluster Unknown meta entry op type")
			}
		}
	}
	return didSnap, didRemove, nil
}
// isMember reports whether the given node id is in this group's peer set.
// Safe to call on a nil group.
func (rg *raftGroup) isMember(id string) bool {
	if rg == nil {
		return false
	}
	for _, p := range rg.Peers {
		if p == id {
			return true
		}
	}
	return false
}
// setPreferred picks a preferred leader for the group: the sole peer for a
// singleton group, a random peer otherwise. No-op on nil/empty groups.
func (rg *raftGroup) setPreferred() {
	if rg == nil || len(rg.Peers) == 0 {
		return
	}
	idx := 0
	if n := len(rg.Peers); n > 1 {
		// For now just randomly select a peer for the preferred.
		idx = int(rand.Int31n(int32(n)))
	}
	rg.Preferred = rg.Peers[idx]
}
// createRaftGroup is called to spin up this raft group if needed.
// It creates the group's WAL (file or memory backed per the storage type),
// bootstraps peer state when none exists on disk, starts the raft node, and
// campaigns immediately if we are the preferred leader. No-op for singleton
// groups, groups we are not a member of, or groups already running.
func (js *jetStream) createRaftGroup(rg *raftGroup, storage StorageType) error {
	js.mu.Lock()
	defer js.mu.Unlock()
	s, cc := js.srv, js.cluster
	// If this is a single peer raft group or we are not a member return.
	if len(rg.Peers) <= 1 || !rg.isMember(cc.meta.ID()) {
		// Nothing to do here.
		return nil
	}
	// We already have this assigned.
	if node := s.lookupRaftNode(rg.Name); node != nil {
		s.Debugf("JetStream cluster already has raft group %q assigned", rg.Name)
		rg.node = node
		return nil
	}
	s.Debugf("JetStream cluster creating raft group:%+v", rg)
	// A nil system account here indicates server shutdown in progress.
	sysAcc := s.SystemAccount()
	if sysAcc == nil {
		s.Debugf("JetStream cluster detected shutdown processing raft group: %+v", rg)
		return errors.New("shutting down")
	}
	storeDir := path.Join(js.config.StoreDir, sysAcc.Name, defaultStoreDirName, rg.Name)
	var store StreamStore
	if storage == FileStorage {
		fs, err := newFileStore(
			FileStoreConfig{StoreDir: storeDir, BlockSize: 4_000_000, AsyncFlush: false, SyncInterval: 5 * time.Minute},
			StreamConfig{Name: rg.Name, Storage: FileStorage},
		)
		if err != nil {
			s.Errorf("Error creating filestore WAL: %v", err)
			return err
		}
		store = fs
	} else {
		ms, err := newMemStore(&StreamConfig{Name: rg.Name, Storage: MemoryStorage})
		if err != nil {
			s.Errorf("Error creating memstore WAL: %v", err)
			return err
		}
		store = ms
	}
	cfg := &RaftConfig{Name: rg.Name, Store: storeDir, Log: store, Track: true}
	// No peer state on disk means a fresh group: bootstrap it.
	if _, err := readPeerState(storeDir); err != nil {
		s.bootstrapRaftNode(cfg, rg.Peers, true)
	}
	n, err := s.startRaftNode(cfg)
	if err != nil {
		s.Debugf("Error creating raft group: %v", err)
		return err
	}
	rg.node = n
	// See if we are preferred and should start campaign immediately.
	if n.ID() == rg.Preferred {
		n.Campaign()
	}
	return nil
}
// raftGroup returns the raft group from the stream's assignment, or nil.
// Safe to call on a nil stream.
func (mset *stream) raftGroup() *raftGroup {
	if mset == nil {
		return nil
	}
	mset.mu.RLock()
	defer mset.mu.RUnlock()
	if sa := mset.sa; sa != nil {
		return sa.Group
	}
	return nil
}
// raftNode returns the stream's raft node, or nil. Safe on a nil stream.
func (mset *stream) raftNode() RaftNode {
	if mset == nil {
		return nil
	}
	mset.mu.RLock()
	n := mset.node
	mset.mu.RUnlock()
	return n
}
// Monitor our stream node for this stream.
// This long-running goroutine applies committed entries to the stream,
// handles leader changes, installs periodic snapshots, and drives the
// restore-from-snapshot flow (including re-assigning restored consumers).
func (js *jetStream) monitorStream(mset *stream, sa *streamAssignment) {
	s, cc, n := js.server(), js.cluster, sa.Group.node
	defer s.grWG.Done()
	if n == nil {
		s.Warnf("No RAFT group for '%s > %s", sa.Client.serviceAccount(), sa.Config.Name)
		return
	}
	qch, lch, ach := n.QuitC(), n.LeadChangeC(), n.ApplyC()
	s.Debugf("Starting stream monitor for '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
	defer s.Debugf("Exiting stream monitor for '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
	// Snapshot/compaction thresholds for this stream's raft WAL.
	const (
		compactInterval = 2 * time.Minute
		compactSizeMin  = 32 * 1024 * 1024
		compactNumMin   = 8192
	)
	t := time.NewTicker(compactInterval)
	defer t.Stop()
	js.mu.RLock()
	isLeader := cc.isStreamLeader(sa.Client.serviceAccount(), sa.Config.Name)
	isRestore := sa.Restore != nil
	js.mu.RUnlock()
	acc, err := s.LookupAccount(sa.Client.serviceAccount())
	if err != nil {
		s.Warnf("Could not retrieve account for stream '%s > %s", sa.Client.serviceAccount(), sa.Config.Name)
		return
	}
	var lastSnap []byte
	// Should only to be called from leader.
	// Installs a snapshot only when state changed since the last install,
	// and never while a restore is pending.
	doSnapshot := func() {
		if mset == nil || isRestore {
			return
		}
		if snap := mset.stateSnapshot(); !bytes.Equal(lastSnap, snap) {
			if err := n.InstallSnapshot(snap); err == nil {
				lastSnap = snap
			}
		}
	}
	// We will establish a restoreDoneCh no matter what. Will never be triggered unless
	// we replace with the restore chan.
	restoreDoneCh := make(<-chan error)
	isRecovering := true
	for {
		select {
		case <-s.quitCh:
			return
		case <-qch:
			return
		case ce := <-ach:
			// No special processing needed for when we are caught up on restart.
			if ce == nil {
				isRecovering = false
				// Check on startup if we should snapshot/compact.
				if _, b := n.Size(); b > compactSizeMin || n.NeedSnapshot() {
					doSnapshot()
				}
				continue
			}
			// Apply our entries.
			// TODO mset may be nil see doSnapshot(). applyStreamEntries is sensitive to this.
			if err := js.applyStreamEntries(mset, ce, isRecovering); err == nil {
				ne, nb := n.Applied(ce.Index)
				// If we have at least min entries to compact, go ahead and snapshot/compact.
				if ne >= compactNumMin || nb > compactSizeMin {
					doSnapshot()
				}
			} else if err == errLastSeqMismatch {
				// Sequence mismatch means our local state diverged; reset the
				// clustered state and exit this monitor if the reset starts.
				s.Warnf("Got stream sequence mismatch for '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
				if mset.resetClusteredState() {
					return
				}
			} else {
				s.Warnf("Error applying entries to '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
			}
		case isLeader = <-lch:
			if isLeader {
				if isRestore {
					// Becoming leader mid-restore: kick off the restore and
					// handle completion via restoreDoneCh below.
					acc, _ := s.LookupAccount(sa.Client.serviceAccount())
					restoreDoneCh = s.processStreamRestore(sa.Client, acc, sa.Config, _EMPTY_, sa.Reply, _EMPTY_)
					continue
				} else if n.NeedSnapshot() {
					doSnapshot()
				}
			} else if n.GroupLeader() != noLeader {
				js.setStreamAssignmentRecovering(sa)
			}
			js.processStreamLeaderChange(mset, isLeader)
		case <-t.C:
			doSnapshot()
		case err := <-restoreDoneCh:
			// We have completed a restore from snapshot on this server. The stream assignment has
			// already been assigned but the replicas will need to catch up out of band. Consumers
			// will need to be assigned by forwarding the proposal and stamping the initial state.
			s.Debugf("Stream restore for '%s > %s' completed", sa.Client.serviceAccount(), sa.Config.Name)
			if err != nil {
				s.Debugf("Stream restore failed: %v", err)
			}
			isRestore = false
			sa.Restore = nil
			// If we were successful lookup up our stream now.
			if err == nil {
				mset, err = acc.lookupStream(sa.Config.Name)
				if mset != nil {
					mset.setStreamAssignment(sa)
				}
			}
			if err != nil {
				// Restore failed: tear down local state and report the error
				// back to the metadata leader.
				if mset != nil {
					mset.delete()
				}
				js.mu.Lock()
				sa.err = err
				if n != nil {
					n.Delete()
				}
				result := &streamAssignmentResult{
					Account: sa.Client.serviceAccount(),
					Stream:  sa.Config.Name,
					Restore: &JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}},
				}
				result.Restore.Error = jsError(sa.err)
				js.mu.Unlock()
				// Send response to the metadata leader. They will forward to the user as needed.
				s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, result)
				return
			}
			if !isLeader {
				panic("Finished restore but not leader")
			}
			// Trigger the stream followers to catchup.
			if n := mset.raftNode(); n != nil {
				n.SendSnapshot(mset.stateSnapshot())
			}
			js.processStreamLeaderChange(mset, isLeader)
			// Check to see if we have restored consumers here.
			// These are not currently assigned so we will need to do so here.
			if consumers := mset.getConsumers(); len(consumers) > 0 {
				for _, o := range mset.getConsumers() {
					rg := cc.createGroupForConsumer(sa)
					// Pick a preferred leader.
					rg.setPreferred()
					name, cfg := o.String(), o.config()
					// Place our initial state here as well for assignment distribution.
					ca := &consumerAssignment{
						Group:   rg,
						Stream:  sa.Config.Name,
						Name:    name,
						Config:  &cfg,
						Client:  sa.Client,
						Created: o.createdTime(),
						State:   o.readStoreState(),
					}
					// We make these compressed in case state is complex.
					addEntry := encodeAddConsumerAssignmentCompressed(ca)
					cc.meta.ForwardProposal(addEntry)
					// Check to make sure we see the assignment.
					// Retry the proposal every second until it shows up in our
					// assignment map or the meta node is gone.
					go func() {
						ticker := time.NewTicker(time.Second)
						defer ticker.Stop()
						for range ticker.C {
							js.mu.RLock()
							ca, meta := js.consumerAssignment(ca.Client.serviceAccount(), sa.Config.Name, name), cc.meta
							js.mu.RUnlock()
							if ca == nil {
								s.Warnf("Consumer assignment has not been assigned, retrying")
								if meta != nil {
									meta.ForwardProposal(addEntry)
								} else {
									return
								}
							} else {
								return
							}
						}
					}()
				}
			}
		}
	}
}
// resetClusteredState is called when a clustered stream had a sequence mismatch and needs to be reset.
// It steps down from leadership, deletes the raft state, stops the stream
// (preserving its messages), and schedules a clustered re-create. Returns
// false if resource limits prevent the reset.
func (mset *stream) resetClusteredState() bool {
	mset.mu.RLock()
	s, js, sa, acc, node := mset.srv, mset.js, mset.sa, mset.acc, mset.node
	stype, isLeader := mset.cfg.Storage, mset.isLeader()
	mset.mu.RUnlock()
	// Stepdown regardless if we are the leader here.
	if isLeader && node != nil {
		node.StepDown()
	}
	// Do not reset when already over limits for this storage type.
	if js.limitsExceeded(stype) {
		s.Debugf("Will not reset stream, resources exceeded")
		return false
	}
	// We delete our raft state. Will recreate.
	if node != nil {
		node.Delete()
	}
	// Preserve our current state and messages.
	mset.stop(false, false)
	if sa != nil {
		s.Warnf("Resetting stream '%s > %s'", sa.Client.serviceAccount(), sa.Config.Name)
		js.mu.Lock()
		// Clear the node reference so the re-create builds a fresh one.
		sa.Group.node = nil
		js.mu.Unlock()
		go js.processClusterCreateStream(acc, sa)
	}
	return true
}
// checkForFlowControl answers any pending flow-control request satisfied once
// the stream has applied up through seq, and flushes stale requests below it.
func (mset *stream) checkForFlowControl(seq uint64) {
	mset.mu.Lock()
	defer mset.mu.Unlock()
	if mset.fcr == nil {
		return
	}
	// Exact match: reply and we are done.
	if rply := mset.fcr[seq]; rply != _EMPTY_ {
		delete(mset.fcr, seq)
		mset.outq.send(&jsPubMsg{rply, _EMPTY_, _EMPTY_, nil, nil, nil, 0, nil})
		return
	}
	// Otherwise release anything registered below seq.
	for fseq, rply := range mset.fcr {
		if fseq < seq {
			delete(mset.fcr, fseq)
			mset.outq.send(&jsPubMsg{rply, _EMPTY_, _EMPTY_, nil, nil, nil, 0, nil})
		}
	}
}
// applyStreamEntries applies a committed batch of raft entries to a stream:
// normal entries carry message appends, deletes and purges; snapshot entries
// trigger catch-up processing; EntryRemovePeer may stop the local stream.
// When isRecovering, side-effects like API responses are suppressed.
func (js *jetStream) applyStreamEntries(mset *stream, ce *CommittedEntry, isRecovering bool) error {
	for _, e := range ce.Entries {
		if e.Type == EntryNormal {
			// First byte is the operation code, remainder is the payload.
			buf := e.Data
			switch entryOp(buf[0]) {
			case streamMsgOp:
				if mset == nil {
					continue
				}
				subject, reply, hdr, msg, lseq, ts, err := decodeStreamMsg(buf[1:])
				if err != nil {
					panic(err.Error())
				}
				// We can skip if we know this is less than what we already have.
				last := mset.lastSeq()
				if lseq < last {
					continue
				}
				// Skip by hand here since first msg special case.
				// Reason is sequence is unsigned and for lseq being 0
				// the lseq under stream would have be -1.
				if lseq == 0 && last != 0 {
					continue
				}
				// Check for flowcontrol here.
				mset.checkForFlowControl(lseq + 1)
				s := js.srv
				if err := mset.processJetStreamMsg(subject, reply, hdr, msg, lseq, ts); err != nil {
					if !isRecovering {
						// Sequence mismatch is surfaced to the caller so the
						// monitor can reset clustered state.
						if err == errLastSeqMismatch {
							return err
						}
						s.Debugf("Got error processing JetStream msg: %v", err)
					}
					if isOutOfSpaceErr(err) {
						s.handleOutOfSpace(mset.name())
						return err
					}
				}
			case deleteMsgOp:
				md, err := decodeMsgDelete(buf[1:])
				if err != nil {
					panic(err.Error())
				}
				s, cc := js.server(), js.cluster
				var removed bool
				// NoErase removes the index only; otherwise securely erase.
				if md.NoErase {
					removed, err = mset.removeMsg(md.Seq)
				} else {
					removed, err = mset.eraseMsg(md.Seq)
				}
				if err != nil && !isRecovering {
					s.Debugf("JetStream cluster failed to delete msg %d from stream %q for account %q: %v",
						md.Seq, md.Stream, md.Client.serviceAccount(), err)
				}
				js.mu.RLock()
				isLeader := cc.isStreamLeader(md.Client.serviceAccount(), md.Stream)
				js.mu.RUnlock()
				// Only the live leader answers the original API request.
				if isLeader && !isRecovering {
					var resp = JSApiMsgDeleteResponse{ApiResponse: ApiResponse{Type: JSApiMsgDeleteResponseType}}
					if err != nil {
						resp.Error = jsError(err)
						s.sendAPIErrResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp))
					} else if !removed {
						resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("sequence [%d] not found", md.Seq)}
						s.sendAPIErrResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp))
					} else {
						resp.Success = true
						s.sendAPIResponse(md.Client, mset.account(), md.Subject, md.Reply, _EMPTY_, s.jsonResponse(resp))
					}
				}
			case purgeStreamOp:
				sp, err := decodeStreamPurge(buf[1:])
				if err != nil {
					panic(err.Error())
				}
				// Ignore if we are recovering and we have already processed.
				if isRecovering {
					if mset.state().FirstSeq <= sp.LastSeq {
						// Make sure all messages from the purge are gone.
						mset.store.Compact(sp.LastSeq + 1)
					}
					continue
				}
				s := js.server()
				purged, err := mset.purge()
				if err != nil {
					s.Warnf("JetStream cluster failed to purge stream %q for account %q: %v", sp.Stream, sp.Client.serviceAccount(), err)
				}
				js.mu.RLock()
				isLeader := js.cluster.isStreamLeader(sp.Client.serviceAccount(), sp.Stream)
				js.mu.RUnlock()
				if isLeader && !isRecovering {
					var resp = JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}}
					if err != nil {
						resp.Error = jsError(err)
						s.sendAPIErrResponse(sp.Client, mset.account(), sp.Subject, sp.Reply, _EMPTY_, s.jsonResponse(resp))
					} else {
						resp.Purged = purged
						resp.Success = true
						s.sendAPIResponse(sp.Client, mset.account(), sp.Subject, sp.Reply, _EMPTY_, s.jsonResponse(resp))
					}
				}
			default:
				panic("JetStream Cluster Unknown group entry op type!")
			}
		} else if e.Type == EntrySnapshot {
			// Snapshots only drive catch-up on a live, existing stream.
			if !isRecovering && mset != nil {
				var snap streamSnapshot
				if err := json.Unmarshal(e.Data, &snap); err != nil {
					return err
				}
				mset.processSnapshot(&snap)
			}
		} else if e.Type == EntryRemovePeer {
			js.mu.RLock()
			ourID := js.cluster.meta.ID()
			js.mu.RUnlock()
			if peer := string(e.Data); peer == ourID {
				shouldDelete := true
				if sa := mset.streamAssignment(); sa != nil {
					js.mu.Lock()
					// Make sure we are not part of this assignment. If we are
					// we need to ignore this remove.
					if sa.Group.isMember(ourID) {
						shouldDelete = false
					} else {
						if node := sa.Group.node; node != nil {
							node.ProposeRemovePeer(ourID)
						}
						sa.Group.node = nil
						sa.err = nil
					}
					js.mu.Unlock()
				}
				if shouldDelete {
					mset.stop(true, false)
				}
			}
			// NOTE(review): this returns early, skipping any remaining entries
			// in the batch — presumably intentional since the stream may have
			// just been stopped; confirm.
			return nil
		}
	}
	return nil
}
// Returns the PeerInfo for all replicas of a raft node. This is different than node.Peers()
// and is used for external facing advisories.
func (s *Server) replicas(node RaftNode) []*PeerInfo {
	now := time.Now()
	var replicas []*PeerInfo
	for _, rp := range node.Peers() {
		sir, ok := s.nodeToInfo.Load(rp.ID)
		if !ok || sir == nil {
			continue
		}
		si := sir.(nodeInfo)
		replicas = append(replicas, &PeerInfo{
			Name:    si.name,
			Current: rp.Current,
			Active:  now.Sub(rp.Last),
			Offline: si.offline,
			Lag:     rp.Lag,
		})
	}
	return replicas
}
// Will check our node peers and see if we should remove a peer.
func (js *jetStream) checkPeers(rg *raftGroup) {
	js.mu.Lock()
	defer js.mu.Unlock()
	// FIXME(dlc) - Single replicas?
	if rg == nil || rg.node == nil {
		return
	}
	node := rg.node
	for _, p := range node.Peers() {
		if rg.isMember(p.ID) {
			continue
		}
		// Peer is no longer part of the assigned group, propose removal.
		node.ProposeRemovePeer(p.ID)
	}
}
// processStreamLeaderChange is called when this server gains or loses
// leadership of a stream's raft group. It updates the stream's leader
// status, sends elect/lost-quorum advisories, and on first leadership
// answers the pending stream create request.
func (js *jetStream) processStreamLeaderChange(mset *stream, isLeader bool) {
	if mset == nil {
		return
	}
	sa := mset.streamAssignment()
	if sa == nil {
		return
	}
	// Capture what we need from the assignment under the lock and mark it
	// responded so the create request is only answered once.
	js.mu.Lock()
	s, account, err := js.srv, sa.Client.serviceAccount(), sa.err
	client, subject, reply := sa.Client, sa.Subject, sa.Reply
	hasResponded := sa.responded
	sa.responded = true
	js.mu.Unlock()
	streamName := mset.name()
	if isLeader {
		s.Noticef("JetStream cluster new stream leader for '%s > %s'", sa.Client.serviceAccount(), streamName)
		s.sendStreamLeaderElectAdvisory(mset)
		// Check for peer removal and process here if needed.
		js.checkPeers(sa.Group)
	} else {
		// We are stepping down.
		// Make sure if we are doing so because we have lost quorum that we send the appropriate advisories.
		// The age check avoids advisories for freshly created nodes that have not elected yet.
		if node := mset.raftNode(); node != nil && !node.Quorum() && time.Since(node.Created()) > 5*time.Second {
			s.sendStreamLostQuorumAdvisory(mset)
		}
	}
	// Tell stream to switch leader status.
	mset.setLeader(isLeader)
	// Only the new leader answers the create request, and only once.
	if !isLeader || hasResponded {
		return
	}
	acc, _ := s.LookupAccount(account)
	if acc == nil {
		return
	}
	// Send our response.
	var resp = JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}}
	if err != nil {
		resp.Error = jsError(err)
		s.sendAPIErrResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
	} else {
		resp.StreamInfo = &StreamInfo{
			Created: mset.createdTime(),
			State:   mset.state(),
			Config:  mset.config(),
			Cluster: js.clusterInfo(mset.raftGroup()),
			Sources: mset.sourcesInfo(),
			Mirror:  mset.mirrorInfo(),
		}
		s.sendAPIResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
		// Only send a create advisory for clustered (raft-backed) streams.
		if node := mset.raftNode(); node != nil {
			mset.sendCreateAdvisory()
		}
	}
}
// Fixed value ok for now.
// Minimum interval between successive "lost quorum" advisories for a given
// stream or consumer; see the shouldSendLostQuorum helpers below.
const lostQuorumAdvInterval = 10 * time.Second
// Determines if we should send lost quorum advisory. We throttle these after first one.
func (mset *stream) shouldSendLostQuorum() bool {
	mset.mu.Lock()
	defer mset.mu.Unlock()
	// Throttle: only allow another advisory once the interval has elapsed.
	if time.Since(mset.lqsent) < lostQuorumAdvInterval {
		return false
	}
	mset.lqsent = time.Now()
	return true
}
// sendStreamLostQuorumAdvisory publishes a throttled advisory that the
// stream's raft group has lost quorum, both to the owning account (unless
// it is the system account) and to the system account.
func (s *Server) sendStreamLostQuorumAdvisory(mset *stream) {
	if mset == nil {
		return
	}
	node, stream, acc := mset.raftNode(), mset.name(), mset.account()
	if node == nil {
		return
	}
	// Throttled via the stream's lqsent timestamp.
	if !mset.shouldSendLostQuorum() {
		return
	}
	s.Warnf("JetStream cluster stream '%s > %s' has NO quorum, stalled.", acc.GetName(), stream)
	subj := JSAdvisoryStreamQuorumLostPre + "." + stream
	adv := &JSStreamQuorumLostAdvisory{
		TypedEvent: TypedEvent{
			Type: JSStreamQuorumLostAdvisoryType,
			ID:   nuid.Next(),
			Time: time.Now().UTC(),
		},
		Stream:   stream,
		Replicas: s.replicas(node),
	}
	// Send to the user's account if not the system account.
	if acc != s.SystemAccount() {
		s.publishAdvisory(acc, subj, adv)
	}
	// Now do system level one. Place account info in adv, and nil account means system.
	adv.Account = acc.GetName()
	s.publishAdvisory(nil, subj, adv)
}
// sendStreamLeaderElectAdvisory publishes an advisory that a new leader was
// elected for the stream's raft group, to the owning account (unless it is
// the system account) and to the system account.
func (s *Server) sendStreamLeaderElectAdvisory(mset *stream) {
	if mset == nil {
		return
	}
	node, stream, acc := mset.raftNode(), mset.name(), mset.account()
	if node == nil {
		return
	}
	subj := JSAdvisoryStreamLeaderElectedPre + "." + stream
	adv := &JSStreamLeaderElectedAdvisory{
		TypedEvent: TypedEvent{
			Type: JSStreamLeaderElectedAdvisoryType,
			ID:   nuid.Next(),
			Time: time.Now().UTC(),
		},
		Stream:   stream,
		Leader:   s.serverNameForNode(node.GroupLeader()),
		Replicas: s.replicas(node),
	}
	// Send to the user's account if not the system account.
	if acc != s.SystemAccount() {
		s.publishAdvisory(acc, subj, adv)
	}
	// Now do system level one. Place account info in adv, and nil account means system.
	adv.Account = acc.GetName()
	s.publishAdvisory(nil, subj, adv)
}
// Will lookup a stream assignment.
// Lock should be held.
func (js *jetStream) streamAssignment(account, stream string) *streamAssignment {
	if js.cluster == nil {
		return nil
	}
	streams, ok := js.cluster.streams[account]
	if !ok {
		return nil
	}
	return streams[stream]
}
// processStreamAssignment is called when followers have replicated an assignment.
// Records the assignment in the cluster state, then either creates/updates the
// stream locally (if we are a member of the group) or removes ourselves and any
// local copy of the stream. Returns true if we proposed our own removal from
// the group's raft node.
func (js *jetStream) processStreamAssignment(sa *streamAssignment) bool {
	js.mu.RLock()
	s, cc := js.srv, js.cluster
	js.mu.RUnlock()
	if s == nil || cc == nil {
		// TODO(dlc) - debug at least
		return false
	}
	acc, err := s.LookupAccount(sa.Client.serviceAccount())
	if err != nil {
		// TODO(dlc) - log error
		return false
	}
	stream := sa.Config.Name
	js.mu.Lock()
	if cc.meta == nil {
		js.mu.Unlock()
		return false
	}
	ourID := cc.meta.ID()
	var isMember bool
	if sa.Group != nil {
		isMember = sa.Group.isMember(ourID)
	}
	accStreams := cc.streams[acc.Name]
	if accStreams == nil {
		accStreams = make(map[string]*streamAssignment)
	} else if osa := accStreams[stream]; osa != nil {
		// Copy over private existing state from former SA.
		sa.Group.node = osa.Group.node
		sa.consumers = osa.consumers
		sa.responded = osa.responded
		sa.err = osa.err
	}
	// Update our state.
	accStreams[stream] = sa
	cc.streams[acc.Name] = accStreams
	js.mu.Unlock()
	var didRemove bool
	// Check if this is for us..
	if isMember {
		js.processClusterCreateStream(acc, sa)
	} else {
		// Clear our raft node here.
		// TODO(dlc) - This might be better if done by leader, not the one who is being removed
		// since we are most likely offline.
		js.mu.Lock()
		if node := sa.Group.node; node != nil {
			node.ProposeRemovePeer(ourID)
			didRemove = true
		}
		sa.Group.node = nil
		sa.err = nil
		js.mu.Unlock()
		if mset, _ := acc.lookupStream(sa.Config.Name); mset != nil {
			// We have one here even though we are not a member. This can happen on re-assignment.
			s.Debugf("JetStream removing stream '%s > %s' from this server, reassigned", sa.Client.serviceAccount(), sa.Config.Name)
			mset.stop(true, false)
		}
	}
	return didRemove
}
// processUpdateStreamAssignment is called when followers have replicated an updated assignment.
// Unlike processStreamAssignment this requires a pre-existing assignment; it
// updates our state in place and either applies the update locally (member)
// or removes the local stream (no longer a member).
func (js *jetStream) processUpdateStreamAssignment(sa *streamAssignment) {
	js.mu.RLock()
	s, cc := js.srv, js.cluster
	js.mu.RUnlock()
	if s == nil || cc == nil {
		// TODO(dlc) - debug at least
		return
	}
	acc, err := s.LookupAccount(sa.Client.serviceAccount())
	if err != nil {
		// TODO(dlc) - log error
		return
	}
	stream := sa.Config.Name
	js.mu.Lock()
	if cc.meta == nil {
		js.mu.Unlock()
		return
	}
	ourID := cc.meta.ID()
	var isMember bool
	if sa.Group != nil {
		isMember = sa.Group.isMember(ourID)
	}
	accStreams := cc.streams[acc.Name]
	if accStreams == nil {
		// No assignments at all for this account; nothing to update.
		js.mu.Unlock()
		return
	}
	osa := accStreams[stream]
	if osa == nil {
		// Update for a stream we never saw assigned; ignore.
		js.mu.Unlock()
		return
	}
	// Copy over private existing state from former SA.
	sa.Group.node = osa.Group.node
	sa.consumers = osa.consumers
	sa.err = osa.err
	// Update our state.
	accStreams[stream] = sa
	cc.streams[acc.Name] = accStreams
	// Make sure we respond.
	if isMember {
		sa.responded = false
	}
	js.mu.Unlock()
	// Check if this is for us..
	if isMember {
		js.processClusterUpdateStream(acc, sa)
	} else if mset, _ := acc.lookupStream(sa.Config.Name); mset != nil {
		// We have one here even though we are not a member. This can happen on re-assignment.
		s.Debugf("JetStream removing stream '%s > %s' from this server, re-assigned", sa.Client.serviceAccount(), sa.Config.Name)
		if node := mset.raftNode(); node != nil {
			node.ProposeRemovePeer(ourID)
		}
		mset.stop(true, false)
	}
}
// processClusterUpdateStream is called when we have a stream assignment that
// has been updated for an existing assignment.
// Applies the new config to the local stream, reports failures back to the
// metadata leader, and (if we are the stream leader) responds to the caller.
func (js *jetStream) processClusterUpdateStream(acc *Account, sa *streamAssignment) {
	if sa == nil {
		return
	}
	js.mu.Lock()
	s, rg := js.srv, sa.Group
	client, subject, reply := sa.Client, sa.Subject, sa.Reply
	alreadyRunning := rg.node != nil
	hasResponded := sa.responded
	sa.responded = true
	js.mu.Unlock()
	mset, err := acc.lookupStream(sa.Config.Name)
	if err == nil && mset != nil {
		osa := mset.streamAssignment()
		if !alreadyRunning {
			s.startGoRoutine(func() { js.monitorStream(mset, sa) })
		}
		mset.setStreamAssignment(sa)
		// On a failed config update, roll the assignment back to the old one.
		if err = mset.update(sa.Config); err != nil {
			s.Warnf("JetStream cluster error updating stream %q for account %q: %v", sa.Config.Name, acc.Name, err)
			mset.setStreamAssignment(osa)
		}
	}
	if err != nil {
		js.mu.Lock()
		sa.err = err
		result := &streamAssignmentResult{
			Account:  sa.Client.serviceAccount(),
			Stream:   sa.Config.Name,
			Response: &JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}},
			Update:   true,
		}
		result.Response.Error = jsError(err)
		js.mu.Unlock()
		// Send response to the metadata leader. They will forward to the user as needed.
		s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, result)
		return
	}
	// NOTE(review): the code below assumes lookupStream returns a non-nil
	// error whenever mset is nil — otherwise this RLock would panic; confirm.
	mset.mu.RLock()
	isLeader := mset.isLeader()
	mset.mu.RUnlock()
	// Only the leader responds, and only once per assignment.
	if !isLeader || hasResponded {
		return
	}
	// Send our response.
	var resp = JSApiStreamUpdateResponse{ApiResponse: ApiResponse{Type: JSApiStreamUpdateResponseType}}
	resp.StreamInfo = &StreamInfo{
		Created: mset.createdTime(),
		State:   mset.state(),
		Config:  mset.config(),
		Cluster: js.clusterInfo(mset.raftGroup()),
		Mirror:  mset.mirrorInfo(),
		Sources: mset.sourcesInfo(),
	}
	s.sendAPIResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
}
// processClusterCreateStream is called when we have a stream assignment that
// has been committed and this server is a member of the peer group.
// Ensures the raft group is running, creates or updates the local stream,
// reports failures back to the metadata leader, and for single-replica
// streams processes restore and leader-change handling inline.
func (js *jetStream) processClusterCreateStream(acc *Account, sa *streamAssignment) {
	if sa == nil {
		return
	}
	js.mu.RLock()
	s, rg := js.srv, sa.Group
	alreadyRunning := rg.node != nil
	storage := sa.Config.Storage
	js.mu.RUnlock()
	// Process the raft group and make sure it's running if needed.
	err := js.createRaftGroup(rg, storage)
	// If we are restoring, create the stream if we are R>1 and not the preferred who handles the
	// receipt of the snapshot itself.
	shouldCreate := true
	if sa.Restore != nil {
		if len(rg.Peers) == 1 || rg.node != nil && rg.node.ID() == rg.Preferred {
			shouldCreate = false
		} else {
			// Not the restore target; clear Restore so we create normally.
			sa.Restore = nil
		}
	}
	// Our stream.
	var mset *stream
	// Process here if not restoring or not the leader.
	if shouldCreate && err == nil {
		// Go ahead and create or update the stream.
		mset, err = acc.lookupStream(sa.Config.Name)
		if err == nil && mset != nil {
			// Stream exists; apply the new config, rolling back the
			// assignment on failure.
			osa := mset.streamAssignment()
			mset.setStreamAssignment(sa)
			if err = mset.update(sa.Config); err != nil {
				s.Warnf("JetStream cluster error updating stream %q for account %q: %v", sa.Config.Name, acc.Name, err)
				mset.setStreamAssignment(osa)
			}
		} else if err == ErrJetStreamStreamNotFound {
			// Add in the stream here.
			mset, err = acc.addStreamWithAssignment(sa.Config, nil, sa)
		}
		if mset != nil {
			mset.setCreatedTime(sa.Created)
		}
	}
	// This is an error condition.
	if err != nil {
		s.Warnf("Stream create failed for '%s > %s': %v", sa.Client.serviceAccount(), sa.Config.Name, err)
		js.mu.Lock()
		sa.err = err
		hasResponded := sa.responded
		// If out of space do nothing for now.
		if isOutOfSpaceErr(err) {
			hasResponded = true
		}
		if rg.node != nil {
			rg.node.Delete()
		}
		var result *streamAssignmentResult
		if !hasResponded {
			result = &streamAssignmentResult{
				Account:  sa.Client.serviceAccount(),
				Stream:   sa.Config.Name,
				Response: &JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}},
			}
			result.Response.Error = jsError(err)
		}
		js.mu.Unlock()
		// Send response to the metadata leader. They will forward to the user as needed.
		if result != nil {
			s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, result)
		}
		return
	}
	// Start our monitoring routine.
	if rg.node != nil {
		if !alreadyRunning {
			s.startGoRoutine(func() { js.monitorStream(mset, sa) })
		}
	} else {
		// Single replica stream, process manually here.
		// If we are restoring, process that first.
		if sa.Restore != nil {
			// We are restoring a stream here.
			restoreDoneCh := s.processStreamRestore(sa.Client, acc, sa.Config, _EMPTY_, sa.Reply, _EMPTY_)
			s.startGoRoutine(func() {
				defer s.grWG.Done()
				select {
				case err := <-restoreDoneCh:
					if err == nil {
						// Restore succeeded; bind the restored stream to this assignment.
						mset, err = acc.lookupStream(sa.Config.Name)
						if mset != nil {
							mset.setStreamAssignment(sa)
							mset.setCreatedTime(sa.Created)
						}
					}
					if err != nil {
						if mset != nil {
							mset.delete()
						}
						js.mu.Lock()
						sa.err = err
						result := &streamAssignmentResult{
							Account: sa.Client.serviceAccount(),
							Stream:  sa.Config.Name,
							Restore: &JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}},
						}
						result.Restore.Error = jsError(sa.err)
						js.mu.Unlock()
						// Send response to the metadata leader. They will forward to the user as needed.
						b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
						s.sendInternalMsgLocked(streamAssignmentSubj, _EMPTY_, nil, b)
						return
					}
					js.processStreamLeaderChange(mset, true)
					// Check to see if we have restored consumers here.
					// These are not currently assigned so we will need to do so here.
					if consumers := mset.getConsumers(); len(consumers) > 0 {
						js.mu.RLock()
						cc := js.cluster
						js.mu.RUnlock()
						for _, o := range consumers {
							rg := cc.createGroupForConsumer(sa)
							name, cfg := o.String(), o.config()
							// Place our initial state here as well for assignment distribution.
							ca := &consumerAssignment{
								Group:   rg,
								Stream:  sa.Config.Name,
								Name:    name,
								Config:  &cfg,
								Client:  sa.Client,
								Created: o.createdTime(),
							}
							addEntry := encodeAddConsumerAssignment(ca)
							cc.meta.ForwardProposal(addEntry)
							// Check to make sure we see the assignment.
							// Retries the proposal every second until the assignment shows up
							// or the metadata node goes away.
							go func() {
								ticker := time.NewTicker(time.Second)
								defer ticker.Stop()
								for range ticker.C {
									js.mu.RLock()
									ca, meta := js.consumerAssignment(ca.Client.serviceAccount(), sa.Config.Name, name), cc.meta
									js.mu.RUnlock()
									if ca == nil {
										s.Warnf("Consumer assignment has not been assigned, retrying")
										if meta != nil {
											meta.ForwardProposal(addEntry)
										} else {
											return
										}
									} else {
										return
									}
								}
							}()
						}
					}
				case <-s.quitCh:
					return
				}
			})
		} else {
			js.processStreamLeaderChange(mset, true)
		}
	}
}
// processStreamRemoval is called when followers have replicated an assignment.
// Removes the stream assignment from our cluster state and, if it was
// present, performs the actual local delete via processClusterDeleteStream.
func (js *jetStream) processStreamRemoval(sa *streamAssignment) {
	js.mu.Lock()
	s, cc := js.srv, js.cluster
	if s == nil || cc == nil || cc.meta == nil {
		// TODO(dlc) - debug at least
		js.mu.Unlock()
		return
	}
	stream := sa.Config.Name
	isMember := sa.Group.isMember(cc.meta.ID())
	wasLeader := cc.isStreamLeader(sa.Client.serviceAccount(), stream)
	// Check if we already have this assigned.
	accStreams := cc.streams[sa.Client.serviceAccount()]
	needDelete := accStreams != nil && accStreams[stream] != nil
	if needDelete {
		delete(accStreams, stream)
		// Drop the per-account map entirely once its last stream is gone.
		if len(accStreams) == 0 {
			delete(cc.streams, sa.Client.serviceAccount())
		}
	}
	js.mu.Unlock()
	if needDelete {
		js.processClusterDeleteStream(sa, isMember, wasLeader)
	}
}
// processClusterDeleteStream performs the local deletion of a stream whose
// assignment was removed, deletes its raft node, and (when appropriate)
// responds to the original delete request.
func (js *jetStream) processClusterDeleteStream(sa *streamAssignment, isMember, wasLeader bool) {
	if sa == nil {
		return
	}
	js.mu.RLock()
	s := js.srv
	hadLeader := sa.Group.node == nil || sa.Group.node.GroupLeader() != noLeader
	js.mu.RUnlock()
	acc, err := s.LookupAccount(sa.Client.serviceAccount())
	if err != nil {
		s.Debugf("JetStream cluster failed to lookup account %q: %v", sa.Client.serviceAccount(), err)
		return
	}
	var resp = JSApiStreamDeleteResponse{ApiResponse: ApiResponse{Type: JSApiStreamDeleteResponseType}}
	// Go ahead and delete the stream.
	mset, err := acc.lookupStream(sa.Config.Name)
	if err != nil {
		resp.Error = jsNotFoundError(err)
	} else if mset != nil {
		err = mset.stop(true, wasLeader)
	}
	if sa.Group.node != nil {
		sa.Group.node.Delete()
	}
	// Respond only if we are a member and either were the leader or the
	// group never had one (Go precedence: !isMember || (!wasLeader && hadLeader)).
	if !isMember || !wasLeader && hadLeader {
		return
	}
	if err != nil {
		if resp.Error == nil {
			resp.Error = jsError(err)
		}
		s.sendAPIErrResponse(sa.Client, acc, sa.Subject, sa.Reply, _EMPTY_, s.jsonResponse(resp))
	} else {
		resp.Success = true
		s.sendAPIResponse(sa.Client, acc, sa.Subject, sa.Reply, _EMPTY_, s.jsonResponse(resp))
	}
}
// processConsumerAssignment is called when followers have replicated an assignment for a consumer.
// Records the consumer assignment under its stream assignment, then either
// creates the consumer locally (if we are a member of its group) or removes
// ourselves and any local copy of the consumer.
func (js *jetStream) processConsumerAssignment(ca *consumerAssignment) {
	js.mu.Lock()
	s, cc := js.srv, js.cluster
	if s == nil || cc == nil || cc.meta == nil {
		// TODO(dlc) - debug at least
		js.mu.Unlock()
		return
	}
	acc, err := s.LookupAccount(ca.Client.serviceAccount())
	if err != nil {
		// Release the lock on this error path as well — returning while
		// still holding js.mu would deadlock every subsequent user of it.
		js.mu.Unlock()
		// TODO(dlc) - log error
		return
	}
	sa := js.streamAssignment(ca.Client.serviceAccount(), ca.Stream)
	if sa == nil {
		// No assignment for this consumer's stream; report the failure back
		// to the metadata leader so the client gets an answer.
		s.Debugf("Consumer create failed, could not locate stream '%s > %s'", ca.Client.serviceAccount(), ca.Stream)
		ca.err = ErrJetStreamStreamNotFound
		result := &consumerAssignmentResult{
			Account:  ca.Client.serviceAccount(),
			Stream:   ca.Stream,
			Consumer: ca.Name,
			Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}},
		}
		result.Response.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
		// Send response to the metadata leader. They will forward to the user as needed.
		b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
		s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, b)
		js.mu.Unlock()
		return
	}
	if sa.consumers == nil {
		sa.consumers = make(map[string]*consumerAssignment)
	} else if oca := sa.consumers[ca.Name]; oca != nil {
		// Copy over private existing state from former CA.
		ca.Group.node = oca.Group.node
		ca.responded = oca.responded
		ca.err = oca.err
	}
	// Place into our internal map under the stream assignment.
	// Ok to replace an existing one, we check on process call below.
	sa.consumers[ca.Name] = ca
	// See if we are a member
	ourID := cc.meta.ID()
	isMember := ca.Group.isMember(ourID)
	js.mu.Unlock()
	// Check if this is for us..
	if isMember {
		js.processClusterCreateConsumer(ca)
	} else {
		// Clear our raft node here.
		// TODO(dlc) - This might be better if done by leader, not the one who is being removed
		// since we are most likely offline.
		js.mu.Lock()
		if node := ca.Group.node; node != nil {
			node.ProposeRemovePeer(ourID)
		}
		ca.Group.node = nil
		ca.err = nil
		js.mu.Unlock()
		// We are not a member, if we have this consumer on this
		// server remove it.
		if mset, _ := acc.lookupStream(ca.Stream); mset != nil {
			if o := mset.lookupConsumer(ca.Name); o != nil {
				s.Debugf("JetStream removing consumer '%s > %s > %s' from this server, re-assigned",
					ca.Client.serviceAccount(), ca.Stream, ca.Name)
				o.stopWithFlags(true, false, false)
			}
		}
	}
}
// processConsumerRemoval removes a replicated consumer assignment from our
// cluster state and, if it was present, performs the actual local delete
// via processClusterDeleteConsumer.
func (js *jetStream) processConsumerRemoval(ca *consumerAssignment) {
	js.mu.Lock()
	s, cc := js.srv, js.cluster
	if s == nil || cc == nil || cc.meta == nil {
		// TODO(dlc) - debug at least
		js.mu.Unlock()
		return
	}
	isMember := ca.Group.isMember(cc.meta.ID())
	wasLeader := cc.isConsumerLeader(ca.Client.serviceAccount(), ca.Stream, ca.Name)
	// Delete from our state.
	var needDelete bool
	if accStreams := cc.streams[ca.Client.serviceAccount()]; accStreams != nil {
		if sa := accStreams[ca.Stream]; sa != nil && sa.consumers != nil && sa.consumers[ca.Name] != nil {
			needDelete = true
			delete(sa.consumers, ca.Name)
		}
	}
	js.mu.Unlock()
	if needDelete {
		js.processClusterDeleteConsumer(ca, isMember, wasLeader)
	}
}
// consumerAssignmentResult is sent by group members back to the metadata
// leader to report the outcome of processing a consumer assignment; the
// leader forwards the response to the requesting client as needed.
type consumerAssignmentResult struct {
	Account  string                       `json:"account"`
	Stream   string                       `json:"stream"`
	Consumer string                       `json:"consumer"`
	Response *JSApiConsumerCreateResponse `json:"response,omitempty"`
}
// processClusterCreateConsumer is when we are a member fo the group and need to create the consumer.
// Ensures the consumer's raft group is running, creates or updates the local
// consumer, applies any initial store state, reports failures back to the
// metadata leader, and starts the monitor routine (or handles leadership
// inline for single-replica consumers).
func (js *jetStream) processClusterCreateConsumer(ca *consumerAssignment) {
	if ca == nil {
		return
	}
	js.mu.RLock()
	s := js.srv
	acc, err := s.LookupAccount(ca.Client.serviceAccount())
	if err != nil {
		s.Warnf("JetStream cluster failed to lookup account %q: %v", ca.Client.serviceAccount(), err)
		js.mu.RUnlock()
		return
	}
	rg := ca.Group
	alreadyRunning := rg.node != nil
	js.mu.RUnlock()
	// Go ahead and create or update the consumer.
	mset, err := acc.lookupStream(ca.Stream)
	if err != nil {
		// Stream is gone; report back so the client gets an answer.
		js.mu.Lock()
		s.Debugf("Consumer create failed, could not locate stream '%s > %s'", ca.Client.serviceAccount(), ca.Stream)
		ca.err = ErrJetStreamStreamNotFound
		result := &consumerAssignmentResult{
			Account:  ca.Client.serviceAccount(),
			Stream:   ca.Stream,
			Consumer: ca.Name,
			Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}},
		}
		result.Response.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
		// Send response to the metadata leader. They will forward to the user as needed.
		b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
		s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, b)
		js.mu.Unlock()
		return
	}
	// Process the raft group and make sure its running if needed.
	js.createRaftGroup(rg, mset.config().Storage)
	// Check if we already have this consumer running.
	o := mset.lookupConsumer(ca.Name)
	if o != nil {
		// For durable push consumers with no local interest, redirect
		// delivery to the (possibly new) deliver subject.
		if o.isDurable() && o.isPushMode() {
			ocfg := o.config()
			if configsEqualSansDelivery(ocfg, *ca.Config) && o.hasNoLocalInterest() {
				o.updateDeliverSubject(ca.Config.DeliverSubject)
			}
		}
		o.setConsumerAssignment(ca)
		s.Debugf("JetStream cluster, consumer was already running")
	}
	// Add in the consumer if needed.
	if o == nil {
		o, err = mset.addConsumerWithAssignment(ca.Config, ca.Name, ca)
	}
	// If we have an initial state set apply that now.
	if ca.State != nil && o != nil {
		err = o.setStoreState(ca.State)
	}
	if err != nil {
		s.Warnf("Consumer create failed for '%s > %s > %s': %v\n", ca.Client.serviceAccount(), ca.Stream, ca.Name, err)
		js.mu.Lock()
		ca.err = err
		hasResponded := ca.responded
		// If out of space do nothing for now.
		if isOutOfSpaceErr(err) {
			hasResponded = true
		}
		if rg.node != nil {
			rg.node.Delete()
		}
		var result *consumerAssignmentResult
		if !hasResponded {
			result = &consumerAssignmentResult{
				Account:  ca.Client.serviceAccount(),
				Stream:   ca.Stream,
				Consumer: ca.Name,
				Response: &JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}},
			}
			result.Response.Error = jsError(err)
		} else if err == errNoInterest {
			// This is a stranded ephemeral, let's clean this one up.
			subject := fmt.Sprintf(JSApiConsumerDeleteT, ca.Stream, ca.Name)
			mset.outq.send(&jsPubMsg{subject, _EMPTY_, _EMPTY_, nil, nil, nil, 0, nil})
		}
		js.mu.Unlock()
		if result != nil {
			// Send response to the metadata leader. They will forward to the user as needed.
			b, _ := json.Marshal(result) // Avoids auto-processing and doing fancy json with newlines.
			s.sendInternalMsgLocked(consumerAssignmentSubj, _EMPTY_, nil, b)
		}
	} else {
		o.setCreatedTime(ca.Created)
		// Start our monitoring routine.
		if rg.node != nil {
			if !alreadyRunning {
				s.startGoRoutine(func() { js.monitorConsumer(o, ca) })
			}
		} else {
			// Single replica consumer, process manually here.
			js.processConsumerLeaderChange(o, true)
		}
	}
}
// processClusterDeleteConsumer performs the local deletion of a consumer
// whose assignment was removed, deletes its raft node, and (if we were the
// leader and a reply is expected) responds to the delete request.
func (js *jetStream) processClusterDeleteConsumer(ca *consumerAssignment, isMember, wasLeader bool) {
	if ca == nil {
		return
	}
	js.mu.RLock()
	s := js.srv
	js.mu.RUnlock()
	acc, err := s.LookupAccount(ca.Client.serviceAccount())
	if err != nil {
		s.Warnf("JetStream cluster failed to lookup account %q: %v", ca.Client.serviceAccount(), err)
		return
	}
	var resp = JSApiConsumerDeleteResponse{ApiResponse: ApiResponse{Type: JSApiConsumerDeleteResponseType}}
	// Go ahead and delete the consumer.
	mset, err := acc.lookupStream(ca.Stream)
	if err != nil {
		resp.Error = jsNotFoundError(err)
	} else if mset != nil {
		if o := mset.lookupConsumer(ca.Name); o != nil {
			err = o.stopWithFlags(true, true, wasLeader)
		} else {
			resp.Error = jsNoConsumerErr
		}
	}
	if ca.Group.node != nil {
		ca.Group.node.Delete()
	}
	// Only the former leader responds, and only when a reply was requested.
	if !wasLeader || ca.Reply == _EMPTY_ {
		return
	}
	if err != nil {
		if resp.Error == nil {
			resp.Error = jsError(err)
		}
		s.sendAPIErrResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(resp))
	} else {
		resp.Success = true
		s.sendAPIResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(resp))
	}
}
// Returns the consumer assignment, or nil if not present.
// Lock should be held.
func (js *jetStream) consumerAssignment(account, stream, consumer string) *consumerAssignment {
	sa := js.streamAssignment(account, stream)
	if sa == nil {
		return nil
	}
	return sa.consumers[consumer]
}
// consumerAssigned informs us if this server has this consumer assigned.
func (jsa *jsAccount) consumerAssigned(stream, consumer string) bool {
	jsa.mu.RLock()
	defer jsa.mu.RUnlock()
	if jsa.js == nil {
		return false
	}
	return jsa.js.cluster.isConsumerAssigned(jsa.account, stream, consumer)
}
// Read lock should be held.
func (cc *jetStreamCluster) isConsumerAssigned(a *Account, stream, consumer string) bool {
	// Non-clustered mode always return true.
	if cc == nil {
		return true
	}
	// Indexing a nil inner map is safe in Go and yields nil.
	sa := cc.streams[a.Name][stream]
	if sa == nil {
		// TODO(dlc) - This should not happen.
		return false
	}
	ca := sa.consumers[consumer]
	if ca == nil {
		return false
	}
	// Check if we are part of the raft group assigned to this consumer.
	ourID := cc.meta.ID()
	for _, peer := range ca.Group.Peers {
		if peer == ourID {
			return true
		}
	}
	return false
}
// raftGroup returns the raft group from this consumer's assignment, or nil.
func (o *consumer) raftGroup() *raftGroup {
	if o == nil {
		return nil
	}
	o.mu.RLock()
	defer o.mu.RUnlock()
	if ca := o.ca; ca != nil {
		return ca.Group
	}
	return nil
}
// raftNode returns this consumer's raft node under the read lock.
func (o *consumer) raftNode() RaftNode {
	if o == nil {
		return nil
	}
	o.mu.RLock()
	node := o.node
	o.mu.RUnlock()
	return node
}
// monitorConsumer is the long-running goroutine that drives a clustered
// consumer: it applies committed raft entries, tracks leader changes, and
// periodically snapshots/compacts the consumer's raft log.
func (js *jetStream) monitorConsumer(o *consumer, ca *consumerAssignment) {
	s, n := js.server(), o.raftNode()
	defer s.grWG.Done()
	if n == nil {
		s.Warnf("No RAFT group for consumer")
		return
	}
	qch, lch, ach := n.QuitC(), n.LeadChangeC(), n.ApplyC()
	s.Debugf("Starting consumer monitor for '%s > %s > %s", o.acc.Name, ca.Stream, ca.Name)
	defer s.Debugf("Exiting consumer monitor for '%s > %s > %s'", o.acc.Name, ca.Stream, ca.Name)
	// Thresholds that decide when to snapshot/compact the raft log.
	const (
		compactInterval = 2 * time.Minute
		compactSizeMin  = 8 * 1024 * 1024
		compactNumMin   = 8192
	)
	t := time.NewTicker(compactInterval)
	defer t.Stop()
	var lastSnap []byte
	// Should only to be called from leader.
	// Installs a snapshot only when the encoded state actually changed.
	doSnapshot := func() {
		if state, err := o.store.State(); err == nil && state != nil {
			if snap := encodeConsumerState(state); !bytes.Equal(lastSnap, snap) {
				if err := n.InstallSnapshot(snap); err == nil {
					lastSnap = snap
				}
			}
		}
	}
	// Track if we are leader.
	var isLeader bool
	for {
		select {
		case <-s.quitCh:
			return
		case <-qch:
			return
		case ce := <-ach:
			// No special processing needed for when we are caught up on restart.
			if ce == nil {
				if n.NeedSnapshot() {
					doSnapshot()
				}
				continue
			}
			if err := js.applyConsumerEntries(o, ce, isLeader); err == nil {
				ne, nb := n.Applied(ce.Index)
				// If we have at least min entries to compact, go ahead and snapshot/compact.
				if nb > 0 && ne >= compactNumMin || nb > compactSizeMin {
					doSnapshot()
				}
			} else {
				s.Warnf("Error applying consumer entries to '%s > %s'", ca.Client.serviceAccount(), ca.Name)
			}
		case isLeader = <-lch:
			// If we lost leadership but another leader exists, mark the
			// assignment recovering so we do not re-respond.
			if !isLeader && n.GroupLeader() != noLeader {
				js.setConsumerAssignmentRecovering(ca)
			}
			js.processConsumerLeaderChange(o, isLeader)
		case <-t.C:
			doSnapshot()
		}
	}
}
// applyConsumerEntries applies a batch of committed raft entries to the
// consumer: snapshots replace store state, remove-peer entries may stop us,
// and op entries replicate delivered/ack/skip updates to followers.
func (js *jetStream) applyConsumerEntries(o *consumer, ce *CommittedEntry, isLeader bool) error {
	for _, e := range ce.Entries {
		if e.Type == EntrySnapshot {
			// No-op needed?
			state, err := decodeConsumerState(e.Data)
			if err != nil {
				panic(err.Error())
			}
			o.store.Update(state)
		} else if e.Type == EntryRemovePeer {
			js.mu.RLock()
			var ourID string
			if js.cluster != nil && js.cluster.meta != nil {
				ourID = js.cluster.meta.ID()
			}
			js.mu.RUnlock()
			if peer := string(e.Data); peer == ourID {
				// We are the peer being removed. Only stop the consumer if
				// the current assignment no longer includes us.
				shouldDelete := true
				if ca := o.consumerAssignment(); ca != nil {
					js.mu.Lock()
					if ca.Group.isMember(ourID) {
						shouldDelete = false
					} else {
						if node := ca.Group.node; node != nil {
							node.ProposeRemovePeer(ourID)
						}
						ca.Group.node = nil
						ca.err = nil
					}
					js.mu.Unlock()
				}
				if shouldDelete {
					o.stopWithFlags(true, false, false)
				}
			}
			// NOTE(review): this returns early and skips any remaining
			// entries in the batch — confirm that is intended.
			return nil
		} else {
			// Consumer leaders process these already.
			// NOTE(review): assumes e.Data is non-empty for op entries;
			// a zero-length entry would panic on buf[0] — confirm upstream.
			buf := e.Data
			switch entryOp(buf[0]) {
			case updateDeliveredOp:
				// These are handled in place in leaders.
				if !isLeader {
					dseq, sseq, dc, ts, err := decodeDeliveredUpdate(buf[1:])
					if err != nil {
						panic(err.Error())
					}
					if err := o.store.UpdateDelivered(dseq, sseq, dc, ts); err != nil {
						panic(err.Error())
					}
				}
			case updateAcksOp:
				dseq, sseq, err := decodeAckUpdate(buf[1:])
				if err != nil {
					panic(err.Error())
				}
				o.processReplicatedAck(dseq, sseq)
			case updateSkipOp:
				o.mu.Lock()
				if !o.isLeader() {
					var le = binary.LittleEndian
					o.sseq = le.Uint64(buf[1:])
				}
				o.mu.Unlock()
			default:
				panic(fmt.Sprintf("JetStream Cluster Unknown group entry op type! %v", entryOp(buf[0])))
			}
		}
	}
	return nil
}
// processReplicatedAck records a replicated ack in the consumer store and,
// for interest-retention streams, acks the corresponding stream message(s).
func (o *consumer) processReplicatedAck(dseq, sseq uint64) {
	o.store.UpdateAcks(dseq, sseq)
	o.mu.RLock()
	mset := o.mset
	// Only interest-retention streams remove messages on ack.
	if mset == nil || mset.cfg.Retention != InterestPolicy {
		o.mu.RUnlock()
		return
	}
	var sagap uint64
	if o.cfg.AckPolicy == AckAll {
		// With AckAll one ack can cover a gap of stream sequences since the
		// previous ack floor.
		if o.isLeader() {
			sagap = sseq - o.asflr
		} else {
			// We are a follower so only have the store state, so read that in.
			state, err := o.store.State()
			if err != nil {
				o.mu.RUnlock()
				return
			}
			sagap = sseq - state.AckFloor.Stream
		}
	}
	o.mu.RUnlock()
	if sagap > 1 {
		// FIXME(dlc) - This is very inefficient, will need to fix.
		for seq := sseq; seq > sseq-sagap; seq-- {
			mset.ackMsg(o, seq)
		}
	} else {
		mset.ackMsg(o, sseq)
	}
}
var errBadAckUpdate = errors.New("jetstream cluster bad replicated ack update")
var errBadDeliveredUpdate = errors.New("jetstream cluster bad replicated delivered update")
func decodeAckUpdate(buf []byte) (dseq, sseq uint64, err error) {
var bi, n int
if dseq, n = binary.Uvarint(buf); n < 0 {
return 0, 0, errBadAckUpdate
}
bi += n
if sseq, n = binary.Uvarint(buf[bi:]); n < 0 {
return 0, 0, errBadAckUpdate
}
return dseq, sseq, nil
}
func decodeDeliveredUpdate(buf []byte) (dseq, sseq, dc uint64, ts int64, err error) {
var bi, n int
if dseq, n = binary.Uvarint(buf); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
bi += n
if sseq, n = binary.Uvarint(buf[bi:]); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
bi += n
if dc, n = binary.Uvarint(buf[bi:]); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
bi += n
if ts, n = binary.Varint(buf[bi:]); n < 0 {
return 0, 0, 0, 0, errBadDeliveredUpdate
}
return dseq, sseq, dc, ts, nil
}
// processConsumerLeaderChange is called when this server gains or loses
// leadership of a consumer's raft group. It updates the consumer's leader
// status, sends elect/lost-quorum advisories, and on first leadership
// answers the pending consumer create request.
func (js *jetStream) processConsumerLeaderChange(o *consumer, isLeader bool) {
	ca := o.consumerAssignment()
	if ca == nil {
		return
	}
	// Capture what we need under the lock and mark the assignment responded
	// so the create request is only answered once.
	js.mu.Lock()
	s, account, err := js.srv, ca.Client.serviceAccount(), ca.err
	client, subject, reply := ca.Client, ca.Subject, ca.Reply
	hasResponded := ca.responded
	ca.responded = true
	js.mu.Unlock()
	streamName := o.streamName()
	consumerName := o.String()
	acc, _ := s.LookupAccount(account)
	if acc == nil {
		return
	}
	if isLeader {
		s.Noticef("JetStream cluster new consumer leader for '%s > %s > %s'", ca.Client.serviceAccount(), streamName, consumerName)
		s.sendConsumerLeaderElectAdvisory(o)
		// Check for peer removal and process here if needed.
		js.checkPeers(ca.Group)
	} else {
		// We are stepping down.
		// Make sure if we are doing so because we have lost quorum that we send the appropriate advisories.
		// The age check avoids advisories for freshly created nodes that have not elected yet.
		if node := o.raftNode(); node != nil && !node.Quorum() && time.Since(node.Created()) > 5*time.Second {
			s.sendConsumerLostQuorumAdvisory(o)
		}
	}
	// Tell consumer to switch leader status.
	o.setLeader(isLeader)
	// Only the new leader answers the create request, and only once.
	if !isLeader || hasResponded {
		return
	}
	var resp = JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}}
	if err != nil {
		resp.Error = jsError(err)
		s.sendAPIErrResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
	} else {
		resp.ConsumerInfo = o.info()
		s.sendAPIResponse(client, acc, subject, reply, _EMPTY_, s.jsonResponse(&resp))
		// Only send a create advisory for clustered (raft-backed) consumers.
		if node := o.raftNode(); node != nil {
			o.sendCreateAdvisory()
		}
	}
}
// Determines if we should send lost quorum advisory. We throttle these after first one.
func (o *consumer) shouldSendLostQuorum() bool {
	o.mu.Lock()
	defer o.mu.Unlock()
	// Throttle: only allow another advisory once the interval has elapsed.
	if time.Since(o.lqsent) < lostQuorumAdvInterval {
		return false
	}
	o.lqsent = time.Now()
	return true
}
// sendConsumerLostQuorumAdvisory publishes a JSConsumerQuorumLostAdvisory for
// the given consumer, throttled via shouldSendLostQuorum. The advisory goes to
// the owning account (unless it is the system account) and always to the
// system account with the account name filled in.
func (s *Server) sendConsumerLostQuorumAdvisory(o *consumer) {
	if o == nil {
		return
	}
	node, stream, consumer, acc := o.raftNode(), o.streamName(), o.String(), o.account()
	if node == nil {
		return
	}
	// Throttle: only the first advisory per interval goes out.
	if !o.shouldSendLostQuorum() {
		return
	}
	s.Warnf("JetStream cluster consumer '%s > %s > %s' has NO quorum, stalled.", acc.GetName(), stream, consumer)
	subj := JSAdvisoryConsumerQuorumLostPre + "." + stream + "." + consumer
	adv := &JSConsumerQuorumLostAdvisory{
		TypedEvent: TypedEvent{
			Type: JSConsumerQuorumLostAdvisoryType,
			ID:   nuid.Next(),
			Time: time.Now().UTC(),
		},
		Stream:   stream,
		Consumer: consumer,
		Replicas: s.replicas(node),
	}
	// Send to the user's account if not the system account.
	if acc != s.SystemAccount() {
		s.publishAdvisory(acc, subj, adv)
	}
	// Now do system level one. Place account info in adv, and nil account means system.
	adv.Account = acc.GetName()
	s.publishAdvisory(nil, subj, adv)
}
// sendConsumerLeaderElectAdvisory publishes a JSConsumerLeaderElectedAdvisory
// for the given consumer to the owning account (unless it is the system
// account) and always to the system account with the account name filled in.
func (s *Server) sendConsumerLeaderElectAdvisory(o *consumer) {
	if o == nil {
		return
	}
	node, stream, consumer, acc := o.raftNode(), o.streamName(), o.String(), o.account()
	if node == nil {
		return
	}
	subj := JSAdvisoryConsumerLeaderElectedPre + "." + stream + "." + consumer
	adv := &JSConsumerLeaderElectedAdvisory{
		TypedEvent: TypedEvent{
			Type: JSConsumerLeaderElectedAdvisoryType,
			ID:   nuid.Next(),
			Time: time.Now().UTC(),
		},
		Stream:   stream,
		Consumer: consumer,
		Leader:   s.serverNameForNode(node.GroupLeader()),
		Replicas: s.replicas(node),
	}
	// Send to the user's account if not the system account.
	if acc != s.SystemAccount() {
		s.publishAdvisory(acc, subj, adv)
	}
	// Now do system level one. Place account info in adv, and nil account means system.
	adv.Account = acc.GetName()
	s.publishAdvisory(nil, subj, adv)
}
// streamAssignmentResult is sent by group members back to the metadata leader
// (over streamAssignmentSubj) to report the outcome of a stream assignment.
// Exactly one of Response (create) or Restore is expected to be set.
type streamAssignmentResult struct {
	Account  string                      `json:"account"`
	Stream   string                      `json:"stream"`
	Response *JSApiStreamCreateResponse  `json:"create_response,omitempty"`
	Restore  *JSApiStreamRestoreResponse `json:"restore_response,omitempty"`
	Update   bool                        `json:"is_update,omitempty"`
}
// Process error results of stream and consumer assignments.
// Success will be handled by stream leader.
//
// processStreamAssignmentResults runs on the metadata leader. It forwards the
// member's error response to the original requester (once, unless this is an
// update) and, for a failed create shortly after assignment, proposes deletion
// of the assignment.
func (js *jetStream) processStreamAssignmentResults(sub *subscription, c *client, subject, reply string, msg []byte) {
	var result streamAssignmentResult
	if err := json.Unmarshal(msg, &result); err != nil {
		// TODO(dlc) - log
		return
	}
	acc, _ := js.srv.LookupAccount(result.Account)
	if acc == nil {
		// TODO(dlc) - log
		return
	}
	js.mu.Lock()
	defer js.mu.Unlock()
	s, cc := js.srv, js.cluster
	// FIXME(dlc) - suppress duplicates?
	if sa := js.streamAssignment(result.Account, result.Stream); sa != nil {
		// Pick whichever response payload the member sent (create vs restore).
		var resp string
		if result.Response != nil {
			resp = s.jsonResponse(result.Response)
		} else if result.Restore != nil {
			resp = s.jsonResponse(result.Restore)
		}
		if !sa.responded || result.Update {
			sa.responded = true
			js.srv.sendAPIErrResponse(sa.Client, acc, sa.Subject, sa.Reply, _EMPTY_, resp)
		}
		// Here we will remove this assignment, so this needs to only execute when we are sure
		// this is what we want to do.
		// TODO(dlc) - Could have mixed results, should track per peer.
		// Set sa.err while we are deleting so we will not respond to list/names requests.
		if !result.Update && time.Since(sa.Created) < 5*time.Second {
			sa.err = ErrJetStreamNotAssigned
			cc.meta.Propose(encodeDeleteStreamAssignment(sa))
		}
	}
}
// processConsumerAssignmentResults runs on the metadata leader and handles
// error results for consumer assignments reported by group members. It
// forwards the response to the original requester (once) and proposes deletion
// of the assignment if the member reported a failure.
func (js *jetStream) processConsumerAssignmentResults(sub *subscription, c *client, subject, reply string, msg []byte) {
	var result consumerAssignmentResult
	if err := json.Unmarshal(msg, &result); err != nil {
		// TODO(dlc) - log
		return
	}
	acc, _ := js.srv.LookupAccount(result.Account)
	if acc == nil {
		// TODO(dlc) - log
		return
	}
	js.mu.Lock()
	defer js.mu.Unlock()
	s, cc := js.srv, js.cluster
	if sa := js.streamAssignment(result.Account, result.Stream); sa != nil && sa.consumers != nil {
		if ca := sa.consumers[result.Consumer]; ca != nil && !ca.responded {
			js.srv.sendAPIErrResponse(ca.Client, acc, ca.Subject, ca.Reply, _EMPTY_, s.jsonResponse(result.Response))
			ca.responded = true
			// Check if this failed.
			// TODO(dlc) - Could have mixed results, should track per peer.
			if result.Response.Error != nil {
				// So while we are deleting we will not respond to list/names requests.
				ca.err = ErrJetStreamNotAssigned
				cc.meta.Propose(encodeDeleteConsumerAssignment(ca))
			}
		}
	}
}
// System subjects group members use to report stream/consumer assignment
// results back to the metadata leader.
const (
	streamAssignmentSubj   = "$SYS.JSC.STREAM.ASSIGNMENT.RESULT"
	consumerAssignmentSubj = "$SYS.JSC.CONSUMER.ASSIGNMENT.RESULT"
)
// Lock should be held.
//
// startUpdatesSub installs the system subscriptions a metadata leader needs:
// assignment results, leader stepdown requests, and peer removal requests.
// Each subscription is created only if not already present.
func (js *jetStream) startUpdatesSub() {
	cc, s, c := js.cluster, js.srv, js.cluster.c
	if cc.streamResults == nil {
		cc.streamResults, _ = s.systemSubscribe(streamAssignmentSubj, _EMPTY_, false, c, js.processStreamAssignmentResults)
	}
	if cc.consumerResults == nil {
		cc.consumerResults, _ = s.systemSubscribe(consumerAssignmentSubj, _EMPTY_, false, c, js.processConsumerAssignmentResults)
	}
	if cc.stepdown == nil {
		cc.stepdown, _ = s.systemSubscribe(JSApiLeaderStepDown, _EMPTY_, false, c, s.jsLeaderStepDownRequest)
	}
	if cc.peerRemove == nil {
		cc.peerRemove, _ = s.systemSubscribe(JSApiRemoveServer, _EMPTY_, false, c, s.jsLeaderServerRemoveRequest)
	}
}
// Lock should be held.
//
// stopUpdatesSub tears down the metadata-leader system subscriptions created
// by startUpdatesSub, clearing each field so they can be re-created later.
func (js *jetStream) stopUpdatesSub() {
	cc := js.cluster
	// All four subscriptions are handled identically: unsubscribe then nil out.
	for _, subp := range []**subscription{
		&cc.streamResults,
		&cc.consumerResults,
		&cc.stepdown,
		&cc.peerRemove,
	} {
		if *subp != nil {
			cc.s.sysUnsubscribe(*subp)
			*subp = nil
		}
	}
}
// processLeaderChange reacts to this server gaining or losing metadata
// leadership, installing or removing the leader-only system subscriptions.
func (js *jetStream) processLeaderChange(isLeader bool) {
	if isLeader {
		js.srv.Noticef("JetStream cluster new metadata leader")
	}

	js.mu.Lock()
	defer js.mu.Unlock()

	if isLeader {
		js.startUpdatesSub()
		return
	}
	js.stopUpdatesSub()
	// TODO(dlc) - stepdown.
}
// Lock should be held.
//
// remapStreamAssignment tries to replace removePeer in the stream's raft group
// with another eligible peer from the same cluster. A candidate must be known
// to us, online, current, recently seen, and not already a group member.
// Returns true if a replacement was swapped in.
func (cc *jetStreamCluster) remapStreamAssignment(sa *streamAssignment, removePeer string) bool {
	// Need to select a replacement peer
	s, now, cluster := cc.s, time.Now(), sa.Client.Cluster
	// An explicit placement overrides the requesting client's cluster.
	if sa.Config.Placement != nil && sa.Config.Placement.Cluster != _EMPTY_ {
		cluster = sa.Config.Placement.Cluster
	}
	ourID := cc.meta.ID()
	for _, p := range cc.meta.Peers() {
		// If it is not in our list it's probably shutdown, so don't consider.
		if si, ok := s.nodeToInfo.Load(p.ID); !ok || si.(nodeInfo).offline {
			continue
		}
		// Make sure they are active and current and not already part of our group.
		current, lastSeen := p.Current, now.Sub(p.Last)
		// We do not track activity of ourselves so ignore.
		if p.ID == ourID {
			lastSeen = 0
		}
		if !current || lastSeen > lostQuorumInterval || sa.Group.isMember(p.ID) {
			continue
		}
		// Make sure the correct cluster.
		if s.clusterNameForNode(p.ID) != cluster {
			continue
		}
		// If we are here we have our candidate replacement, swap out the old one.
		for i, peer := range sa.Group.Peers {
			if peer == removePeer {
				sa.Group.Peers[i] = p.ID
				// Don't influence preferred leader.
				sa.Group.Preferred = _EMPTY_
				return true
			}
		}
	}
	// No eligible replacement found (or removePeer was not in the group).
	return false
}
// selectPeerGroup will select a group of peers to start a raft group.
// Candidates must be known to us and online; if cluster is non-empty they
// must also belong to that cluster. Returns nil when fewer than r candidates
// exist, otherwise r peer IDs in randomized order.
// TODO(dlc) - For now randomly select. Can be way smarter.
func (cc *jetStreamCluster) selectPeerGroup(r int, cluster string) []string {
	s := cc.s
	var eligible []string
	for _, peer := range cc.meta.Peers() {
		// If we know its offline or it is not in our list it probably shutdown, so don't consider.
		si, ok := s.nodeToInfo.Load(peer.ID)
		if !ok || si.(nodeInfo).offline {
			continue
		}
		// Restrict to the requested cluster when one was given.
		if cluster != _EMPTY_ && s.clusterNameForNode(peer.ID) != cluster {
			continue
		}
		eligible = append(eligible, peer.ID)
	}
	if len(eligible) < r {
		return nil
	}
	// Don't depend on range to randomize.
	rand.Shuffle(len(eligible), func(i, j int) { eligible[i], eligible[j] = eligible[j], eligible[i] })
	return eligible[:r]
}
// groupNameForStream returns a raft group name for a stream ("S" prefix).
func groupNameForStream(peers []string, storage StorageType) string {
	return groupName("S", peers, storage)
}
// groupNameForConsumer returns a raft group name for a consumer ("C" prefix).
func groupNameForConsumer(peers []string, storage StorageType) string {
	return groupName("C", peers, storage)
}
// groupName builds a raft group name of the form
// "<prefix>-R<replicas><storage-letter>-<suffix>". For a single peer the
// suffix is the peer ID itself; otherwise it is a hash of a fresh NUID.
func groupName(prefix string, peers []string, storage StorageType) string {
	var suffix string
	if len(peers) == 1 {
		suffix = peers[0]
	} else {
		suffix = string(getHash(nuid.Next()))
	}
	return fmt.Sprintf("%s-R%d%s-%s", prefix, len(peers), storage.String()[:1], suffix)
}
// createGroupForStream will create a group for assignment for the stream.
// Replica count defaults to 1 and an explicit placement cluster overrides
// the requesting client's cluster. Returns nil if not enough peers exist.
// Lock should be held.
func (cc *jetStreamCluster) createGroupForStream(ci *ClientInfo, cfg *StreamConfig) *raftGroup {
	replicas := cfg.Replicas
	if replicas == 0 {
		replicas = 1
	}
	cluster := ci.Cluster
	if p := cfg.Placement; p != nil && p.Cluster != _EMPTY_ {
		cluster = p.Cluster
	}
	// Need to create a group here.
	// TODO(dlc) - Can be way smarter here.
	peers := cc.selectPeerGroup(replicas, cluster)
	if len(peers) == 0 {
		return nil
	}
	return &raftGroup{
		Name:    groupNameForStream(peers, cfg.Storage),
		Storage: cfg.Storage,
		Peers:   peers,
	}
}
// jsClusteredStreamRequest handles a clustered stream create request on the
// metadata leader: validates config and account limits, checks for name and
// subject collisions, selects a raft group, and proposes the assignment.
// The actual response to the client is sent once the assignment is processed.
func (s *Server) jsClusteredStreamRequest(ci *ClientInfo, acc *Account, subject, reply string, rmsg []byte, config *StreamConfig) {
	js, cc := s.getJetStreamCluster()
	if js == nil || cc == nil {
		return
	}
	var resp = JSApiStreamCreateResponse{ApiResponse: ApiResponse{Type: JSApiStreamCreateResponseType}}
	// Grab our jetstream account info.
	acc.mu.RLock()
	jsa := acc.js
	acc.mu.RUnlock()
	if jsa == nil {
		resp.Error = jsNotEnabledErr
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	ccfg, err := checkStreamCfg(config)
	if err != nil {
		resp.Error = jsError(err)
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	cfg := &ccfg
	// Check for stream limits here before proposing. These need to be tracked from meta layer, not jsa.
	jsa.mu.RLock()
	asa := cc.streams[acc.Name]
	numStreams := len(asa)
	exceeded := jsa.limits.MaxStreams > 0 && numStreams >= jsa.limits.MaxStreams
	jsa.mu.RUnlock()
	if exceeded {
		resp.Error = jsError(fmt.Errorf("maximum number of streams reached"))
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Check for stream limits here before proposing.
	if err := jsa.checkLimits(cfg); err != nil {
		resp.Error = jsError(err)
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Now process the request and proposal.
	js.mu.Lock()
	defer js.mu.Unlock()
	// Reject duplicate stream names.
	if sa := js.streamAssignment(acc.Name, cfg.Name); sa != nil {
		resp.Error = jsError(ErrJetStreamStreamAlreadyUsed)
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Check for subject collisions here.
	for _, sa := range asa {
		for _, subj := range sa.Config.Subjects {
			for _, tsubj := range cfg.Subjects {
				if SubjectsCollide(tsubj, subj) {
					resp.Error = jsError(fmt.Errorf("subjects overlap with an existing stream"))
					s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
					return
				}
			}
		}
	}
	// Raft group selection and placement.
	rg := cc.createGroupForStream(ci, cfg)
	if rg == nil {
		resp.Error = jsInsufficientErr
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Pick a preferred leader.
	rg.setPreferred()
	// Sync subject for post snapshot sync.
	sa := &streamAssignment{Group: rg, Sync: syncSubjForStream(), Config: cfg, Subject: subject, Reply: reply, Client: ci, Created: time.Now().UTC()}
	cc.meta.Propose(encodeAddStreamAssignment(sa))
}
// jsClusteredStreamUpdateRequest handles a clustered stream config update on
// the metadata leader. Replica-count and mirror changes are rejected; valid
// updates are proposed against the existing raft group.
func (s *Server) jsClusteredStreamUpdateRequest(ci *ClientInfo, acc *Account, subject, reply string, rmsg []byte, cfg *StreamConfig) {
	js, cc := s.getJetStreamCluster()
	if js == nil || cc == nil {
		return
	}
	// Now process the request and proposal.
	js.mu.Lock()
	defer js.mu.Unlock()
	var resp = JSApiStreamUpdateResponse{ApiResponse: ApiResponse{Type: JSApiStreamUpdateResponseType}}
	osa := js.streamAssignment(acc.Name, cfg.Name)
	if osa == nil {
		resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Validate the update against the existing config.
	var newCfg *StreamConfig
	if jsa := js.accounts[acc.Name]; jsa != nil {
		if ncfg, err := jsa.configUpdateCheck(osa.Config, cfg); err != nil {
			resp.Error = jsError(err)
			s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
			return
		} else {
			newCfg = ncfg
		}
	} else {
		resp.Error = jsNotEnabledErr
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Check for cluster changes that we want to error on.
	if newCfg.Replicas != len(osa.Group.Peers) {
		resp.Error = &ApiError{Code: 400, Description: "Replicas configuration can not be updated"}
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	if !reflect.DeepEqual(newCfg.Mirror, osa.Config.Mirror) {
		resp.Error = &ApiError{Code: 400, Description: "Mirror configuration can not be updated"}
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Propose the update; keeps the original raft group.
	sa := &streamAssignment{Group: osa.Group, Config: newCfg, Subject: subject, Reply: reply, Client: ci}
	cc.meta.Propose(encodeUpdateStreamAssignment(sa))
}
// jsClusteredStreamDeleteRequest handles a clustered stream delete on the
// metadata leader, first proposing removal of any remaining consumers and
// then of the stream assignment itself.
func (s *Server) jsClusteredStreamDeleteRequest(ci *ClientInfo, acc *Account, stream, subject, reply string, rmsg []byte) {
	js, cc := s.getJetStreamCluster()
	if js == nil || cc == nil {
		return
	}
	js.mu.Lock()
	defer js.mu.Unlock()
	osa := js.streamAssignment(acc.Name, stream)
	if osa == nil {
		var resp = JSApiStreamDeleteResponse{ApiResponse: ApiResponse{Type: JSApiStreamDeleteResponseType}}
		resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Remove any remaining consumers as well.
	for _, ca := range osa.consumers {
		// Clear reply/state so no API response is generated for these deletes.
		ca.Reply, ca.State = _EMPTY_, nil
		cc.meta.Propose(encodeDeleteConsumerAssignment(ca))
	}
	sa := &streamAssignment{Group: osa.Group, Config: osa.Config, Subject: subject, Reply: reply, Client: ci}
	cc.meta.Propose(encodeDeleteStreamAssignment(sa))
}
// jsClusteredStreamPurgeRequest handles a clustered stream purge. For a
// replicated stream (raft node present) the purge is proposed to the stream's
// group; for an R1 stream with a local mset the purge runs inline and the
// response is sent directly.
func (s *Server) jsClusteredStreamPurgeRequest(ci *ClientInfo, acc *Account, mset *stream, stream, subject, reply string, rmsg []byte) {
	js, cc := s.getJetStreamCluster()
	if js == nil || cc == nil {
		return
	}
	js.mu.Lock()
	defer js.mu.Unlock()
	sa := js.streamAssignment(acc.Name, stream)
	if sa == nil {
		resp := JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}}
		resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	if n := sa.Group.node; n != nil {
		// Replicated: propose the purge so all members apply it.
		sp := &streamPurge{Stream: stream, LastSeq: mset.state().LastSeq, Subject: subject, Reply: reply, Client: ci}
		n.Propose(encodeStreamPurge(sp))
	} else if mset != nil {
		// Single replica: purge locally and respond here.
		var resp = JSApiStreamPurgeResponse{ApiResponse: ApiResponse{Type: JSApiStreamPurgeResponseType}}
		purged, err := mset.purge()
		if err != nil {
			resp.Error = jsError(err)
		} else {
			resp.Purged = purged
			resp.Success = true
		}
		// NOTE(review): sendAPIResponse is used even when resp.Error is set —
		// presumably intentional here; confirm against the err-response path.
		s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
	}
}
// jsClusteredStreamRestoreRequest handles a clustered stream restore: rejects
// duplicate stream names, selects a raft group, and proposes an assignment
// carrying the restore state so a member can receive the snapshot.
func (s *Server) jsClusteredStreamRestoreRequest(ci *ClientInfo, acc *Account, req *JSApiStreamRestoreRequest, stream, subject, reply string, rmsg []byte) {
	js, cc := s.getJetStreamCluster()
	if js == nil || cc == nil {
		return
	}
	js.mu.Lock()
	defer js.mu.Unlock()
	cfg := &req.Config
	resp := JSApiStreamRestoreResponse{ApiResponse: ApiResponse{Type: JSApiStreamRestoreResponseType}}
	if sa := js.streamAssignment(ci.serviceAccount(), cfg.Name); sa != nil {
		resp.Error = jsError(ErrJetStreamStreamAlreadyUsed)
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Raft group selection and placement.
	rg := cc.createGroupForStream(ci, cfg)
	if rg == nil {
		resp.Error = jsInsufficientErr
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Pick a preferred leader.
	rg.setPreferred()
	sa := &streamAssignment{Group: rg, Sync: syncSubjForStream(), Config: cfg, Subject: subject, Reply: reply, Client: ci, Created: time.Now().UTC()}
	// Now add in our restore state and pre-select a peer to handle the actual receipt of the snapshot.
	sa.Restore = &req.State
	cc.meta.Propose(encodeAddStreamAssignment(sa))
}
// allPeersOffline reports whether every known peer of the raft group is
// marked offline. Peers we have no info for are skipped; a nil group is
// never considered offline.
func (s *Server) allPeersOffline(rg *raftGroup) bool {
	if rg == nil {
		return false
	}
	// Check to see if this stream has any servers online to respond.
	for _, peer := range rg.Peers {
		si, ok := s.nodeToInfo.Load(peer)
		if !ok || si == nil {
			continue
		}
		if !si.(nodeInfo).offline {
			return false
		}
	}
	return true
}
// This will do a scatter and gather operation for all streams for this account. This is only called from metadata leader.
// This will be running in a separate Go routine.
//
// jsClusteredStreamListRequest sorts and pages the account's stream
// assignments, requests StreamInfo from each stream's leader over an inbox,
// collects the replies (5s timeout), and sends the aggregated list response.
func (s *Server) jsClusteredStreamListRequest(acc *Account, ci *ClientInfo, offset int, subject, reply string, rmsg []byte) {
	defer s.grWG.Done()
	js, cc := s.getJetStreamCluster()
	if js == nil || cc == nil {
		return
	}
	js.mu.Lock()
	var streams []*streamAssignment
	for _, sa := range cc.streams[acc.Name] {
		streams = append(streams, sa)
	}
	// Needs to be sorted for offsets etc.
	if len(streams) > 1 {
		sort.Slice(streams, func(i, j int) bool {
			return strings.Compare(streams[i].Config.Name, streams[j].Config.Name) < 0
		})
	}
	// Clamp offset and apply paging window.
	scnt := len(streams)
	if offset > scnt {
		offset = scnt
	}
	if offset > 0 {
		streams = streams[offset:]
	}
	if len(streams) > JSApiListLimit {
		streams = streams[:JSApiListLimit]
	}
	var resp = JSApiStreamListResponse{
		ApiResponse: ApiResponse{Type: JSApiStreamListResponseType},
		Streams:     make([]*StreamInfo, 0, len(streams)),
	}
	// Nothing in this page: respond immediately.
	if len(streams) == 0 {
		js.mu.Unlock()
		resp.Limit = JSApiListLimit
		resp.Offset = offset
		s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
		return
	}
	// Create an inbox for our responses and send out our requests.
	s.mu.Lock()
	inbox := s.newRespInbox()
	rc := make(chan *StreamInfo, len(streams))
	// Store our handler.
	s.sys.replies[inbox] = func(sub *subscription, _ *client, subject, _ string, msg []byte) {
		var si StreamInfo
		if err := json.Unmarshal(msg, &si); err != nil {
			s.Warnf("Error unmarshaling clustered stream info response:%v", err)
			return
		}
		// Channel is buffered to len(streams); default avoids blocking the handler.
		select {
		case rc <- &si:
		default:
			s.Warnf("Failed placing remote stream info result on internal channel")
		}
	}
	s.mu.Unlock()
	// Cleanup after.
	defer func() {
		s.mu.Lock()
		if s.sys != nil && s.sys.replies != nil {
			delete(s.sys.replies, inbox)
		}
		s.mu.Unlock()
	}()
	// Send out our requests here.
	for _, sa := range streams {
		if s.allPeersOffline(sa.Group) {
			// Place offline onto our results by hand here.
			si := &StreamInfo{Config: *sa.Config, Created: sa.Created, Cluster: js.offlineClusterInfo(sa.Group)}
			resp.Streams = append(resp.Streams, si)
		} else {
			isubj := fmt.Sprintf(clusterStreamInfoT, sa.Client.serviceAccount(), sa.Config.Name)
			s.sendInternalMsgLocked(isubj, inbox, nil, nil)
		}
	}
	// Don't hold lock.
	js.mu.Unlock()

	// Gather replies until complete, timeout, or server shutdown.
	const timeout = 5 * time.Second
	notActive := time.NewTimer(timeout)
	defer notActive.Stop()
LOOP:
	for {
		select {
		case <-s.quitCh:
			return
		case <-notActive.C:
			s.Warnf("Did not receive all stream info results for %q", acc)
			resp.Error = jsClusterIncompleteErr
			break LOOP
		case si := <-rc:
			resp.Streams = append(resp.Streams, si)
			// Check to see if we are done.
			if len(resp.Streams) == len(streams) {
				break LOOP
			}
		}
	}
	// Needs to be sorted as well.
	if len(resp.Streams) > 1 {
		sort.Slice(resp.Streams, func(i, j int) bool {
			return strings.Compare(resp.Streams[i].Config.Name, resp.Streams[j].Config.Name) < 0
		})
	}
	resp.Total = len(resp.Streams)
	resp.Limit = JSApiListLimit
	resp.Offset = offset
	s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
}
// This will do a scatter and gather operation for all consumers for this stream and account.
// This will be running in a separate Go routine.
//
// jsClusteredConsumerListRequest sorts and pages the stream's consumer
// assignments, requests ConsumerInfo from each consumer's leader over an
// inbox, collects the replies (2s timeout), and sends the aggregated list.
//
// Fix: the timeout warning previously said "stream info" (copy-paste from
// jsClusteredStreamListRequest); it now correctly says "consumer info".
func (s *Server) jsClusteredConsumerListRequest(acc *Account, ci *ClientInfo, offset int, stream, subject, reply string, rmsg []byte) {
	defer s.grWG.Done()
	js, cc := s.getJetStreamCluster()
	if js == nil || cc == nil {
		return
	}
	js.mu.Lock()
	var consumers []*consumerAssignment
	if sas := cc.streams[acc.Name]; sas != nil {
		if sa := sas[stream]; sa != nil {
			// Copy over since we need to sort etc.
			for _, ca := range sa.consumers {
				consumers = append(consumers, ca)
			}
		}
	}
	// Needs to be sorted.
	if len(consumers) > 1 {
		sort.Slice(consumers, func(i, j int) bool {
			return strings.Compare(consumers[i].Name, consumers[j].Name) < 0
		})
	}
	// Clamp offset and apply paging window.
	ocnt := len(consumers)
	if offset > ocnt {
		offset = ocnt
	}
	if offset > 0 {
		consumers = consumers[offset:]
	}
	if len(consumers) > JSApiListLimit {
		consumers = consumers[:JSApiListLimit]
	}
	// Send out our requests here.
	var resp = JSApiConsumerListResponse{
		ApiResponse: ApiResponse{Type: JSApiConsumerListResponseType},
		Consumers:   []*ConsumerInfo{},
	}
	// Nothing in this page: respond immediately.
	if len(consumers) == 0 {
		js.mu.Unlock()
		resp.Limit = JSApiListLimit
		resp.Offset = offset
		s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
		return
	}
	// Create an inbox for our responses and send out requests.
	s.mu.Lock()
	inbox := s.newRespInbox()
	rc := make(chan *ConsumerInfo, len(consumers))
	// Store our handler.
	s.sys.replies[inbox] = func(sub *subscription, _ *client, subject, _ string, msg []byte) {
		var ci ConsumerInfo
		if err := json.Unmarshal(msg, &ci); err != nil {
			s.Warnf("Error unmarshaling clustered consumer info response:%v", err)
			return
		}
		// Channel is buffered to len(consumers); default avoids blocking the handler.
		select {
		case rc <- &ci:
		default:
			s.Warnf("Failed placing consumer info result on internal chan")
		}
	}
	s.mu.Unlock()
	// Cleanup after.
	defer func() {
		s.mu.Lock()
		if s.sys != nil && s.sys.replies != nil {
			delete(s.sys.replies, inbox)
		}
		s.mu.Unlock()
	}()
	for _, ca := range consumers {
		if s.allPeersOffline(ca.Group) {
			// Place offline onto our results by hand here.
			ci := &ConsumerInfo{Config: ca.Config, Created: ca.Created, Cluster: js.offlineClusterInfo(ca.Group)}
			resp.Consumers = append(resp.Consumers, ci)
		} else {
			isubj := fmt.Sprintf(clusterConsumerInfoT, ca.Client.serviceAccount(), stream, ca.Name)
			s.sendInternalMsgLocked(isubj, inbox, nil, nil)
		}
	}
	js.mu.Unlock()

	// Gather replies until complete, timeout, or server shutdown.
	const timeout = 2 * time.Second
	notActive := time.NewTimer(timeout)
	defer notActive.Stop()
LOOP:
	for {
		select {
		case <-s.quitCh:
			return
		case <-notActive.C:
			s.Warnf("Did not receive all consumer info results for %q", acc)
			break LOOP
		case ci := <-rc:
			resp.Consumers = append(resp.Consumers, ci)
			// Check to see if we are done.
			if len(resp.Consumers) == len(consumers) {
				break LOOP
			}
		}
	}
	// Needs to be sorted as well.
	if len(resp.Consumers) > 1 {
		sort.Slice(resp.Consumers, func(i, j int) bool {
			return strings.Compare(resp.Consumers[i].Name, resp.Consumers[j].Name) < 0
		})
	}
	resp.Total = len(resp.Consumers)
	resp.Limit = JSApiListLimit
	resp.Offset = offset
	s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
}
// encodeStreamPurge serializes a purge proposal: a purgeStreamOp op byte
// followed by the JSON encoding of sp.
func encodeStreamPurge(sp *streamPurge) []byte {
	b := new(bytes.Buffer)
	b.WriteByte(byte(purgeStreamOp))
	json.NewEncoder(b).Encode(sp)
	return b.Bytes()
}
// decodeStreamPurge unmarshals a purge proposal (op byte already stripped).
func decodeStreamPurge(buf []byte) (*streamPurge, error) {
	sp := &streamPurge{}
	if err := json.Unmarshal(buf, sp); err != nil {
		return sp, err
	}
	return sp, nil
}
// jsClusteredConsumerDeleteRequest handles a clustered consumer delete on the
// metadata leader, validating that the stream and consumer exist before
// proposing removal of the consumer assignment.
func (s *Server) jsClusteredConsumerDeleteRequest(ci *ClientInfo, acc *Account, stream, consumer, subject, reply string, rmsg []byte) {
	js, cc := s.getJetStreamCluster()
	if js == nil || cc == nil {
		return
	}
	js.mu.Lock()
	defer js.mu.Unlock()
	var resp = JSApiConsumerDeleteResponse{ApiResponse: ApiResponse{Type: JSApiConsumerDeleteResponseType}}
	sa := js.streamAssignment(acc.Name, stream)
	if sa == nil {
		resp.Error = jsNotFoundError(ErrJetStreamStreamNotFound)
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	if sa.consumers == nil {
		resp.Error = jsNoConsumerErr
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	oca := sa.consumers[consumer]
	if oca == nil {
		resp.Error = jsNoConsumerErr
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Mark deleted so a concurrent re-create of the same durable is allowed.
	oca.deleted = true
	ca := &consumerAssignment{Group: oca.Group, Stream: stream, Name: consumer, Config: oca.Config, Subject: subject, Reply: reply, Client: ci}
	cc.meta.Propose(encodeDeleteConsumerAssignment(ca))
}
// encodeMsgDelete serializes a message-delete proposal: a deleteMsgOp op
// byte followed by the JSON encoding of md.
func encodeMsgDelete(md *streamMsgDelete) []byte {
	b := new(bytes.Buffer)
	b.WriteByte(byte(deleteMsgOp))
	json.NewEncoder(b).Encode(md)
	return b.Bytes()
}
// decodeMsgDelete unmarshals a message-delete proposal (op byte stripped).
func decodeMsgDelete(buf []byte) (*streamMsgDelete, error) {
	md := &streamMsgDelete{}
	if err := json.Unmarshal(buf, md); err != nil {
		return md, err
	}
	return md, nil
}
// jsClusteredMsgDeleteRequest handles a clustered single-message delete. For
// a replicated stream the delete is proposed to the stream's raft group; for
// an R1 stream with a local mset it is applied inline and answered here.
func (s *Server) jsClusteredMsgDeleteRequest(ci *ClientInfo, acc *Account, mset *stream, stream, subject, reply string, req *JSApiMsgDeleteRequest, rmsg []byte) {
	js, cc := s.getJetStreamCluster()
	if js == nil || cc == nil {
		return
	}
	js.mu.Lock()
	defer js.mu.Unlock()
	sa := js.streamAssignment(acc.Name, stream)
	if sa == nil {
		s.Debugf("Message delete failed, could not locate stream '%s > %s'", acc.Name, stream)
		return
	}
	// Check for single replica items.
	if n := sa.Group.node; n != nil {
		md := &streamMsgDelete{Seq: req.Seq, NoErase: req.NoErase, Stream: stream, Subject: subject, Reply: reply, Client: ci}
		n.Propose(encodeMsgDelete(md))
	} else if mset != nil {
		var err error
		var removed bool
		// NoErase removes the message; otherwise it is securely erased.
		if req.NoErase {
			removed, err = mset.removeMsg(req.Seq)
		} else {
			removed, err = mset.eraseMsg(req.Seq)
		}
		var resp = JSApiMsgDeleteResponse{ApiResponse: ApiResponse{Type: JSApiMsgDeleteResponseType}}
		if err != nil {
			resp.Error = jsError(err)
		} else if !removed {
			resp.Error = &ApiError{Code: 400, Description: fmt.Sprintf("sequence [%d] not found", req.Seq)}
		} else {
			resp.Success = true
		}
		s.sendAPIResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(resp))
	}
}
// encodeAddStreamAssignment serializes a stream assignment for the metadata
// log: an assignStreamOp op byte followed by JSON.
func encodeAddStreamAssignment(sa *streamAssignment) []byte {
	b := new(bytes.Buffer)
	b.WriteByte(byte(assignStreamOp))
	json.NewEncoder(b).Encode(sa)
	return b.Bytes()
}
// encodeUpdateStreamAssignment serializes a stream assignment update for the
// metadata log: an updateStreamOp op byte followed by JSON.
func encodeUpdateStreamAssignment(sa *streamAssignment) []byte {
	b := new(bytes.Buffer)
	b.WriteByte(byte(updateStreamOp))
	json.NewEncoder(b).Encode(sa)
	return b.Bytes()
}
// encodeDeleteStreamAssignment serializes a stream assignment removal for
// the metadata log: a removeStreamOp op byte followed by JSON.
func encodeDeleteStreamAssignment(sa *streamAssignment) []byte {
	b := new(bytes.Buffer)
	b.WriteByte(byte(removeStreamOp))
	json.NewEncoder(b).Encode(sa)
	return b.Bytes()
}
// decodeStreamAssignment unmarshals a stream assignment (op byte stripped).
func decodeStreamAssignment(buf []byte) (*streamAssignment, error) {
	sa := &streamAssignment{}
	if err := json.Unmarshal(buf, sa); err != nil {
		return sa, err
	}
	return sa, nil
}
// createGroupForConsumer will create a new group with same peer set as the stream.
// Returns nil when the stream has no peers.
func (cc *jetStreamCluster) createGroupForConsumer(sa *streamAssignment) *raftGroup {
	if len(sa.Group.Peers) == 0 {
		return nil
	}
	peers := sa.Group.Peers
	return &raftGroup{
		Name:    groupNameForConsumer(peers, sa.Config.Storage),
		Storage: sa.Config.Storage,
		Peers:   peers,
	}
}
// jsClusteredConsumerRequest handles a clustered consumer create on the
// metadata leader: validates the stream, builds the consumer raft group
// (ephemerals are forced to R1 on the preferred peer), resolves the consumer
// name (durable reuse rules, unique ephemeral names), and proposes the
// assignment.
func (s *Server) jsClusteredConsumerRequest(ci *ClientInfo, acc *Account, subject, reply string, rmsg []byte, stream string, cfg *ConsumerConfig) {
	js, cc := s.getJetStreamCluster()
	if js == nil || cc == nil {
		return
	}
	js.mu.Lock()
	defer js.mu.Unlock()
	var resp = JSApiConsumerCreateResponse{ApiResponse: ApiResponse{Type: JSApiConsumerCreateResponseType}}
	// Lookup the stream assignment.
	sa := js.streamAssignment(acc.Name, stream)
	if sa == nil {
		resp.Error = jsError(ErrJetStreamStreamNotFound)
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	rg := cc.createGroupForConsumer(sa)
	if rg == nil {
		resp.Error = jsInsufficientErr
		s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
		return
	}
	// Pick a preferred leader.
	rg.setPreferred()
	// We need to set the ephemeral here before replicating.
	var oname string
	if !isDurableConsumer(cfg) {
		// We chose to have ephemerals be R=1.
		rg.Peers = []string{rg.Preferred}
		rg.Name = groupNameForConsumer(rg.Peers, rg.Storage)
		// Make sure name is unique.
		for {
			oname = createConsumerName()
			if sa.consumers != nil {
				if sa.consumers[oname] != nil {
					continue
				}
			}
			break
		}
	} else {
		oname = cfg.Durable
		if ca := sa.consumers[oname]; ca != nil && !ca.deleted {
			// This can be ok if delivery subject update.
			if !reflect.DeepEqual(cfg, ca.Config) && !configsEqualSansDelivery(*cfg, *ca.Config) {
				resp.Error = jsError(ErrJetStreamConsumerAlreadyUsed)
				s.sendAPIErrResponse(ci, acc, subject, reply, string(rmsg), s.jsonResponse(&resp))
				return
			}
		}
	}
	ca := &consumerAssignment{Group: rg, Stream: stream, Name: oname, Config: cfg, Subject: subject, Reply: reply, Client: ci, Created: time.Now().UTC()}
	cc.meta.Propose(encodeAddConsumerAssignment(ca))
}
// encodeAddConsumerAssignment serializes a consumer assignment for the
// metadata log: an assignConsumerOp op byte followed by JSON.
func encodeAddConsumerAssignment(ca *consumerAssignment) []byte {
	b := new(bytes.Buffer)
	b.WriteByte(byte(assignConsumerOp))
	json.NewEncoder(b).Encode(ca)
	return b.Bytes()
}
// encodeDeleteConsumerAssignment serializes a consumer assignment removal
// for the metadata log: a removeConsumerOp op byte followed by JSON.
func encodeDeleteConsumerAssignment(ca *consumerAssignment) []byte {
	b := new(bytes.Buffer)
	b.WriteByte(byte(removeConsumerOp))
	json.NewEncoder(b).Encode(ca)
	return b.Bytes()
}
// decodeConsumerAssignment unmarshals a consumer assignment (op byte stripped).
func decodeConsumerAssignment(buf []byte) (*consumerAssignment, error) {
	ca := &consumerAssignment{}
	if err := json.Unmarshal(buf, ca); err != nil {
		return ca, err
	}
	return ca, nil
}
// encodeAddConsumerAssignmentCompressed serializes a consumer assignment as
// an assignCompressedConsumerOp op byte followed by the s2-compressed JSON
// payload. Returns nil if JSON marshaling fails.
func encodeAddConsumerAssignmentCompressed(ca *consumerAssignment) []byte {
	b, err := json.Marshal(ca)
	if err != nil {
		return nil
	}
	// TODO(dlc) - Streaming better approach here probably.
	var bb bytes.Buffer
	bb.WriteByte(byte(assignCompressedConsumerOp))
	bb.Write(s2.Encode(nil, b))
	return bb.Bytes()
}
// decodeConsumerAssignmentCompressed reverses
// encodeAddConsumerAssignmentCompressed (op byte stripped): s2-decompress,
// then JSON-unmarshal.
func decodeConsumerAssignmentCompressed(buf []byte) (*consumerAssignment, error) {
	raw, err := s2.Decode(nil, buf)
	if err != nil {
		return nil, err
	}
	ca := &consumerAssignment{}
	if err := json.Unmarshal(raw, ca); err != nil {
		return ca, err
	}
	return ca, nil
}
// errBadStreamMsg is returned by decodeStreamMsg for truncated/malformed buffers.
var errBadStreamMsg = errors.New("jetstream cluster bad replicated stream msg")
// decodeStreamMsg decodes a replicated stream message produced by
// encodeStreamMsg (with the leading op byte already stripped).
// Little-endian layout:
//
//	lseq(8) ts(8) subjLen(2) subj re45plyLen... — i.e.
//	lseq(8) ts(8) | len(2)+subject | len(2)+reply | len(2)+hdr | len(4)+msg
//
// The minimum size of 26 covers the fixed fields plus all four length
// prefixes. Note hdr and msg are sub-slices of buf, not copies.
func decodeStreamMsg(buf []byte) (subject, reply string, hdr, msg []byte, lseq uint64, ts int64, err error) {
	var le = binary.LittleEndian
	if len(buf) < 26 {
		return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
	}
	lseq = le.Uint64(buf)
	buf = buf[8:]
	ts = int64(le.Uint64(buf))
	buf = buf[8:]
	// Subject.
	sl := int(le.Uint16(buf))
	buf = buf[2:]
	if len(buf) < sl {
		return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
	}
	subject = string(buf[:sl])
	buf = buf[sl:]
	// Reply.
	if len(buf) < 2 {
		return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
	}
	rl := int(le.Uint16(buf))
	buf = buf[2:]
	if len(buf) < rl {
		return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
	}
	reply = string(buf[:rl])
	buf = buf[rl:]
	// Headers.
	if len(buf) < 2 {
		return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
	}
	hl := int(le.Uint16(buf))
	buf = buf[2:]
	if len(buf) < hl {
		return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
	}
	hdr = buf[:hl]
	buf = buf[hl:]
	// Message payload (uint32 length).
	if len(buf) < 4 {
		return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
	}
	ml := int(le.Uint32(buf))
	buf = buf[4:]
	if len(buf) < ml {
		return _EMPTY_, _EMPTY_, nil, nil, 0, 0, errBadStreamMsg
	}
	msg = buf[:ml]
	return subject, reply, hdr, msg, lseq, ts, nil
}
// encodeStreamMsg encodes a stream message for raft replication: a
// streamMsgOp op byte followed by little-endian lseq(8), ts(8), then
// length-prefixed subject(2), reply(2), hdr(2) and msg(4). The inverse,
// minus the op byte, is decodeStreamMsg.
func encodeStreamMsg(subject, reply string, hdr, msg []byte, lseq uint64, ts int64) []byte {
	elen := 1 + 8 + 8 + len(subject) + len(reply) + len(hdr) + len(msg)
	elen += (2 + 2 + 2 + 4) // Encoded lengths, 4bytes
	// TODO(dlc) - check sizes of subject, reply and hdr, make sure uint16 ok.
	buf := make([]byte, elen)
	buf[0] = byte(streamMsgOp)
	var le = binary.LittleEndian
	wi := 1
	le.PutUint64(buf[wi:], lseq)
	wi += 8
	le.PutUint64(buf[wi:], uint64(ts))
	wi += 8
	le.PutUint16(buf[wi:], uint16(len(subject)))
	wi += 2
	copy(buf[wi:], subject)
	wi += len(subject)
	le.PutUint16(buf[wi:], uint16(len(reply)))
	wi += 2
	copy(buf[wi:], reply)
	wi += len(reply)
	le.PutUint16(buf[wi:], uint16(len(hdr)))
	wi += 2
	if len(hdr) > 0 {
		copy(buf[wi:], hdr)
		wi += len(hdr)
	}
	le.PutUint32(buf[wi:], uint32(len(msg)))
	wi += 4
	if len(msg) > 0 {
		copy(buf[wi:], msg)
		wi += len(msg)
	}
	return buf[:wi]
}
// StreamSnapshot is used for snapshotting and out of band catch up in clustered mode.
type streamSnapshot struct {
	Msgs     uint64   `json:"messages"`
	Bytes    uint64   `json:"bytes"`
	FirstSeq uint64   `json:"first_seq"`
	LastSeq  uint64   `json:"last_seq"`
	Deleted  []uint64 `json:"deleted,omitempty"`
}
// Grab a snapshot of a stream for clustered mode.
// Returns the JSON encoding of a streamSnapshot built from store state.
func (mset *stream) stateSnapshot() []byte {
	mset.mu.RLock()
	defer mset.mu.RUnlock()

	st := mset.store.State()
	b, _ := json.Marshal(&streamSnapshot{
		Msgs:     st.Msgs,
		Bytes:    st.Bytes,
		FirstSeq: st.FirstSeq,
		LastSeq:  st.LastSeq,
		Deleted:  st.Deleted,
	})
	return b
}
// processClusteredMsg will propose the inbound message to the underlying raft group.
// Before proposing it pre-emptively checks server resource limits, account
// limits and the stream's max message size so obviously doomed messages are
// rejected (and responded to, when acks are enabled) without a raft round trip.
// Returns the tentative clustered sequence on success, or 0 and an error.
func (mset *stream) processClusteredInboundMsg(subject, reply string, hdr, msg []byte) (uint64, error) {
	// For possible error response.
	var response []byte

	mset.mu.RLock()
	canRespond := !mset.cfg.NoAck && len(reply) > 0
	name, stype := mset.cfg.Name, mset.cfg.Storage
	s, js, jsa, rf, outq := mset.srv, mset.js, mset.jsa, mset.cfg.Replicas, mset.outq
	maxMsgSize := int(mset.cfg.MaxMsgSize)
	mset.mu.RUnlock()

	// Check here pre-emptively if we have exceeded this server limits.
	if js.limitsExceeded(stype) {
		s.resourcesExeededError()
		if canRespond {
			b, _ := json.Marshal(&JSPubAckResponse{PubAck: &PubAck{Stream: name}, Error: jsInsufficientErr})
			outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, b, nil, 0, nil})
		}
		// Stepdown regardless.
		if node := mset.raftNode(); node != nil {
			node.StepDown()
		}
		return 0, ErrJetStreamResourcesExceeded
	}

	// Check here pre-emptively if we have exceeded our account limits.
	// Account for the replication factor since each replica stores a copy.
	var exceeded bool
	jsa.mu.RLock()
	if stype == MemoryStorage {
		total := jsa.storeTotal + int64(memStoreMsgSize(subject, hdr, msg)*uint64(rf))
		if jsa.limits.MaxMemory > 0 && total > jsa.limits.MaxMemory {
			exceeded = true
		}
	} else {
		total := jsa.storeTotal + int64(fileStoreMsgSize(subject, hdr, msg)*uint64(rf))
		if jsa.limits.MaxStore > 0 && total > jsa.limits.MaxStore {
			exceeded = true
		}
	}
	jsa.mu.RUnlock()

	// If we have exceeded our account limits go ahead and return.
	if exceeded {
		err := fmt.Errorf("JetStream resource limits exceeded for account: %q", jsa.acc().Name)
		s.Warnf(err.Error())
		if canRespond {
			var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: name}}
			resp.Error = &ApiError{Code: 400, Description: "resource limits exceeded for account"}
			response, _ = json.Marshal(resp)
			outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil})
		}
		return 0, err
	}

	// Check msgSize if we have a limit set there. Again this works if it goes through but better to be pre-emptive.
	if maxMsgSize >= 0 && (len(hdr)+len(msg)) > maxMsgSize {
		err := fmt.Errorf("JetStream message size exceeds limits for '%s > %s'", jsa.acc().Name, name)
		s.Warnf(err.Error())
		if canRespond {
			var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: name}}
			resp.Error = &ApiError{Code: 400, Description: "message size exceeds maximum allowed"}
			response, _ = json.Marshal(resp)
			outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil})
		}
		return 0, err
	}

	// Proceed with proposing this message.
	// We only use mset.clseq for clustering and in case we run ahead of actual commits.
	// Check if we need to set initial value here
	mset.clMu.Lock()
	if mset.clseq == 0 {
		mset.mu.RLock()
		mset.clseq = mset.lseq
		mset.mu.RUnlock()
	}

	esm := encodeStreamMsg(subject, reply, hdr, msg, mset.clseq, time.Now().UnixNano())
	mset.clseq++
	seq := mset.clseq

	// Do proposal.
	err := mset.node.Propose(esm)
	if err != nil {
		// Roll back our tentative sequence while still holding clMu, the lock
		// that guards clseq. (Decrementing under mset.mu, as before, raced
		// concurrent proposers mutating clseq under clMu.)
		mset.clseq--
	}
	mset.clMu.Unlock()

	if err != nil {
		seq = 0
		if canRespond {
			var resp = &JSPubAckResponse{PubAck: &PubAck{Stream: name}}
			resp.Error = &ApiError{Code: 503, Description: err.Error()}
			response, _ = json.Marshal(resp)
			outq.send(&jsPubMsg{reply, _EMPTY_, _EMPTY_, nil, response, nil, 0, nil})
		}
		if isOutOfSpaceErr(err) {
			s.handleOutOfSpace(name)
		}
	}

	return seq, err
}
// For requesting messages post raft snapshot to catch up streams post server restart.
// Any deleted msgs etc will be handled inline on catchup.
// Peer identifies the requesting node so the leader can track catchup lag;
// the requested range is [FirstSeq, LastSeq] inclusive.
type streamSyncRequest struct {
	Peer     string `json:"peer,omitempty"`
	FirstSeq uint64 `json:"first_seq"`
	LastSeq  uint64 `json:"last_seq"`
}
// Given a stream state that represents a snapshot, calculate the sync request based on our current state.
// Returns nil when our store already covers the snapshot's last sequence.
func (mset *stream) calculateSyncRequest(state *StreamState, snap *streamSnapshot) *streamSyncRequest {
	// Already caught up?
	if snap.LastSeq <= state.LastSeq {
		return nil
	}
	sreq := &streamSyncRequest{
		Peer:     mset.node.ID(),
		FirstSeq: state.LastSeq + 1,
		LastSeq:  snap.LastSeq,
	}
	return sreq
}
// processSnapshotDeletes will update our current store based on the snapshot
// but only processing deletes and new FirstSeq / purges.
func (mset *stream) processSnapshotDeletes(snap *streamSnapshot) {
	state := mset.store.State()

	// If the snapshot starts ahead of us, purge everything below its FirstSeq
	// and refresh our view of the store.
	if state.FirstSeq < snap.FirstSeq {
		mset.store.Compact(snap.FirstSeq)
		state = mset.store.State()
	}

	// Remove any individually deleted sequences that we actually hold.
	for _, seq := range snap.Deleted {
		if seq <= state.LastSeq {
			mset.store.RemoveMsg(seq)
		}
	}
}
// setCatchupPeer records the number of messages (lag) we still owe the given
// peer during an out-of-band catchup. A no-op for an empty peer.
func (mset *stream) setCatchupPeer(peer string, lag uint64) {
	if peer == _EMPTY_ {
		return
	}
	mset.mu.Lock()
	defer mset.mu.Unlock()
	// Lazily allocate the tracking map.
	if mset.catchups == nil {
		mset.catchups = map[string]uint64{}
	}
	mset.catchups[peer] = lag
}
// Will decrement by one.
func (mset *stream) updateCatchupPeer(peer string) {
	if peer == _EMPTY_ {
		return
	}
	mset.mu.Lock()
	defer mset.mu.Unlock()
	// Reading a nil map yields zero, so no existence check is needed here.
	if remaining := mset.catchups[peer]; remaining > 0 {
		mset.catchups[peer] = remaining - 1
	}
}
// clearCatchupPeer drops the catchup lag tracking for the given peer.
func (mset *stream) clearCatchupPeer(peer string) {
	mset.mu.Lock()
	defer mset.mu.Unlock()
	if mset.catchups != nil {
		delete(mset.catchups, peer)
	}
}
// Lock should be held.
// Drops all tracked catchup peers at once by releasing the map; the previous
// nil guard was redundant since assigning nil to a nil field is harmless.
func (mset *stream) clearAllCatchupPeers() {
	mset.catchups = nil
}
// lagForCatchupPeer returns the remaining catchup lag recorded for peer,
// or 0 when the peer is not being tracked.
func (mset *stream) lagForCatchupPeer(peer string) uint64 {
	mset.mu.RLock()
	lag := uint64(0)
	if mset.catchups != nil {
		lag = mset.catchups[peer]
	}
	mset.mu.RUnlock()
	return lag
}
// hasCatchupPeers reports whether any peer is currently catching up from us.
func (mset *stream) hasCatchupPeers() bool {
	mset.mu.RLock()
	n := len(mset.catchups)
	mset.mu.RUnlock()
	return n > 0
}
// setCatchingUp marks this stream as currently catching up from a peer.
func (mset *stream) setCatchingUp() {
	mset.mu.Lock()
	defer mset.mu.Unlock()
	mset.catchup = true
}
// clearCatchingUp clears the catching-up state for this stream.
func (mset *stream) clearCatchingUp() {
	mset.mu.Lock()
	defer mset.mu.Unlock()
	mset.catchup = false
}
// isCatchingUp reports whether this stream is currently catching up.
func (mset *stream) isCatchingUp() bool {
	mset.mu.RLock()
	caughtMid := mset.catchup
	mset.mu.RUnlock()
	return caughtMid
}
// Process a stream snapshot.
// Reconciles our local store against the leader's snapshot and, if behind,
// runs a catchup: pause raft apply, subscribe to a fresh reply subject, send a
// streamSyncRequest on the stream's sync subject, then apply the out-of-band
// messages until we reach the snapshot's last sequence. On transient errors it
// recomputes the sync request and retries (RETRY label).
func (mset *stream) processSnapshot(snap *streamSnapshot) {
	// Update any deletes, etc.
	mset.processSnapshotDeletes(snap)

	mset.mu.Lock()
	state := mset.store.State()
	sreq := mset.calculateSyncRequest(&state, snap)
	s, subject, n := mset.srv, mset.sa.Sync, mset.node
	msetName := mset.cfg.Name
	mset.mu.Unlock()

	// Just return if up to date..
	if sreq == nil {
		return
	}

	// Pause the apply channel for our raft group while we catch up.
	n.PauseApply()
	defer n.ResumeApply()

	// Set our catchup state.
	mset.setCatchingUp()
	defer mset.clearCatchingUp()

	js := s.getJetStream()

	var sub *subscription
	var err error

	// If no catchup traffic arrives within this window we log and retry.
	const activityInterval = 5 * time.Second
	notActive := time.NewTimer(activityInterval)
	defer notActive.Stop()

	defer func() {
		if sub != nil {
			s.sysUnsubscribe(sub)
		}
		// Make sure any consumers are updated for the pending amounts.
		mset.mu.Lock()
		for _, o := range mset.consumers {
			o.mu.Lock()
			if o.isLeader() {
				o.setInitialPending()
			}
			o.mu.Unlock()
		}
		mset.mu.Unlock()
	}()

RETRY:
	// If we have a sub clear that here.
	if sub != nil {
		s.sysUnsubscribe(sub)
		sub = nil
	}

	// Grab sync request again on failures.
	if sreq == nil {
		mset.mu.Lock()
		state := mset.store.State()
		sreq = mset.calculateSyncRequest(&state, snap)
		mset.mu.Unlock()
		if sreq == nil {
			return
		}
	}

	// Used to transfer message from the wire to another Go routine internally.
	type im struct {
		msg   []byte
		reply string
	}

	msgsC := make(chan *im, 32768)

	// Send our catchup request here.
	reply := syncReplySubject()
	sub, err = s.sysSubscribe(reply, func(_ *subscription, _ *client, _, reply string, msg []byte) {
		// Make copies - https://github.com/go101/go101/wiki
		// TODO(dlc) - Since we are using a buffer from the inbound client/route.
		select {
		case msgsC <- &im{append(msg[:0:0], msg...), reply}:
		default:
			// Channel full; drop and let the stall timer trigger a retry.
			s.Warnf("Failed to place catchup message onto internal channel: %d pending", len(msgsC))
			return
		}
	})
	if err != nil {
		s.Errorf("Could not subscribe to stream catchup: %v", err)
		return
	}

	b, _ := json.Marshal(sreq)
	s.sendInternalMsgLocked(subject, reply, nil, b)

	// Clear our sync request and capture last.
	last := sreq.LastSeq
	sreq = nil

	// Run our own select loop here.
	for qch, lch := n.QuitC(), n.LeadChangeC(); ; {
		select {
		case mrec := <-msgsC:
			notActive.Reset(activityInterval)
			msg := mrec.msg
			// Check for eof signaling.
			if len(msg) == 0 {
				return
			}
			if lseq, err := mset.processCatchupMsg(msg); err == nil {
				if lseq >= last {
					return
				}
			} else if isOutOfSpaceErr(err) {
				s.handleOutOfSpace(msetName)
				return
			} else if err == ErrJetStreamResourcesExceeded {
				s.resourcesExeededError()
				return
			} else {
				goto RETRY
			}
			// Flow-control ack back to the sender (see runCatchup's ackReply).
			if mrec.reply != _EMPTY_ {
				s.sendInternalMsgLocked(mrec.reply, _EMPTY_, nil, nil)
			}
		case <-notActive.C:
			s.Warnf("Catchup for stream '%s > %s' stalled", mset.account(), mset.name())
			notActive.Reset(activityInterval)
			goto RETRY
		case <-s.quitCh:
			return
		case <-qch:
			return
		case isLeader := <-lch:
			js.processStreamLeaderChange(mset, isLeader)
		}
	}
}
// processCatchupMsg will be called to process out of band catchup msgs from a sync request.
// Decodes an encodeStreamMsg payload (after the leading op byte), stores it
// (or skips the sequence for placeholder messages), updates our last sequence
// and returns the applied sequence.
func (mset *stream) processCatchupMsg(msg []byte) (uint64, error) {
	if len(msg) == 0 || entryOp(msg[0]) != streamMsgOp {
		// TODO(dlc) - This is error condition, log.
		return 0, errors.New("bad catchup msg")
	}

	subj, _, hdr, msg, seq, ts, err := decodeStreamMsg(msg[1:])
	if err != nil {
		return 0, errors.New("bad catchup msg")
	}

	// Refuse to store anything once server resource limits are exceeded.
	if mset.js.limitsExceeded(mset.cfg.Storage) {
		return 0, ErrJetStreamResourcesExceeded
	}

	// Put into our store
	// Messages to be skipped have no subject or timestamp.
	// TODO(dlc) - formalize with skipMsgOp
	if subj == _EMPTY_ && ts == 0 {
		lseq := mset.store.SkipMsg()
		if lseq != seq {
			return 0, errors.New("wrong sequence for skipped msg")
		}
	} else if err := mset.store.StoreRawMsg(subj, hdr, msg, seq, ts); err != nil {
		return 0, err
	}

	// Update our lseq.
	mset.setLastSeq(seq)

	return seq, nil
}
// handleClusterSyncRequest receives a streamSyncRequest from a peer that needs
// catchup and spins up runCatchup in its own goroutine to serve it.
func (mset *stream) handleClusterSyncRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
	var sreq streamSyncRequest
	if err := json.Unmarshal(msg, &sreq); err != nil {
		// Log the malformed request instead of silently dropping it
		// (resolves the previous "Log error" TODO).
		mset.srv.Warnf("Invalid stream sync request for '%s > %s': %v", mset.account(), mset.name(), err)
		return
	}
	mset.srv.startGoRoutine(func() { mset.runCatchup(reply, &sreq) })
}
// Lock should be held.
// offlineClusterInfo builds a ClusterInfo for a raft group whose node is not
// running, marking every known peer as offline and not current.
func (js *jetStream) offlineClusterInfo(rg *raftGroup) *ClusterInfo {
	s := js.srv
	ci := &ClusterInfo{Name: s.ClusterName()}
	for _, peer := range rg.Peers {
		sir, ok := s.nodeToInfo.Load(peer)
		if !ok || sir == nil {
			continue
		}
		ni := sir.(nodeInfo)
		ci.Replicas = append(ci.Replicas, &PeerInfo{
			Name:    ni.name,
			Current: false,
			Offline: true,
		})
	}
	return ci
}
// clusterInfo will report on the status of the raft group.
// Returns a minimal ClusterInfo (this server as leader) when there is no raft
// group, otherwise lists each member peer with currency, offline state,
// last-seen activity and replication lag.
func (js *jetStream) clusterInfo(rg *raftGroup) *ClusterInfo {
	if js == nil {
		return nil
	}
	js.mu.RLock()
	defer js.mu.RUnlock()

	s := js.srv
	if rg == nil || rg.node == nil {
		return &ClusterInfo{
			Name:   s.ClusterName(),
			Leader: s.Name(),
		}
	}
	n := rg.node
	ci := &ClusterInfo{
		Name:   s.ClusterName(),
		Leader: s.serverNameForNode(n.GroupLeader()),
	}

	now := time.Now()

	id, peers := n.ID(), n.Peers()

	// If we are leaderless, do not suppress putting us in the peer list.
	// (Clearing id means the rp.ID != id filter below never excludes us.)
	if ci.Leader == _EMPTY_ {
		id = _EMPTY_
	}

	for _, rp := range peers {
		if rp.ID != id && rg.isMember(rp.ID) {
			lastSeen := now.Sub(rp.Last)
			if lastSeen < 0 {
				// Clock skew can make this negative; clamp to a tiny positive value.
				lastSeen = 1
			}
			current := rp.Current
			// Downgrade currency if the peer has been silent past the quorum window.
			if current && lastSeen > lostQuorumInterval {
				current = false
			}
			if sir, ok := s.nodeToInfo.Load(rp.ID); ok && sir != nil {
				si := sir.(nodeInfo)
				pi := &PeerInfo{Name: si.name, Current: current, Offline: si.offline, Active: lastSeen, Lag: rp.Lag}
				ci.Replicas = append(ci.Replicas, pi)
			}
		}
	}
	return ci
}
// checkClusterInfo overlays out-of-band catchup lag onto the replica list of
// the given StreamInfo: any replica still catching up is marked not current.
func (mset *stream) checkClusterInfo(si *StreamInfo) {
	for _, replica := range si.Cluster.Replicas {
		peer := string(getHash(replica.Name))
		lag := mset.lagForCatchupPeer(peer)
		if lag == 0 {
			continue
		}
		replica.Current = false
		replica.Lag = lag
	}
}
// handleClusterStreamInfoRequest answers cluster-internal stream info requests
// with the stream's state, config, cluster, source and mirror information.
// Normally only the leader responds; followers answer only when the group is
// leaderless.
func (mset *stream) handleClusterStreamInfoRequest(sub *subscription, c *client, subject, reply string, msg []byte) {
	mset.mu.RLock()
	sysc, js, sa, config := mset.sysc, mset.srv.js, mset.sa, mset.cfg
	stype := mset.cfg.Storage
	isLeader := mset.isLeader()
	mset.mu.RUnlock()

	// By design all members will receive this. Normally we only want the leader answering.
	// But if we have stalled and lost quorom all can respond.
	if sa != nil && !js.isGroupLeaderless(sa.Group) && !isLeader {
		return
	}

	// If we are here we are in a compromised state due to server limits let someone else answer if they can.
	// The delay gives a healthier member a chance to answer first.
	if !isLeader && js.limitsExceeded(stype) {
		time.Sleep(100 * time.Millisecond)
	}

	si := &StreamInfo{
		Created: mset.createdTime(),
		State:   mset.state(),
		Config:  config,
		Cluster: js.clusterInfo(mset.raftGroup()),
		Sources: mset.sourcesInfo(),
		Mirror:  mset.mirrorInfo(),
	}

	// Check for out of band catchups.
	if mset.hasCatchupPeers() {
		mset.checkClusterInfo(si)
	}

	sysc.sendInternalMsg(reply, _EMPTY_, nil, si)
}
// runCatchup serves a peer's streamSyncRequest: it streams the requested
// sequence range to sendSubject in flow-controlled batches, bounded by
// maxOutBytes/maxOutMsgs of in-flight data. The receiver acks each message on
// a reply subject whose last token carries the message size, which is how we
// decrement the in-flight counters. An empty final message signals EOF.
func (mset *stream) runCatchup(sendSubject string, sreq *streamSyncRequest) {
	s := mset.srv
	defer s.grWG.Done()

	const maxOutBytes = int64(1 * 1024 * 1024) // 1MB for now.
	const maxOutMsgs = int32(16384)
	outb := int64(0)
	outm := int32(0)

	// Flow control processing.
	// Extracts the byte size embedded as the last token of the ack subject.
	ackReplySize := func(subj string) int64 {
		if li := strings.LastIndexByte(subj, btsep); li > 0 && li < len(subj) {
			return parseAckReplyNum(subj[li+1:])
		}
		return 0
	}

	nextBatchC := make(chan struct{}, 1)
	nextBatchC <- struct{}{}

	// Setup ackReply for flow control.
	ackReply := syncAckSubject()
	ackSub, _ := s.sysSubscribe(ackReply, func(sub *subscription, c *client, subject, reply string, msg []byte) {
		sz := ackReplySize(subject)
		atomic.AddInt64(&outb, -sz)
		atomic.AddInt32(&outm, -1)
		mset.updateCatchupPeer(sreq.Peer)
		// Kick the send loop; non-blocking since capacity is 1.
		select {
		case nextBatchC <- struct{}{}:
		default:
		}
	})
	defer s.sysUnsubscribe(ackSub)
	ackReplyT := strings.ReplaceAll(ackReply, ".*", ".%d")

	// EOF
	defer s.sendInternalMsgLocked(sendSubject, _EMPTY_, nil, nil)

	const activityInterval = 5 * time.Second
	notActive := time.NewTimer(activityInterval)
	defer notActive.Stop()

	// Setup sequences to walk through.
	seq, last := sreq.FirstSeq, sreq.LastSeq
	mset.setCatchupPeer(sreq.Peer, last-seq)
	defer mset.clearCatchupPeer(sreq.Peer)

	sendNextBatch := func() {
		for ; seq <= last && atomic.LoadInt64(&outb) <= maxOutBytes && atomic.LoadInt32(&outm) <= maxOutMsgs; seq++ {
			subj, hdr, msg, ts, err := mset.store.LoadMsg(seq)
			// if this is not a deleted msg, bail out.
			if err != nil && err != ErrStoreMsgNotFound && err != errDeletedMsg {
				// break, something changed.
				seq = last + 1
				return
			}
			// S2?
			em := encodeStreamMsg(subj, _EMPTY_, hdr, msg, seq, ts)
			// Place size in reply subject for flow control.
			reply := fmt.Sprintf(ackReplyT, len(em))
			atomic.AddInt64(&outb, int64(len(em)))
			atomic.AddInt32(&outm, 1)
			s.sendInternalMsgLocked(sendSubject, reply, nil, em)
		}
	}

	// Grab stream quit channel.
	mset.mu.RLock()
	qch := mset.qch
	mset.mu.RUnlock()
	if qch == nil {
		return
	}

	// Run as long as we are still active and need catchup.
	// FIXME(dlc) - Purge event? Stream delete?
	for {
		select {
		case <-s.quitCh:
			return
		case <-qch:
			return
		case <-notActive.C:
			s.Warnf("Catchup for stream '%s > %s' stalled", mset.account(), mset.name())
			return
		case <-nextBatchC:
			// Update our activity timer.
			notActive.Reset(activityInterval)
			sendNextBatch()
			// Check if we are finished.
			if seq > last {
				s.Debugf("Done resync for stream '%s > %s'", mset.account(), mset.name())
				return
			}
		}
	}
}
// syncSubjForStream returns a unique, randomized subject on which a stream
// listens for catchup sync requests.
func syncSubjForStream() string {
	return syncSubject("$JSC.SYNC")
}
// syncReplySubject returns a unique, randomized reply subject for receiving
// catchup messages.
func syncReplySubject() string {
	return syncSubject("$JSC.R")
}
// infoReplySubject returns a unique, randomized reply subject for info requests.
// NOTE(review): uses the same "$JSC.R" prefix as syncReplySubject — presumably
// deliberate sharing of the reply namespace, but confirm it should not be a
// distinct prefix.
func infoReplySubject() string {
	return syncSubject("$JSC.R")
}
// syncAckSubject returns a randomized ack subject with a trailing wildcard;
// senders fill the last token with the message size for flow control
// (see runCatchup).
func syncAckSubject() string {
	return syncSubject("$JSC.ACK") + ".*"
}
func syncSubject(pre string) string {
var sb strings.Builder
sb.WriteString(pre)
sb.WriteByte(btsep)
var b [replySuffixLen]byte
rn := rand.Int63()
for i, l := 0, rn; i < len(b); i++ {
b[i] = digits[l%base]
l /= base
}
sb.Write(b[:])
return sb.String()
}
// Subject templates for cluster-internal JetStream requests and account
// resource-usage updates. The %s placeholders are filled in by the callers;
// NOTE(review): exact argument semantics (account hash vs. name, stream,
// consumer) should be confirmed against the call sites.
const (
	clusterStreamInfoT   = "$JSC.SI.%s.%s"
	clusterConsumerInfoT = "$JSC.CI.%s.%s.%s"
	jsaUpdatesSubT       = "$JSC.ARU.%s.*"
	jsaUpdatesPubT       = "$JSC.ARU.%s.%s"
)
| 1 | 12,895 | Should we put in which account we were trying to look up? | nats-io-nats-server | go |
@@ -55,6 +55,7 @@ class InventoryTypeClass(object):
RESOURCE = 'resource'
IAM_POLICY = 'iam_policy'
GCS_POLICY = 'gcs_policy'
+ Supported_TypeClass = [RESOURCE, IAM_POLICY, GCS_POLICY]
class InventoryIndex(BASE): | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Inventory storage implementation. """
import datetime
import json
from sqlalchemy import Column
from sqlalchemy import Text
from sqlalchemy import DateTime
from sqlalchemy import Integer
from sqlalchemy import and_
from sqlalchemy.orm import aliased
from sqlalchemy.ext.declarative import declarative_base
from google.cloud.security.iam.inventory.inventory2.storage import \
Storage as BaseStorage
# TODO: Remove this when time allows
# pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc
# pylint: disable=missing-param-doc
BASE = declarative_base()
CURRENT_SCHEMA = 1
PER_YIELD = 1024
class InventoryState(object):
"""Possible states for inventory."""
SUCCESS = 'SUCCESS'
RUNNING = 'RUNNING'
FAILURE = 'FAILURE'
PARTIAL_SUCCESS = 'PARTIAL_SUCCESS'
TIMEOUT = 'TIMEOUT'
CREATED = 'CREATED'
class InventoryTypeClass(object):
"""Inventory Type Classes."""
RESOURCE = 'resource'
IAM_POLICY = 'iam_policy'
GCS_POLICY = 'gcs_policy'
class InventoryIndex(BASE):
"""Represents a GCP inventory."""
__tablename__ = 'inventory_index'
id = Column(Integer(), primary_key=True, autoincrement=True)
start_time = Column(DateTime())
complete_time = Column(DateTime())
status = Column(Text())
schema_version = Column(Integer())
progress = Column(Text())
counter = Column(Integer())
warnings = Column(Text())
errors = Column(Text())
message = Column(Text())
@classmethod
def _utcnow(cls):
"""Return current time in utc.
Returns:
object: UTC now time object.
"""
return datetime.datetime.utcnow()
def __repr__(self):
"""Object string representation.
Returns:
str: String representation of the object.
"""
return """<{}(id='{}', version='{}', timestamp='{}')>""".format(
self.__class__.__name__,
self.id,
self.schema_version,
self.start_time)
@classmethod
def create(cls):
"""Create a new inventory index row.
Returns:
object: InventoryIndex row object.
"""
return InventoryIndex(
start_time=cls._utcnow(),
complete_time=datetime.datetime.utcfromtimestamp(0),
status=InventoryState.CREATED,
schema_version=CURRENT_SCHEMA,
counter=0)
def complete(self, status=InventoryState.SUCCESS):
"""Mark the inventory as completed with a final status.
Args:
status (str): Final status.
"""
self.complete_time = InventoryIndex._utcnow()
self.status = status
def add_warning(self, session, warning):
"""Add a warning to the inventory.
Args:
session (object): session object to work on.
warning (str): Warning message
"""
warning_message = '{}\n'.format(warning)
if not self.warnings:
self.warnings = warning_message
else:
self.warnings += warning_message
session.add(self)
session.flush()
def set_error(self, session, message):
"""Indicate a broken import.
Args:
session (object): session object to work on.
message (str): Error message to set.
"""
self.message = message
session.add(self)
session.flush()
class Inventory(BASE):
"""Resource inventory table."""
__tablename__ = 'gcp_inventory'
# Order is used to resemble the order of insert for a given inventory
order = Column(Integer, primary_key=True, autoincrement=True)
index = Column(Integer)
type_class = Column(Text)
key = Column(Text)
type = Column(Text)
data = Column(Text)
parent_key = Column(Text)
parent_type = Column(Text)
other = Column(Text)
error = Column(Text)
@classmethod
def from_resource(cls, index, resource):
"""Creates a database row object from a crawled resource.
Args:
index (int): Inventory index number to associate.
resource (object): Crawled resource.
Returns:
object: database row object.
"""
parent = resource.parent()
iam_policy = resource.getIamPolicy()
gcs_policy = resource.getGCSPolicy()
rows = []
rows.append(
Inventory(
index=index.id,
type_class=InventoryTypeClass.RESOURCE,
key=resource.key(),
type=resource.type(),
data=json.dumps(resource.data()),
parent_key=None if not parent else parent.key(),
parent_type=None if not parent else parent.type(),
other=None,
error=None))
if iam_policy:
rows.append(
Inventory(
index=index.id,
type_class=InventoryTypeClass.IAM_POLICY,
key=resource.key(),
type=resource.type(),
data=json.dumps(iam_policy),
parent_key=resource.key(),
parent_type=resource.type(),
other=None,
error=None))
if gcs_policy:
rows.append(
Inventory(
index=index.id,
type_class=InventoryTypeClass.GCS_POLICY,
key=resource.key(),
type=resource.type(),
data=json.dumps(gcs_policy),
parent_key=resource.key(),
parent_type=resource.type(),
other=None,
error=None))
return rows
@classmethod
def update_resource(cls, resource):
resource._row.error = resource._warning
def __repr__(self):
"""String representation of the database row object."""
return """<{}(index='{}', key='{}', type='{}')>""".format(
self.__class__.__name__,
self.index,
self.key,
self.type)
def get_key(self):
"""Get the row's resource key.
Returns:
str: resource key.
"""
return self.key
def get_type(self):
"""Get the row's resource type.
Returns:
str: resource type.
"""
return self.type
def get_parent_key(self):
"""Get the row's parent key.
Returns:
str: parent key.
"""
return self.parent_key
def get_parent_type(self):
"""Get the row's parent type.
Returns:
str: parent type.
"""
return self.parent_type
def get_data(self):
"""Get the row's raw data.
Returns:
dict: row's raw data.
"""
return json.loads(self.data)
def get_other(self):
"""Get the row's other data.
Returns:
dict: row's other data.
"""
return json.loads(self.other)
def get_error(self):
"""Get the row's error data.
Returns:
str: row's error data.
"""
return self.error
class BufferedDbWriter(object):
"""Buffered db writing."""
def __init__(self, session, max_size=1024):
self.session = session
self.buffer = []
self.max_size = max_size
def add(self, obj):
"""Add an object to the buffer to write to db.
Args:
obj (object): Object to write to db.
"""
self.buffer.append(obj)
if self.buffer >= self.max_size:
self.flush()
def flush(self):
"""Flush all pending objects to the database."""
self.session.add_all(self.buffer)
self.session.flush()
self.buffer = []
class DataAccess(object):
"""Access to inventory for services."""
@classmethod
def delete(cls, session, inventory_id):
"""Delete an inventory index entry by id.
Args:
session (object): Database session.
inventory_id (int): Id specifying which inventory to delete.
Raises:
Exception: Reraises any exception.
"""
try:
result = cls.get(session, inventory_id)
session.query(Inventory).filter(
Inventory.index == inventory_id).delete()
session.query(InventoryIndex).filter(
InventoryIndex.id == inventory_id).delete()
session.commit()
return result
except Exception:
session.rollback()
raise
@classmethod
def list(cls, session):
"""List all inventory index entries.
Args:
session (object): Database session
Yields:
InventoryIndex: Generates each row
"""
for row in session.query(InventoryIndex).yield_per(PER_YIELD):
session.expunge(row)
yield row
@classmethod
def get(cls, session, inventory_id):
"""Get an inventory index entry by id.
Args:
session (object): Database session
inventory_id (int): Inventory id
Returns:
InventoryIndex: Entry corresponding the id
"""
result = (
session.query(InventoryIndex)
.filter(InventoryIndex.id == inventory_id)
.one())
session.expunge(result)
return result
def initialize(engine):
"""Create all tables in the database if not existing.
Args:
engine (object): Database engine to operate on.
"""
BASE.metadata.create_all(engine)
class Storage(BaseStorage):
"""Inventory storage used during creation."""
def __init__(self, session, existing_id=None, readonly=False):
self.session = session
self.opened = False
self.index = None
self.buffer = BufferedDbWriter(self.session)
self._existing_id = existing_id
self.session_completed = False
self.readonly = readonly
def _require_opened(self):
"""Make sure the storage is in 'open' state.
Raises:
Exception: If storage is not opened.
"""
if not self.opened:
raise Exception('Storage is not opened')
def _create(self):
"""Create a new inventory.
Returns:
int: Index number of the created inventory.
Raises:
Exception: Reraises any exception.
"""
try:
index = InventoryIndex.create()
self.session.add(index)
except Exception:
self.session.rollback()
raise
else:
return index
def _open(self, existing_id):
"""Open an existing inventory.
Returns:
object: The inventory db row.
"""
return (
self.session.query(InventoryIndex)
.filter(InventoryIndex.id == existing_id)
.filter(InventoryIndex.status.in_(
[InventoryState.SUCCESS, InventoryState.PARTIAL_SUCCESS]))
.one())
def open(self, handle=None):
"""Open the storage, potentially create a new index.
Args:
handle (int): If None, create a new index instead
of opening an existing one.
Returns:
int: Index number of the opened or created inventory.
Raises:
Exception: if open was called more than once
"""
existing_id = handle
if self.opened:
raise Exception('open called before')
# existing_id in open overrides potential constructor given id
existing_id = existing_id if existing_id else self._existing_id
# Should we create a new entry or are we opening an existing one?
if existing_id:
self.index = self._open(existing_id)
else:
self.index = self._create()
self.opened = True
self.session.commit()
if not self.readonly:
self.session.begin_nested()
return self.index.id
def rollback(self):
"""Roll back the stored inventory, but keep the index entry."""
try:
self.buffer.flush()
self.session.rollback()
self.index.complete(status=InventoryState.FAILURE)
self.session.commit()
finally:
self.session_completed = True
def commit(self):
"""Commit the stored inventory."""
try:
self.buffer.flush()
self.session.commit()
self.index.complete()
self.session.commit()
finally:
self.session_completed = True
def close(self):
"""Close the storage.
Raises:
Exception: If the storage was not opened before or
if the storage is writeable but neither
rollback nor commit has been called.
"""
if not self.opened:
raise Exception('not open')
if not self.readonly and not self.session_completed:
raise Exception('Need to perform commit or rollback before close')
self.opened = False
def write(self, resource):
"""Write a resource to the storage and updates its row
Args:
resource (object): Resource object to store in db.
Raises:
Exception: If storage was opened readonly.
"""
if self.readonly:
raise Exception('Opened storage readonly')
rows = Inventory.from_resource(self.index, resource)
for row in rows:
self.buffer.add(row)
self.index.counter += len(rows)
resource._row = rows[0]
def update(self, resource):
"""Update a resource in the storage.
Args:
resource (object): Resource object to store in db.
Raises:
Exception: If storage was opened readonly.
"""
if self.readonly:
raise Exception('Opened storage readonly')
Inventory.update_resource(resource)
self.buffer.add(resource._row)
def read(self, key):
"""Read a resource from the storage.
Args:
key (str): Key of the object to read.
Returns:
object: Row object read from database.
"""
self.buffer.flush()
return (
self.session.query(Inventory)
.filter(Inventory.index == self.index.id)
.filter(Inventory.key == key)
.one())
def error(self, message):
"""Store a fatal error in storage. This will help debug problems.
Args:
message (str): Error message describing the problem.
Raises:
Exception: If the storage was opened readonly.
"""
if self.readonly:
raise Exception('Opened storage readonly')
self.index.set_error(self.session, message)
def warning(self, message):
"""Store a fatal error in storage. This will help debug problems.
Args:
message (str): Error message describing the problem.
Raises:
Exception: If the storage was opened readonly.
"""
if self.readonly:
raise Exception('Opened storage readonly')
self.index.add_warning(self.session, message)
def iter(self,
type_list=None,
fetch_iam_policy=False,
fetch_gcs_policy=False,
with_parent=False):
"""Iterate the objects in the storage.
Args:
type_list (list): List of types to iterate over, or [] for all.
fetch_iam_policy (bool): Yield iam policies.
with_parent (bool): Join parent with results, yield tuples.
Yields:
object: Single row object or child/parent if 'with_parent' is set.
"""
filters = []
filters.append(Inventory.index == self.index.id)
if fetch_iam_policy:
filters.append(
Inventory.type_class == InventoryTypeClass.IAM_POLICY)
elif fetch_gcs_policy:
filters.append(
Inventory.type_class == InventoryTypeClass.GCS_POLICY)
else:
filters.append(
Inventory.type_class == InventoryTypeClass.RESOURCE)
if type_list:
filters.append(Inventory.type.in_(type_list))
if with_parent:
parent_inventory = aliased(Inventory)
p_key = parent_inventory.key
p_type = parent_inventory.type
base_query = (
self.session.query(Inventory, parent_inventory)
.filter(
and_(
Inventory.parent_key == p_key,
Inventory.parent_type == p_type,
parent_inventory.index == self.index.id)))
else:
base_query = self.session.query(Inventory)
for qry_filter in filters:
base_query = base_query.filter(qry_filter)
base_query = base_query.order_by(Inventory.order.asc())
for row in base_query.yield_per(PER_YIELD):
yield row
def __enter__(self):
"""To support with statement for auto closing."""
self.open()
return self
def __exit__(self, type_p, value, traceback):
"""To support with statement for auto closing.
Args:
type_p (object): Unused.
value (object): Unused.
traceback (object): Unused.
"""
self.close()
| 1 | 28,012 | either SUPPORTED_TYPECLASS or supported_typeclass. Camelcase only for class names. | forseti-security-forseti-security | py |
@@ -31,6 +31,7 @@ CREATE TABLE notes (
is_todo INT NOT NULL DEFAULT 0,
todo_due INT NOT NULL DEFAULT 0,
todo_completed INT NOT NULL DEFAULT 0,
+ pinned INT NOT NULL DEFAULT 0,
source TEXT NOT NULL DEFAULT "",
source_application TEXT NOT NULL DEFAULT "",
application_data TEXT NOT NULL DEFAULT "", | 1 | const { promiseChain } = require('lib/promise-utils.js');
const { Database } = require('lib/database.js');
const { sprintf } = require('sprintf-js');
const Resource = require('lib/models/Resource');
const { shim } = require('lib/shim.js');
const structureSql = `
CREATE TABLE folders (
id TEXT PRIMARY KEY,
title TEXT NOT NULL DEFAULT "",
created_time INT NOT NULL,
updated_time INT NOT NULL
);
CREATE INDEX folders_title ON folders (title);
CREATE INDEX folders_updated_time ON folders (updated_time);
CREATE TABLE notes (
id TEXT PRIMARY KEY,
parent_id TEXT NOT NULL DEFAULT "",
title TEXT NOT NULL DEFAULT "",
body TEXT NOT NULL DEFAULT "",
created_time INT NOT NULL,
updated_time INT NOT NULL,
is_conflict INT NOT NULL DEFAULT 0,
latitude NUMERIC NOT NULL DEFAULT 0,
longitude NUMERIC NOT NULL DEFAULT 0,
altitude NUMERIC NOT NULL DEFAULT 0,
author TEXT NOT NULL DEFAULT "",
source_url TEXT NOT NULL DEFAULT "",
is_todo INT NOT NULL DEFAULT 0,
todo_due INT NOT NULL DEFAULT 0,
todo_completed INT NOT NULL DEFAULT 0,
source TEXT NOT NULL DEFAULT "",
source_application TEXT NOT NULL DEFAULT "",
application_data TEXT NOT NULL DEFAULT "",
\`order\` INT NOT NULL DEFAULT 0
);
CREATE INDEX notes_title ON notes (title);
CREATE INDEX notes_updated_time ON notes (updated_time);
CREATE INDEX notes_is_conflict ON notes (is_conflict);
CREATE INDEX notes_is_todo ON notes (is_todo);
CREATE INDEX notes_order ON notes (\`order\`);
CREATE TABLE tags (
id TEXT PRIMARY KEY,
title TEXT NOT NULL DEFAULT "",
created_time INT NOT NULL,
updated_time INT NOT NULL
);
CREATE INDEX tags_title ON tags (title);
CREATE INDEX tags_updated_time ON tags (updated_time);
CREATE TABLE note_tags (
id TEXT PRIMARY KEY,
note_id TEXT NOT NULL,
tag_id TEXT NOT NULL,
created_time INT NOT NULL,
updated_time INT NOT NULL
);
CREATE INDEX note_tags_note_id ON note_tags (note_id);
CREATE INDEX note_tags_tag_id ON note_tags (tag_id);
CREATE INDEX note_tags_updated_time ON note_tags (updated_time);
CREATE TABLE resources (
id TEXT PRIMARY KEY,
title TEXT NOT NULL DEFAULT "",
mime TEXT NOT NULL,
filename TEXT NOT NULL DEFAULT "",
created_time INT NOT NULL,
updated_time INT NOT NULL
);
CREATE INDEX resources_title ON resources (title);
CREATE INDEX resources_updated_time ON resources (updated_time);
CREATE TABLE settings (
\`key\` TEXT PRIMARY KEY,
\`value\` TEXT,
\`type\` INT NOT NULL
);
CREATE TABLE table_fields (
id INTEGER PRIMARY KEY,
table_name TEXT NOT NULL,
field_name TEXT NOT NULL,
field_type INT NOT NULL,
field_default TEXT
);
CREATE TABLE sync_items (
id INTEGER PRIMARY KEY,
sync_target INT NOT NULL,
sync_time INT NOT NULL DEFAULT 0,
item_type INT NOT NULL,
item_id TEXT NOT NULL
);
CREATE INDEX sync_items_sync_time ON sync_items (sync_time);
CREATE INDEX sync_items_sync_target ON sync_items (sync_target);
CREATE INDEX sync_items_item_type ON sync_items (item_type);
CREATE INDEX sync_items_item_id ON sync_items (item_id);
CREATE TABLE deleted_items (
id INTEGER PRIMARY KEY,
item_type INT NOT NULL,
item_id TEXT NOT NULL,
deleted_time INT NOT NULL
);
CREATE TABLE version (
version INT NOT NULL
);
INSERT INTO version (version) VALUES (1);
`;
/**
 * Database subclass that owns the Joplin schema: initial creation, versioned
 * migrations, and a cached copy of every table's field metadata.
 */
class JoplinDatabase extends Database {
    constructor(driver) {
        super(driver);
        this.initialized_ = false;
        // Schema cache: { tableName: [{ name, type, default }, ...] }.
        // Populated from the `table_fields` table in initialize().
        this.tableFields_ = null;
        // Current database schema version, set in initialize().
        this.version_ = null;
    }

    // Whether initialize() has completed.
    // NOTE(review): `initialized_` is never set to true in this file — confirm
    // whether a subclass or caller is responsible for setting it.
    initialized() {
        return this.initialized_;
    }

    // Opens the underlying driver, then creates/migrates the schema.
    async open(options) {
        await super.open(options);
        return this.initialize();
    }

    // Returns the list of field names for the given table.
    tableFieldNames(tableName) {
        const tf = this.tableFields(tableName);
        const output = [];
        for (let i = 0; i < tf.length; i++) {
            output.push(tf[i].name);
        }
        return output;
    }

    /**
     * Returns the cached field definitions for `tableName`.
     * Throws before initialize() has loaded the cache, or for unknown tables.
     * When options.includeDescription is true, each field also gets a
     * human-readable `description` property.
     * NOTE(review): slice() is a shallow copy, so assigning `description`
     * below also mutates the objects held in the cache — confirm intended.
     */
    tableFields(tableName, options = null) {
        if (options === null) options = {};

        if (!this.tableFields_) throw new Error('Fields have not been loaded yet');
        if (!this.tableFields_[tableName]) throw new Error(`Unknown table: ${tableName}`);
        const output = this.tableFields_[tableName].slice();

        if (options.includeDescription) {
            for (let i = 0; i < output.length; i++) {
                output[i].description = this.fieldDescription(tableName, output[i].name);
            }
        }

        return output;
    }

    // Empties all user-data tables and per-sync-target settings. Test-only.
    async clearForTesting() {
        const tableNames = [
            'notes',
            'folders',
            'resources',
            'tags',
            'note_tags',
            // 'master_keys',
            'item_changes',
            'note_resources',
            // 'settings',
            'deleted_items',
            'sync_items',
            'notes_normalized',
            'revisions',
            'resources_to_download',
            'key_values',
        ];

        const queries = [];
        for (const n of tableNames) {
            queries.push(`DELETE FROM ${n}`);
            queries.push(`DELETE FROM sqlite_sequence WHERE name="${n}"`); // Reset autoincremented IDs
        }

        queries.push('DELETE FROM settings WHERE key="sync.1.context"');
        queries.push('DELETE FROM settings WHERE key="sync.2.context"');
        queries.push('DELETE FROM settings WHERE key="sync.3.context"');
        queries.push('DELETE FROM settings WHERE key="sync.4.context"');
        queries.push('DELETE FROM settings WHERE key="sync.5.context"');
        queries.push('DELETE FROM settings WHERE key="sync.6.context"');
        queries.push('DELETE FROM settings WHERE key="sync.7.context"');

        queries.push('DELETE FROM settings WHERE key="revisionService.lastProcessedChangeId"');
        queries.push('DELETE FROM settings WHERE key="resourceService.lastProcessedChangeId"');
        queries.push('DELETE FROM settings WHERE key="searchEngine.lastProcessedChangeId"');

        await this.transactionExecBatch(queries);
    }

    // Builds a row pre-filled with each field's default value.
    // NOTE(review): the table name is hard-coded to 'resource_local_states'
    // even though the method name suggests a generic helper — confirm callers.
    createDefaultRow() {
        const row = {};
        const fields = this.tableFields('resource_local_states');
        for (let i = 0; i < fields.length; i++) {
            const f = fields[i];
            row[f.name] = Database.formatValue(f.type, f.default);
        }
        return row;
    }

    // Returns a human-readable description of a field, or '' when none is
    // defined. The description table is built lazily on first call.
    fieldDescription(tableName, fieldName) {
        const sp = sprintf;

        if (!this.tableDescriptions_) {
            this.tableDescriptions_ = {
                notes: {
                    parent_id: sp('ID of the notebook that contains this note. Change this ID to move the note to a different notebook.'),
                    body: sp('The note body, in Markdown. May also contain HTML.'),
                    is_conflict: sp('Tells whether the note is a conflict or not.'),
                    is_todo: sp('Tells whether this note is a todo or not.'),
                    todo_due: sp('When the todo is due. An alarm will be triggered on that date.'),
                    todo_completed: sp('Tells whether todo is completed or not. This is a timestamp in milliseconds.'),
                    source_url: sp('The full URL where the note comes from.'),
                },
                folders: {},
                resources: {},
                tags: {},
            };

            // Shared descriptions for the fields every base item type has.
            const baseItems = ['notes', 'folders', 'tags', 'resources'];
            for (let i = 0; i < baseItems.length; i++) {
                const n = baseItems[i];
                const singular = n.substr(0, n.length - 1);
                this.tableDescriptions_[n].title = sp('The %s title.', singular);
                this.tableDescriptions_[n].created_time = sp('When the %s was created.', singular);
                this.tableDescriptions_[n].updated_time = sp('When the %s was last updated.', singular);
                this.tableDescriptions_[n].user_created_time = sp('When the %s was created. It may differ from created_time as it can be manually set by the user.', singular);
                this.tableDescriptions_[n].user_updated_time = sp('When the %s was last updated. It may differ from updated_time as it can be manually set by the user.', singular);
            }
        }

        const d = this.tableDescriptions_[tableName];
        return d && d[fieldName] ? d[fieldName] : '';
    }

    // Rebuilds the `table_fields` table from the live schema (PRAGMA
    // table_info) so the field cache matches the tables after a migration.
    refreshTableFields() {
        this.logger().info('Initializing tables...');
        const queries = [];
        queries.push(this.wrapQuery('DELETE FROM table_fields'));

        return this.selectAll('SELECT name FROM sqlite_master WHERE type="table"')
            .then(tableRows => {
                const chain = [];
                for (let i = 0; i < tableRows.length; i++) {
                    const tableName = tableRows[i].name;
                    // Skip internal/bookkeeping tables and the FTS shadow tables.
                    if (tableName == 'android_metadata') continue;
                    if (tableName == 'table_fields') continue;
                    if (tableName == 'sqlite_sequence') continue;
                    if (tableName.indexOf('notes_fts') === 0) continue;
                    chain.push(() => {
                        return this.selectAll(`PRAGMA table_info("${tableName}")`).then(pragmas => {
                            for (let i = 0; i < pragmas.length; i++) {
                                const item = pragmas[i];
                                // In SQLite, if the default value is a string it has double quotes around it, so remove them here
                                let defaultValue = item.dflt_value;
                                if (typeof defaultValue == 'string' && defaultValue.length >= 2 && defaultValue[0] == '"' && defaultValue[defaultValue.length - 1] == '"') {
                                    defaultValue = defaultValue.substr(1, defaultValue.length - 2);
                                }
                                const q = Database.insertQuery('table_fields', {
                                    table_name: tableName,
                                    field_name: item.name,
                                    field_type: Database.enumId('fieldType', item.type),
                                    field_default: defaultValue,
                                });
                                queries.push(q);
                            }
                        });
                    });
                }

                return promiseChain(chain);
            })
            .then(() => {
                return this.transactionExecBatch(queries);
            });
    }

    // Builds the query that records a data migration in the `migrations` table.
    addMigrationFile(num) {
        const timestamp = Date.now();
        return { sql: 'INSERT INTO migrations (number, created_time, updated_time) VALUES (?, ?, ?)', params: [num, timestamp, timestamp] };
    }

    /**
     * Upgrades the schema from `fromVersion` to the latest known version,
     * applying each version's queries in a transaction, one version at a time.
     * Returns the version actually reached.
     */
    async upgradeDatabase(fromVersion) {
        // INSTRUCTIONS TO UPGRADE THE DATABASE:
        //
        // 1. Add the new version number to the existingDatabaseVersions array
        // 2. Add the upgrade logic to the "switch (targetVersion)" statement below
        // IMPORTANT:
        //
        // Whenever adding a new database property, some additional logic might be needed
        // in the synchronizer to handle this property. For example, when adding a property
        // that should have a default value, existing remote items will not have this
        // default value and thus might cause problems. In that case, the default value
        // must be set in the synchronizer too.
        // Note: v16 and v17 don't do anything. They were used to debug an issue.
        const existingDatabaseVersions = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28];

        let currentVersionIndex = existingDatabaseVersions.indexOf(fromVersion);

        // currentVersionIndex < 0 if for the case where an old version of Joplin used with a newer
        // version of the database, so that migration is not run in this case.
        if (currentVersionIndex < 0) {
            throw new Error(
                'Unknown profile version. Most likely this is an old version of Joplin, while the profile was created by a newer version. Please upgrade Joplin at https://joplinapp.org and try again.\n'
                + `Joplin version: ${shim.appVersion()}\n`
                + `Profile version: ${fromVersion}\n`
                + `Expected version: ${existingDatabaseVersions[existingDatabaseVersions.length - 1]}`);
        }

        // Already at the latest version: nothing to do.
        if (currentVersionIndex == existingDatabaseVersions.length - 1) return fromVersion;

        let latestVersion = fromVersion;

        while (currentVersionIndex < existingDatabaseVersions.length - 1) {
            const targetVersion = existingDatabaseVersions[currentVersionIndex + 1];
            this.logger().info(`Converting database to version ${targetVersion}`);

            let queries = [];

            if (targetVersion == 1) {
                // Fresh profile: create the full schema from structureSql.
                queries = this.wrapQueries(this.sqlStringToLines(structureSql));
            }

            if (targetVersion == 2) {
                const newTableSql = `
                    CREATE TABLE deleted_items (
                        id INTEGER PRIMARY KEY,
                        item_type INT NOT NULL,
                        item_id TEXT NOT NULL,
                        deleted_time INT NOT NULL,
                        sync_target INT NOT NULL
                    );
                `;

                queries.push({ sql: 'DROP TABLE deleted_items' });
                queries.push({ sql: this.sqlStringToLines(newTableSql)[0] });
                queries.push({ sql: 'CREATE INDEX deleted_items_sync_target ON deleted_items (sync_target)' });
            }

            if (targetVersion == 3) {
                queries = this.alterColumnQueries('settings', { key: 'TEXT PRIMARY KEY', value: 'TEXT' });
            }

            if (targetVersion == 4) {
                queries.push('INSERT INTO settings (`key`, `value`) VALUES (\'sync.3.context\', (SELECT `value` FROM settings WHERE `key` = \'sync.context\'))');
                queries.push('DELETE FROM settings WHERE `key` = "sync.context"');
            }

            if (targetVersion == 5) {
                // Add user-editable timestamps, initialized from the system ones.
                const tableNames = ['notes', 'folders', 'tags', 'note_tags', 'resources'];
                for (let i = 0; i < tableNames.length; i++) {
                    const n = tableNames[i];
                    queries.push(`ALTER TABLE ${n} ADD COLUMN user_created_time INT NOT NULL DEFAULT 0`);
                    queries.push(`ALTER TABLE ${n} ADD COLUMN user_updated_time INT NOT NULL DEFAULT 0`);
                    queries.push(`UPDATE ${n} SET user_created_time = created_time`);
                    queries.push(`UPDATE ${n} SET user_updated_time = updated_time`);
                    queries.push(`CREATE INDEX ${n}_user_updated_time ON ${n} (user_updated_time)`);
                }
            }

            if (targetVersion == 6) {
                queries.push('CREATE TABLE alarms (id INTEGER PRIMARY KEY AUTOINCREMENT, note_id TEXT NOT NULL, trigger_time INT NOT NULL)');
                queries.push('CREATE INDEX alarm_note_id ON alarms (note_id)');
            }

            if (targetVersion == 7) {
                queries.push('ALTER TABLE resources ADD COLUMN file_extension TEXT NOT NULL DEFAULT ""');
            }

            if (targetVersion == 8) {
                queries.push('ALTER TABLE sync_items ADD COLUMN sync_disabled INT NOT NULL DEFAULT "0"');
                queries.push('ALTER TABLE sync_items ADD COLUMN sync_disabled_reason TEXT NOT NULL DEFAULT ""');
            }

            if (targetVersion == 9) {
                // End-to-end encryption support: master keys + per-item cipher columns.
                const newTableSql = `
                    CREATE TABLE master_keys (
                        id TEXT PRIMARY KEY,
                        created_time INT NOT NULL,
                        updated_time INT NOT NULL,
                        source_application TEXT NOT NULL,
                        encryption_method INT NOT NULL,
                        checksum TEXT NOT NULL,
                        content TEXT NOT NULL
                    );
                `;
                queries.push(this.sqlStringToLines(newTableSql)[0]);
                const tableNames = ['notes', 'folders', 'tags', 'note_tags', 'resources'];
                for (let i = 0; i < tableNames.length; i++) {
                    const n = tableNames[i];
                    queries.push(`ALTER TABLE ${n} ADD COLUMN encryption_cipher_text TEXT NOT NULL DEFAULT ""`);
                    queries.push(`ALTER TABLE ${n} ADD COLUMN encryption_applied INT NOT NULL DEFAULT 0`);
                    queries.push(`CREATE INDEX ${n}_encryption_applied ON ${n} (encryption_applied)`);
                }
                queries.push('ALTER TABLE sync_items ADD COLUMN force_sync INT NOT NULL DEFAULT 0');
                queries.push('ALTER TABLE resources ADD COLUMN encryption_blob_encrypted INT NOT NULL DEFAULT 0');
            }

            // Shared by v10 and v11 (see the v11 comment below).
            const upgradeVersion10 = () => {
                const itemChangesTable = `
                    CREATE TABLE item_changes (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        item_type INT NOT NULL,
                        item_id TEXT NOT NULL,
                        type INT NOT NULL,
                        created_time INT NOT NULL
                    );
                `;

                const noteResourcesTable = `
                    CREATE TABLE note_resources (
                        id INTEGER PRIMARY KEY,
                        note_id TEXT NOT NULL,
                        resource_id TEXT NOT NULL,
                        is_associated INT NOT NULL,
                        last_seen_time INT NOT NULL
                    );
                `;

                queries.push(this.sqlStringToLines(itemChangesTable)[0]);
                queries.push('CREATE INDEX item_changes_item_id ON item_changes (item_id)');
                queries.push('CREATE INDEX item_changes_created_time ON item_changes (created_time)');
                queries.push('CREATE INDEX item_changes_item_type ON item_changes (item_type)');

                queries.push(this.sqlStringToLines(noteResourcesTable)[0]);
                queries.push('CREATE INDEX note_resources_note_id ON note_resources (note_id)');
                queries.push('CREATE INDEX note_resources_resource_id ON note_resources (resource_id)');

                queries.push({ sql: 'INSERT INTO item_changes (item_type, item_id, type, created_time) SELECT 1, id, 1, ? FROM notes', params: [Date.now()] });
            };

            if (targetVersion == 10) {
                upgradeVersion10();
            }

            if (targetVersion == 11) {
                // This trick was needed because Electron Builder incorrectly released a dev branch containing v10 as it was
                // still being developed, and the db schema was not final at that time. So this v11 was created to
                // make sure any invalid db schema that was accidentally created was deleted and recreated.
                queries.push('DROP TABLE item_changes');
                queries.push('DROP TABLE note_resources');
                upgradeVersion10();
            }

            if (targetVersion == 12) {
                queries.push('ALTER TABLE folders ADD COLUMN parent_id TEXT NOT NULL DEFAULT ""');
            }

            if (targetVersion == 13) {
                queries.push('ALTER TABLE resources ADD COLUMN fetch_status INT NOT NULL DEFAULT "2"');
                queries.push('ALTER TABLE resources ADD COLUMN fetch_error TEXT NOT NULL DEFAULT ""');
                queries.push({ sql: 'UPDATE resources SET fetch_status = ?', params: [Resource.FETCH_STATUS_DONE] });
            }

            if (targetVersion == 14) {
                // Move fetch state out of `resources` into its own local-only table.
                const resourceLocalStates = `
                    CREATE TABLE resource_local_states (
                        id INTEGER PRIMARY KEY,
                        resource_id TEXT NOT NULL,
                        fetch_status INT NOT NULL DEFAULT "2",
                        fetch_error TEXT NOT NULL DEFAULT ""
                    );
                `;

                queries.push(this.sqlStringToLines(resourceLocalStates)[0]);

                queries.push('INSERT INTO resource_local_states SELECT null, id, fetch_status, fetch_error FROM resources');

                queries.push('CREATE INDEX resource_local_states_resource_id ON resource_local_states (resource_id)');
                queries.push('CREATE INDEX resource_local_states_resource_fetch_status ON resource_local_states (fetch_status)');

                // Rebuild `resources` without the migrated columns.
                queries = queries.concat(
                    this.alterColumnQueries('resources', {
                        id: 'TEXT PRIMARY KEY',
                        title: 'TEXT NOT NULL DEFAULT ""',
                        mime: 'TEXT NOT NULL',
                        filename: 'TEXT NOT NULL DEFAULT ""',
                        created_time: 'INT NOT NULL',
                        updated_time: 'INT NOT NULL',
                        user_created_time: 'INT NOT NULL DEFAULT 0',
                        user_updated_time: 'INT NOT NULL DEFAULT 0',
                        file_extension: 'TEXT NOT NULL DEFAULT ""',
                        encryption_cipher_text: 'TEXT NOT NULL DEFAULT ""',
                        encryption_applied: 'INT NOT NULL DEFAULT 0',
                        encryption_blob_encrypted: 'INT NOT NULL DEFAULT 0',
                    })
                );
            }

            if (targetVersion == 15) {
                queries.push('CREATE VIRTUAL TABLE notes_fts USING fts4(content="notes", notindexed="id", id, title, body)');
                queries.push('INSERT INTO notes_fts(docid, id, title, body) SELECT rowid, id, title, body FROM notes WHERE is_conflict = 0 AND encryption_applied = 0');

                // Keep the content tables (notes) and the FTS table (notes_fts) in sync.
                // More info at https://www.sqlite.org/fts3.html#_external_content_fts4_tables_
                queries.push(`
                    CREATE TRIGGER notes_fts_before_update BEFORE UPDATE ON notes BEGIN
                        DELETE FROM notes_fts WHERE docid=old.rowid;
                    END;`);
                queries.push(`
                    CREATE TRIGGER notes_fts_before_delete BEFORE DELETE ON notes BEGIN
                        DELETE FROM notes_fts WHERE docid=old.rowid;
                    END;`);
                queries.push(`
                    CREATE TRIGGER notes_after_update AFTER UPDATE ON notes BEGIN
                        INSERT INTO notes_fts(docid, id, title, body) SELECT rowid, id, title, body FROM notes WHERE is_conflict = 0 AND encryption_applied = 0 AND new.rowid = notes.rowid;
                    END;`);
                queries.push(`
                    CREATE TRIGGER notes_after_insert AFTER INSERT ON notes BEGIN
                        INSERT INTO notes_fts(docid, id, title, body) SELECT rowid, id, title, body FROM notes WHERE is_conflict = 0 AND encryption_applied = 0 AND new.rowid = notes.rowid;
                    END;`);
            }

            if (targetVersion == 18) {
                // Replace the v15 FTS setup: index a normalized copy of the notes
                // (notes_normalized) instead of the notes table directly.
                const notesNormalized = `
                    CREATE TABLE notes_normalized (
                        id TEXT NOT NULL,
                        title TEXT NOT NULL DEFAULT "",
                        body TEXT NOT NULL DEFAULT ""
                    );
                `;

                queries.push(this.sqlStringToLines(notesNormalized)[0]);

                queries.push('CREATE INDEX notes_normalized_id ON notes_normalized (id)');

                queries.push('DROP TRIGGER IF EXISTS notes_fts_before_update');
                queries.push('DROP TRIGGER IF EXISTS notes_fts_before_delete');
                queries.push('DROP TRIGGER IF EXISTS notes_after_update');
                queries.push('DROP TRIGGER IF EXISTS notes_after_insert');

                queries.push('DROP TABLE IF EXISTS notes_fts');

                queries.push('CREATE VIRTUAL TABLE notes_fts USING fts4(content="notes_normalized", notindexed="id", id, title, body)');

                // Keep the content tables (notes) and the FTS table (notes_fts) in sync.
                // More info at https://www.sqlite.org/fts3.html#_external_content_fts4_tables_
                queries.push(`
                    CREATE TRIGGER notes_fts_before_update BEFORE UPDATE ON notes_normalized BEGIN
                        DELETE FROM notes_fts WHERE docid=old.rowid;
                    END;`);
                queries.push(`
                    CREATE TRIGGER notes_fts_before_delete BEFORE DELETE ON notes_normalized BEGIN
                        DELETE FROM notes_fts WHERE docid=old.rowid;
                    END;`);
                queries.push(`
                    CREATE TRIGGER notes_after_update AFTER UPDATE ON notes_normalized BEGIN
                        INSERT INTO notes_fts(docid, id, title, body) SELECT rowid, id, title, body FROM notes_normalized WHERE new.rowid = notes_normalized.rowid;
                    END;`);
                queries.push(`
                    CREATE TRIGGER notes_after_insert AFTER INSERT ON notes_normalized BEGIN
                        INSERT INTO notes_fts(docid, id, title, body) SELECT rowid, id, title, body FROM notes_normalized WHERE new.rowid = notes_normalized.rowid;
                    END;`);
            }

            if (targetVersion == 19) {
                // Note history: revision storage plus change-source tracking.
                const newTableSql = `
                    CREATE TABLE revisions (
                        id TEXT PRIMARY KEY,
                        parent_id TEXT NOT NULL DEFAULT "",
                        item_type INT NOT NULL,
                        item_id TEXT NOT NULL,
                        item_updated_time INT NOT NULL,
                        title_diff TEXT NOT NULL DEFAULT "",
                        body_diff TEXT NOT NULL DEFAULT "",
                        metadata_diff TEXT NOT NULL DEFAULT "",
                        encryption_cipher_text TEXT NOT NULL DEFAULT "",
                        encryption_applied INT NOT NULL DEFAULT 0,
                        updated_time INT NOT NULL,
                        created_time INT NOT NULL
                    );
                `;
                queries.push(this.sqlStringToLines(newTableSql)[0]);

                queries.push('CREATE INDEX revisions_parent_id ON revisions (parent_id)');
                queries.push('CREATE INDEX revisions_item_type ON revisions (item_type)');
                queries.push('CREATE INDEX revisions_item_id ON revisions (item_id)');
                queries.push('CREATE INDEX revisions_item_updated_time ON revisions (item_updated_time)');
                queries.push('CREATE INDEX revisions_updated_time ON revisions (updated_time)');

                queries.push('ALTER TABLE item_changes ADD COLUMN source INT NOT NULL DEFAULT 1');
                queries.push('ALTER TABLE item_changes ADD COLUMN before_change_item TEXT NOT NULL DEFAULT ""');
            }

            if (targetVersion == 20) {
                const newTableSql = `
                    CREATE TABLE migrations (
                        id INTEGER PRIMARY KEY,
                        number INTEGER NOT NULL,
                        updated_time INT NOT NULL,
                        created_time INT NOT NULL
                    );
                `;
                queries.push(this.sqlStringToLines(newTableSql)[0]);

                queries.push('ALTER TABLE resources ADD COLUMN `size` INT NOT NULL DEFAULT -1');
                queries.push(this.addMigrationFile(20));
            }

            if (targetVersion == 21) {
                queries.push('ALTER TABLE sync_items ADD COLUMN item_location INT NOT NULL DEFAULT 1');
            }

            if (targetVersion == 22) {
                const newTableSql = `
                    CREATE TABLE resources_to_download (
                        id INTEGER PRIMARY KEY,
                        resource_id TEXT NOT NULL,
                        updated_time INT NOT NULL,
                        created_time INT NOT NULL
                    );
                `;
                queries.push(this.sqlStringToLines(newTableSql)[0]);

                queries.push('CREATE INDEX resources_to_download_resource_id ON resources_to_download (resource_id)');
                queries.push('CREATE INDEX resources_to_download_updated_time ON resources_to_download (updated_time)');
            }

            if (targetVersion == 23) {
                const newTableSql = `
                    CREATE TABLE key_values (
                        id INTEGER PRIMARY KEY,
                        \`key\` TEXT NOT NULL,
                        \`value\` TEXT NOT NULL,
                        \`type\` INT NOT NULL,
                        updated_time INT NOT NULL
                    );
                `;
                queries.push(this.sqlStringToLines(newTableSql)[0]);

                queries.push('CREATE UNIQUE INDEX key_values_key ON key_values (key)');
            }

            if (targetVersion == 24) {
                queries.push('ALTER TABLE notes ADD COLUMN `markup_language` INT NOT NULL DEFAULT 1'); // 1: Markdown, 2: HTML
            }

            if (targetVersion == 25) {
                queries.push(`CREATE VIEW tags_with_note_count AS
                        SELECT tags.id as id, tags.title as title, tags.created_time as created_time, tags.updated_time as updated_time, COUNT(notes.id) as note_count
                        FROM tags
                            LEFT JOIN note_tags nt on nt.tag_id = tags.id
                            LEFT JOIN notes on notes.id = nt.note_id
                        WHERE notes.id IS NOT NULL
                        GROUP BY tags.id`);
            }

            if (targetVersion == 26) {
                const tableNames = ['notes', 'folders', 'tags', 'note_tags', 'resources'];
                for (let i = 0; i < tableNames.length; i++) {
                    const n = tableNames[i];
                    queries.push(`ALTER TABLE ${n} ADD COLUMN is_shared INT NOT NULL DEFAULT 0`);
                }
            }

            if (targetVersion == 27) {
                queries.push(this.addMigrationFile(27));
            }

            if (targetVersion == 28) {
                queries.push('CREATE INDEX resources_size ON resources(size)');
            }

            // Record the new version as part of the same batch.
            queries.push({ sql: 'UPDATE version SET version = ?', params: [targetVersion] });

            try {
                await this.transactionExecBatch(queries);
            } catch (error) {
                // FTS creation can legitimately fail (e.g. SQLite built without
                // FTS4); the app still works, just without full-text search.
                if (targetVersion === 15 || targetVersion === 18) {
                    this.logger().warn('Could not upgrade to database v15 or v18 - FTS feature will not be used', error);
                } else {
                    throw error;
                }
            }

            latestVersion = targetVersion;

            currentVersionIndex++;
        }

        return latestVersion;
    }

    // Probes for the notes_fts table; false when FTS is unavailable.
    async ftsEnabled() {
        try {
            await this.selectOne('SELECT count(*) FROM notes_fts');
        } catch (error) {
            this.logger().warn('FTS check failed', error);
            return false;
        }

        this.logger().info('FTS check succeeded');
        return true;
    }

    // Current schema version (set by initialize()).
    version() {
        return this.version_;
    }

    // Reads the schema version, runs any pending migrations, then loads the
    // table-field cache used by tableFields()/tableFieldNames().
    async initialize() {
        this.logger().info('Checking for database schema update...');

        let versionRow = null;
        try {
            // Will throw if the database has not been created yet, but this is handled below
            versionRow = await this.selectOne('SELECT * FROM version LIMIT 1');
        } catch (error) {
            if (error.message && error.message.indexOf('no such table: version') >= 0) {
                // Ignore
            } else {
                // NOTE(review): unexpected errors are only logged here and then
                // treated as "version 0" below — confirm this is intended.
                console.info(error);
            }
        }

        // Missing version table means a brand-new profile (version 0).
        const version = !versionRow ? 0 : versionRow.version;

        this.version_ = version;
        this.logger().info('Current database version', version);

        const newVersion = await this.upgradeDatabase(version);
        this.version_ = newVersion;

        // Only rebuild the field cache table when the schema actually changed.
        if (newVersion !== version) await this.refreshTableFields();

        this.tableFields_ = {};

        const rows = await this.selectAll('SELECT * FROM table_fields');

        for (let i = 0; i < rows.length; i++) {
            const row = rows[i];
            if (!this.tableFields_[row.table_name]) this.tableFields_[row.table_name] = [];
            this.tableFields_[row.table_name].push({
                name: row.field_name,
                type: row.field_type,
                default: Database.formatValue(row.field_type, row.field_default),
            });
        }
    }
}
// Column type identifiers used by Database.formatValue() and related helpers.
const fieldTypeIds = {
    TYPE_INT: 1,
    TYPE_TEXT: 2,
    TYPE_NUMERIC: 3,
};
Object.assign(Database, fieldTypeIds);

module.exports = { JoplinDatabase };
| 1 | 13,394 | This is not going to work. You need to add a migration to the database. | laurent22-joplin | js |
@@ -308,6 +308,7 @@ class Manager {
* @param {Object} apiClient The ApiClient.
*/
resumeGroupPlayback(apiClient) {
+ // TODO: rename this method, it's not clear what it does.
this.followGroupPlayback(apiClient).then(() => {
this.queueCore.startPlayback(apiClient);
}); | 1 | /**
* Module that manages the SyncPlay feature.
* @module components/syncPlay/core/Manager
*/
import { Events } from 'jellyfin-apiclient';
import * as Helper from './Helper';
import TimeSyncCore from './timeSync/TimeSyncCore';
import PlaybackCore from './PlaybackCore';
import QueueCore from './QueueCore';
import Controller from './Controller';
import toast from '../../toast/toast';
import globalize from '../../../scripts/globalize';
/**
* Class that manages the SyncPlay feature.
*/
class Manager {
/**
* Creates an instance of SyncPlay Manager.
* @param {PlayerFactory} playerFactory The PlayerFactory instance.
*/
constructor(playerFactory) {
    // Factory that produces player wrappers; kept for bindToPlayer().
    this.playerFactory = playerFactory;
    this.apiClient = null; // Set in init().

    // Sub-components; wired together in init().
    this.timeSyncCore = new TimeSyncCore();
    this.playbackCore = new PlaybackCore();
    this.queueCore = new QueueCore();
    this.controller = new Controller();

    this.syncMethod = 'None'; // Used for stats.

    this.groupInfo = null; // Joined group's info, null while not in a group.
    this.syncPlayEnabledAt = null; // Server time of when SyncPlay has been enabled.
    this.syncPlayReady = false; // SyncPlay is ready after first ping to server.
    this.queuedCommand = null; // Queued playback command, applied when SyncPlay is ready.
    this.followingGroupPlayback = true; // Follow or ignore group playback.
    this.lastPlaybackCommand = null; // Last received playback command from server, tracks state of group.

    this.currentPlayer = null; // Bound player, null until bindToPlayer().
    this.playerWrapper = null; // Replaced by the default wrapper in init().
}
/**
* Initialise SyncPlay.
* @param {Object} apiClient The ApiClient.
*/
init(apiClient) {
    if (!apiClient) {
        throw new Error('ApiClient is null!');
    }

    // Set ApiClient.
    this.apiClient = apiClient;

    // Get default player wrapper.
    this.playerWrapper = this.playerFactory.getDefaultWrapper(this);

    // Initialize components. Each receives this Manager as its hub.
    this.timeSyncCore.init(this);
    this.playbackCore.init(this);
    this.queueCore.init(this);
    this.controller.init(this);

    Events.on(this.timeSyncCore, 'time-sync-server-update', (event, timeOffset, ping) => {
        // Report ping back to server.
        // NOTE(review): `this.syncEnabled` is never assigned on Manager in
        // this file (PlaybackCore has its own `syncEnabled`) — verify this
        // guard is not always falsy.
        if (this.syncEnabled) {
            this.getApiClient().sendSyncPlayPing({
                Ping: ping
            });
        }
    });
}
/**
* Gets the time sync core.
* @returns {TimeSyncCore} The time sync core.
*/
getTimeSyncCore() {
return this.timeSyncCore;
}
/**
* Gets the playback core.
* @returns {PlaybackCore} The playback core.
*/
getPlaybackCore() {
return this.playbackCore;
}
/**
* Gets the queue core.
* @returns {QueueCore} The queue core.
*/
getQueueCore() {
return this.queueCore;
}
/**
* Gets the controller used to manage SyncPlay playback.
* @returns {Controller} The controller.
*/
getController() {
return this.controller;
}
/**
* Gets the player wrapper used to control local playback.
* @returns {SyncPlayGenericPlayer} The player wrapper.
*/
getPlayerWrapper() {
return this.playerWrapper;
}
/**
* Gets the ApiClient used to communicate with the server.
* @returns {Object} The ApiClient.
*/
getApiClient() {
return this.apiClient;
}
/**
* Gets the last playback command, if any.
* @returns {Object} The playback command.
*/
getLastPlaybackCommand() {
return this.lastPlaybackCommand;
}
/**
* Called when the player changes.
*/
onPlayerChange(newPlayer) {
this.bindToPlayer(newPlayer);
}
/**
* Binds to the player's events.
* @param {Object} player The player.
*/
bindToPlayer(player) {
this.releaseCurrentPlayer();
if (!player) {
return;
}
this.playerWrapper.unbindFromPlayer();
this.currentPlayer = player;
this.playerWrapper = this.playerFactory.getWrapper(player, this);
if (this.isSyncPlayEnabled()) {
this.playerWrapper.bindToPlayer();
}
Events.trigger(this, 'playerchange', [this.currentPlayer]);
}
/**
* Removes the bindings from the current player's events.
*/
releaseCurrentPlayer() {
this.currentPlayer = null;
this.playerWrapper.unbindFromPlayer();
this.playerWrapper = this.playerFactory.getDefaultWrapper(this);
if (this.isSyncPlayEnabled()) {
this.playerWrapper.bindToPlayer();
}
Events.trigger(this, 'playerchange', [this.currentPlayer]);
}
/**
* Handles a group update from the server.
* @param {Object} cmd The group update.
* @param {Object} apiClient The ApiClient.
*/
processGroupUpdate(cmd, apiClient) {
    // Dispatch on the update type sent by the server. Most branches only
    // surface a toast; the group-membership ones mutate local state.
    switch (cmd.Type) {
        case 'PlayQueue':
            this.queueCore.updatePlayQueue(apiClient, cmd.Data);
            break;
        case 'UserJoined':
            toast(globalize.translate('MessageSyncPlayUserJoined', cmd.Data));
            break;
        case 'UserLeft':
            toast(globalize.translate('MessageSyncPlayUserLeft', cmd.Data));
            break;
        case 'GroupJoined':
            // Server serializes dates as strings; parse before use.
            cmd.Data.LastUpdatedAt = new Date(cmd.Data.LastUpdatedAt);
            this.enableSyncPlay(apiClient, cmd.Data, true);
            break;
        case 'SyncPlayIsDisabled':
            toast(globalize.translate('MessageSyncPlayIsDisabled'));
            break;
        case 'NotInGroup':
        case 'GroupLeft':
            this.disableSyncPlay(true);
            break;
        case 'GroupUpdate':
            cmd.Data.LastUpdatedAt = new Date(cmd.Data.LastUpdatedAt);
            this.groupInfo = cmd.Data;
            break;
        case 'StateUpdate':
            Events.trigger(this, 'group-state-update', [cmd.Data.State, cmd.Data.Reason]);
            console.debug(`SyncPlay processGroupUpdate: state changed to ${cmd.Data.State} because ${cmd.Data.Reason}.`);
            break;
        case 'GroupDoesNotExist':
            toast(globalize.translate('MessageSyncPlayGroupDoesNotExist'));
            break;
        case 'CreateGroupDenied':
            toast(globalize.translate('MessageSyncPlayCreateGroupDenied'));
            break;
        case 'JoinGroupDenied':
            toast(globalize.translate('MessageSyncPlayJoinGroupDenied'));
            break;
        case 'LibraryAccessDenied':
            toast(globalize.translate('MessageSyncPlayLibraryAccessDenied'));
            break;
        default:
            console.error(`SyncPlay processGroupUpdate: command ${cmd.Type} not recognised.`);
            break;
    }
}
/**
* Handles a playback command from the server.
* @param {Object} cmd The playback command.
*/
processCommand(cmd) {
if (cmd === null) return;
if (typeof cmd.When === 'string') {
cmd.When = new Date(cmd.When);
cmd.EmittedAt = new Date(cmd.EmittedAt);
cmd.PositionTicks = cmd.PositionTicks ? parseInt(cmd.PositionTicks) : null;
}
if (!this.isSyncPlayEnabled()) {
console.debug('SyncPlay processCommand: SyncPlay not enabled, ignoring command.', cmd);
return;
}
if (cmd.EmittedAt.getTime() < this.syncPlayEnabledAt.getTime()) {
console.debug('SyncPlay processCommand: ignoring old command.', cmd);
return;
}
if (!this.syncPlayReady) {
console.debug('SyncPlay processCommand: SyncPlay not ready, queued command.', cmd);
this.queuedCommand = cmd;
return;
}
this.lastPlaybackCommand = cmd;
if (!this.isPlaybackActive()) {
console.debug('SyncPlay processCommand: no active player!');
return;
}
// Make sure command matches playing item in playlist.
const playlistItemId = this.queueCore.getCurrentPlaylistItemId();
if (cmd.PlaylistItemId !== playlistItemId && cmd.Command !== 'Stop') {
console.error('SyncPlay processCommand: playlist item does not match!', cmd);
return;
}
console.log(`SyncPlay will ${cmd.Command} at ${cmd.When} (in ${cmd.When.getTime() - Date.now()} ms)${cmd.PositionTicks ? '' : ' from ' + cmd.PositionTicks}.`);
this.playbackCore.applyCommand(cmd);
}
/**
* Handles a group state change.
* @param {Object} update The group state update.
*/
processStateChange(update) {
if (update === null || update.State === null || update.Reason === null) return;
if (!this.isSyncPlayEnabled()) {
console.debug('SyncPlay processStateChange: SyncPlay not enabled, ignoring group state update.', update);
return;
}
Events.trigger(this, 'group-state-change', [update.State, update.Reason]);
}
/**
* Notifies server that this client is following group's playback.
* @param {Object} apiClient The ApiClient.
* @returns {Promise} A Promise fulfilled upon request completion.
*/
followGroupPlayback(apiClient) {
this.followingGroupPlayback = true;
return apiClient.requestSyncPlaySetIgnoreWait({
IgnoreWait: false
});
}
/**
* Starts this client's playback and loads the group's play queue.
* @param {Object} apiClient The ApiClient.
*/
resumeGroupPlayback(apiClient) {
this.followGroupPlayback(apiClient).then(() => {
this.queueCore.startPlayback(apiClient);
});
}
/**
* Stops this client's playback and notifies server to be ignored in group wait.
* @param {Object} apiClient The ApiClient.
*/
haltGroupPlayback(apiClient) {
this.followingGroupPlayback = false;
apiClient.requestSyncPlaySetIgnoreWait({
IgnoreWait: true
});
this.playbackCore.localStop();
}
/**
* Whether this client is following group playback.
* @returns {boolean} _true_ if client should play group's content, _false_ otherwise.
*/
isFollowingGroupPlayback() {
return this.followingGroupPlayback;
}
/**
* Enables SyncPlay.
* @param {Object} apiClient The ApiClient.
* @param {Object} groupInfo The joined group's info.
* @param {boolean} showMessage Display message.
*/
enableSyncPlay(apiClient, groupInfo, showMessage = false) {
    if (this.isSyncPlayEnabled()) {
        if (groupInfo.GroupId === this.groupInfo.GroupId) {
            // Re-join of the same group: nothing to do.
            console.debug(`SyncPlay enableSyncPlay: group ${this.groupInfo.GroupId} already joined.`);
            return;
        } else {
            // Switching groups: tear down the old state first.
            console.warn(`SyncPlay enableSyncPlay: switching from group ${this.groupInfo.GroupId} to group ${groupInfo.GroupId}.`);
            this.disableSyncPlay(false);
        }

        showMessage = false;
    }

    this.groupInfo = groupInfo;

    // Enables SyncPlay (isSyncPlayEnabled() keys off this timestamp).
    this.syncPlayEnabledAt = groupInfo.LastUpdatedAt;
    this.playerWrapper.bindToPlayer();

    Events.trigger(this, 'enabled', [true]);

    // Wait for time sync to be ready.
    Helper.waitForEventOnce(this.timeSyncCore, 'time-sync-server-update').then(() => {
        this.syncPlayReady = true;
        // Replay any command that arrived before the time sync completed.
        // NOTE(review): processCommand(cmd) takes a single parameter; the
        // extra apiClient argument here is ignored — confirm.
        this.processCommand(this.queuedCommand, apiClient);
        this.queuedCommand = null;
    });

    this.syncPlayReady = false;
    this.followingGroupPlayback = true;

    this.timeSyncCore.forceUpdate();

    if (showMessage) {
        toast(globalize.translate('MessageSyncPlayEnabled'));
    }
}
/**
* Disables SyncPlay.
* @param {boolean} showMessage Display message.
*/
disableSyncPlay(showMessage = false) {
this.syncPlayEnabledAt = null;
this.syncPlayReady = false;
this.followingGroupPlayback = true;
this.lastPlaybackCommand = null;
this.queuedCommand = null;
this.playbackCore.syncEnabled = false;
Events.trigger(this, 'enabled', [false]);
this.playerWrapper.unbindFromPlayer();
if (showMessage) {
toast(globalize.translate('MessageSyncPlayDisabled'));
}
}
/**
* Gets SyncPlay status.
* @returns {boolean} _true_ if user joined a group, _false_ otherwise.
*/
isSyncPlayEnabled() {
return this.syncPlayEnabledAt !== null;
}
/**
* Gets the group information.
* @returns {Object} The group information, null if SyncPlay is disabled.
*/
getGroupInfo() {
return this.groupInfo;
}
/**
* Gets SyncPlay stats.
* @returns {Object} The SyncPlay stats.
*/
getStats() {
return {
TimeSyncDevice: this.timeSyncCore.getActiveDeviceName(),
TimeSyncOffset: this.timeSyncCore.getTimeOffset().toFixed(2),
PlaybackDiff: this.playbackCore.playbackDiffMillis.toFixed(2),
SyncMethod: this.syncMethod
};
}
/**
* Gets playback status.
* @returns {boolean} Whether a player is active.
*/
isPlaybackActive() {
return this.playerWrapper.isPlaybackActive();
}
/**
* Whether the player is remotely self-managed.
* @returns {boolean} _true_ if the player is remotely self-managed, _false_ otherwise.
*/
isRemote() {
return this.playerWrapper.isRemote();
}
/**
* Checks if playlist is empty.
* @returns {boolean} _true_ if playlist is empty, _false_ otherwise.
*/
isPlaylistEmpty() {
return this.queueCore.isPlaylistEmpty();
}
/**
* Checks if playback is unpaused.
* @returns {boolean} _true_ if media is playing, _false_ otherwise.
*/
isPlaying() {
if (!this.lastPlaybackCommand) {
return false;
} else {
return this.lastPlaybackCommand.Command === 'Unpause';
}
}
/**
* Emits an event to update the SyncPlay status icon.
*/
showSyncIcon(syncMethod) {
this.syncMethod = syncMethod;
Events.trigger(this, 'syncing', [true, this.syncMethod]);
}
/**
* Emits an event to clear the SyncPlay status icon.
*/
clearSyncIcon() {
this.syncMethod = 'None';
Events.trigger(this, 'syncing', [false, this.syncMethod]);
}
}
export default Manager;
| 1 | 18,945 | Should these methods be renamed in this PR? | jellyfin-jellyfin-web | js |
@@ -154,6 +154,18 @@ namespace OpenTelemetry.Metrics
{
var metricStreamConfig = metricStreamConfigs[i];
var metricStreamName = metricStreamConfig?.Name ?? instrument.Name;
+
+ if (!MeterProviderBuilderSdk.IsValidInstrumentName(metricStreamName))
+ {
+ OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(
+ instrument.Name,
+ instrument.Meter.Name,
+ "Metric name is invalid.",
+ "The name must comply with the OpenTelemetry specification.");
+
+ continue;
+ }
+
if (this.metricStreamNames.ContainsKey(metricStreamName))
{
// TODO: Log that instrument is ignored | 1 | // <copyright file="MeterProviderSdk.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.Metrics;
using System.Linq;
using System.Text.RegularExpressions;
using OpenTelemetry.Internal;
using OpenTelemetry.Resources;
namespace OpenTelemetry.Metrics
{
internal sealed class MeterProviderSdk : MeterProvider
{
internal const int MaxMetrics = 1000;
internal int ShutdownCount;
private readonly Metric[] metrics;
private readonly List<object> instrumentations = new List<object>();
private readonly List<Func<Instrument, MetricStreamConfiguration>> viewConfigs;
private readonly object collectLock = new object();
private readonly object instrumentCreationLock = new object();
private readonly Dictionary<string, bool> metricStreamNames = new Dictionary<string, bool>(StringComparer.OrdinalIgnoreCase);
private readonly MeterListener listener;
private readonly MetricReader reader;
private int metricIndex = -1;
internal MeterProviderSdk(
Resource resource,
IEnumerable<string> meterSources,
List<MeterProviderBuilderBase.InstrumentationFactory> instrumentationFactories,
List<Func<Instrument, MetricStreamConfiguration>> viewConfigs,
IEnumerable<MetricReader> readers)
{
this.Resource = resource;
this.viewConfigs = viewConfigs;
this.metrics = new Metric[MaxMetrics];
AggregationTemporality temporality = AggregationTemporality.Cumulative;
foreach (var reader in readers)
{
if (reader == null)
{
throw new ArgumentException("A null value was found.", nameof(readers));
}
reader.SetParentProvider(this);
// TODO: Actually support multiple readers.
// Currently the last reader's temporality wins.
temporality = reader.PreferredAggregationTemporality;
if (this.reader == null)
{
this.reader = reader;
}
else if (this.reader is CompositeMetricReader compositeReader)
{
compositeReader.AddReader(reader);
}
else
{
this.reader = new CompositeMetricReader(new[] { this.reader, reader });
}
}
if (instrumentationFactories.Any())
{
foreach (var instrumentationFactory in instrumentationFactories)
{
this.instrumentations.Add(instrumentationFactory.Factory());
}
}
// Setup Listener
Func<Instrument, bool> shouldListenTo = instrument => false;
if (meterSources.Any(s => s.Contains('*')))
{
var regex = GetWildcardRegex(meterSources);
shouldListenTo = instrument => regex.IsMatch(instrument.Meter.Name);
}
else if (meterSources.Any())
{
var meterSourcesToSubscribe = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
foreach (var meterSource in meterSources)
{
meterSourcesToSubscribe.Add(meterSource);
}
shouldListenTo = instrument => meterSourcesToSubscribe.Contains(instrument.Meter.Name);
}
this.listener = new MeterListener();
var viewConfigCount = this.viewConfigs.Count;
if (viewConfigCount > 0)
{
this.listener.InstrumentPublished = (instrument, listener) =>
{
if (!shouldListenTo(instrument))
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(instrument.Name, instrument.Meter.Name, "Instrument belongs to a Meter not subscribed by the provider.", "Use AddMeter to add the Meter to the provider.");
return;
}
// Creating list with initial capacity as the maximum
// possible size, to avoid any array resize/copy internally.
// There may be excess space wasted, but it'll eligible for
// GC right after this method.
var metricStreamConfigs = new List<MetricStreamConfiguration>(viewConfigCount);
foreach (var viewConfig in this.viewConfigs)
{
var metricStreamConfig = viewConfig(instrument);
if (metricStreamConfig != null)
{
metricStreamConfigs.Add(metricStreamConfig);
}
}
if (metricStreamConfigs.Count == 0)
{
// No views matched. Add null
// which will apply defaults.
// Users can turn off this default
// by adding a view like below as the last view.
// .AddView(instrumentName: "*", new MetricStreamConfiguration() { Aggregation = Aggregation.Drop })
metricStreamConfigs.Add(null);
}
var maxCountMetricsToBeCreated = metricStreamConfigs.Count;
// Create list with initial capacity as the max metric count.
// Due to duplicate/max limit, we may not end up using them
// all, and that memory is wasted until Meter disposed.
// TODO: Revisit to see if we need to do metrics.TrimExcess()
var metrics = new List<Metric>(maxCountMetricsToBeCreated);
lock (this.instrumentCreationLock)
{
for (int i = 0; i < maxCountMetricsToBeCreated; i++)
{
var metricStreamConfig = metricStreamConfigs[i];
var metricStreamName = metricStreamConfig?.Name ?? instrument.Name;
if (this.metricStreamNames.ContainsKey(metricStreamName))
{
// TODO: Log that instrument is ignored
// as the resulting Metric name is conflicting
// with existing name.
continue;
}
if (metricStreamConfig?.Aggregation == Aggregation.Drop)
{
// TODO: Log that instrument is ignored
// as user explicitly asked to drop it
// with View.
continue;
}
var index = ++this.metricIndex;
if (index >= MaxMetrics)
{
// TODO: Log that instrument is ignored
// as max number of Metrics have reached.
}
else
{
Metric metric;
var metricDescription = metricStreamConfig?.Description ?? instrument.Description;
string[] tagKeysInteresting = metricStreamConfig?.TagKeys;
double[] histogramBucketBounds = (metricStreamConfig is HistogramConfiguration histogramConfig
&& histogramConfig.BucketBounds != null) ? histogramConfig.BucketBounds : null;
metric = new Metric(instrument, temporality, metricStreamName, metricDescription, histogramBucketBounds, tagKeysInteresting);
this.metrics[index] = metric;
metrics.Add(metric);
this.metricStreamNames.Add(metricStreamName, true);
}
}
if (metrics.Count > 0)
{
listener.EnableMeasurementEvents(instrument, metrics);
}
}
};
// Everything double
this.listener.SetMeasurementEventCallback<double>(this.MeasurementRecordedDouble);
this.listener.SetMeasurementEventCallback<float>((instrument, value, tags, state) => this.MeasurementRecordedDouble(instrument, value, tags, state));
// Everything long
this.listener.SetMeasurementEventCallback<long>(this.MeasurementRecordedLong);
this.listener.SetMeasurementEventCallback<int>((instrument, value, tags, state) => this.MeasurementRecordedLong(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<short>((instrument, value, tags, state) => this.MeasurementRecordedLong(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<byte>((instrument, value, tags, state) => this.MeasurementRecordedLong(instrument, value, tags, state));
}
else
{
this.listener.InstrumentPublished = (instrument, listener) =>
{
if (!shouldListenTo(instrument))
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(instrument.Name, instrument.Meter.Name, "Instrument belongs to a Meter not subscribed by the provider.", "Use AddMeter to add the Meter to the provider.");
return;
}
try
{
var metricName = instrument.Name;
Metric metric = null;
lock (this.instrumentCreationLock)
{
if (this.metricStreamNames.ContainsKey(metricName))
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(metricName, instrument.Meter.Name, "Metric name conflicting with existing name.", "Either change the name of the instrument or change name using View.");
return;
}
var index = ++this.metricIndex;
if (index >= MaxMetrics)
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(metricName, instrument.Meter.Name, "Maximum allowed Metrics for the provider exceeded.", "Use views to drop unused instruments. Or configure Provider to allow higher limit.");
return;
}
else
{
metric = new Metric(instrument, temporality, metricName, instrument.Description);
this.metrics[index] = metric;
this.metricStreamNames.Add(metricName, true);
}
}
listener.EnableMeasurementEvents(instrument, metric);
}
catch (Exception)
{
OpenTelemetrySdkEventSource.Log.MetricInstrumentIgnored(instrument.Name, instrument.Meter.Name, "SDK internal error occurred.", "Contact SDK owners.");
}
};
// Everything double
this.listener.SetMeasurementEventCallback<double>(this.MeasurementRecordedDoubleSingleStream);
this.listener.SetMeasurementEventCallback<float>((instrument, value, tags, state) => this.MeasurementRecordedDoubleSingleStream(instrument, value, tags, state));
// Everything long
this.listener.SetMeasurementEventCallback<long>(this.MeasurementRecordedLongSingleStream);
this.listener.SetMeasurementEventCallback<int>((instrument, value, tags, state) => this.MeasurementRecordedLongSingleStream(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<short>((instrument, value, tags, state) => this.MeasurementRecordedLongSingleStream(instrument, value, tags, state));
this.listener.SetMeasurementEventCallback<byte>((instrument, value, tags, state) => this.MeasurementRecordedLongSingleStream(instrument, value, tags, state));
}
this.listener.MeasurementsCompleted = (instrument, state) => this.MeasurementsCompleted(instrument, state);
this.listener.Start();
static Regex GetWildcardRegex(IEnumerable<string> collection)
{
var pattern = '^' + string.Join("|", from name in collection select "(?:" + Regex.Escape(name).Replace("\\*", ".*") + ')') + '$';
return new Regex(pattern, RegexOptions.Compiled | RegexOptions.IgnoreCase);
}
}
internal Resource Resource { get; }
internal List<object> Instrumentations => this.instrumentations;
internal MetricReader Reader => this.reader;
internal void MeasurementsCompleted(Instrument instrument, object state)
{
Console.WriteLine($"Instrument {instrument.Meter.Name}:{instrument.Name} completed.");
}
internal void MeasurementRecordedDouble(Instrument instrument, double value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metrics = state as List<Metric>;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metrics == null)
{
// TODO: log
return;
}
if (metrics.Count == 1)
{
// special casing the common path
// as this is faster than the
// foreach, when count is 1.
metrics[0].UpdateDouble(value, tagsRos);
}
else
{
foreach (var metric in metrics)
{
metric.UpdateDouble(value, tagsRos);
}
}
}
internal void MeasurementRecordedLong(Instrument instrument, long value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metrics = state as List<Metric>;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metrics == null)
{
// TODO: log
return;
}
if (metrics.Count == 1)
{
// special casing the common path
// as this is faster than the
// foreach, when count is 1.
metrics[0].UpdateLong(value, tagsRos);
}
else
{
foreach (var metric in metrics)
{
metric.UpdateLong(value, tagsRos);
}
}
}
internal void MeasurementRecordedLongSingleStream(Instrument instrument, long value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metric = state as Metric;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metric == null)
{
// TODO: log
return;
}
metric.UpdateLong(value, tagsRos);
}
internal void MeasurementRecordedDoubleSingleStream(Instrument instrument, double value, ReadOnlySpan<KeyValuePair<string, object>> tagsRos, object state)
{
// Get Instrument State
var metric = state as Metric;
Debug.Assert(instrument != null, "instrument must be non-null.");
if (metric == null)
{
// TODO: log
return;
}
metric.UpdateDouble(value, tagsRos);
}
internal Batch<Metric> Collect()
{
lock (this.collectLock)
{
try
{
// Record all observable instruments
try
{
this.listener.RecordObservableInstruments();
}
catch (Exception exception)
{
// TODO:
// It doesn't looks like we can find which instrument callback
// threw.
OpenTelemetrySdkEventSource.Log.MetricObserverCallbackException(exception);
}
var indexSnapShot = Math.Min(this.metricIndex, MaxMetrics - 1);
var target = indexSnapShot + 1;
for (int i = 0; i < target; i++)
{
this.metrics[i].SnapShot();
}
return (target > 0) ? new Batch<Metric>(this.metrics, target) : default;
}
catch (Exception)
{
// TODO: Log
return default;
}
}
}
/// <summary>
/// Called by <c>ForceFlush</c>. This function should block the current
/// thread until flush completed or timed out.
/// </summary>
/// <param name="timeoutMilliseconds">
/// The number (non-negative) of milliseconds to wait, or
/// <c>Timeout.Infinite</c> to wait indefinitely.
/// </param>
/// <returns>
/// Returns <c>true</c> when flush succeeded; otherwise, <c>false</c>.
/// </returns>
/// <remarks>
/// This function is called synchronously on the thread which made the
/// first call to <c>ForceFlush</c>. This function should not throw
/// exceptions.
/// </remarks>
internal bool OnForceFlush(int timeoutMilliseconds)
{
return this.reader?.Collect(timeoutMilliseconds) ?? true;
}
/// <summary>
/// Called by <c>Shutdown</c>. This function should block the current
/// thread until shutdown completed or timed out.
/// </summary>
/// <param name="timeoutMilliseconds">
/// The number (non-negative) of milliseconds to wait, or
/// <c>Timeout.Infinite</c> to wait indefinitely.
/// </param>
/// <returns>
/// Returns <c>true</c> when shutdown succeeded; otherwise, <c>false</c>.
/// </returns>
/// <remarks>
/// This function is called synchronously on the thread which made the
/// first call to <c>Shutdown</c>. This function should not throw
/// exceptions.
/// </remarks>
internal bool OnShutdown(int timeoutMilliseconds)
{
return this.reader?.Shutdown(timeoutMilliseconds) ?? true;
}
protected override void Dispose(bool disposing)
{
if (this.instrumentations != null)
{
foreach (var item in this.instrumentations)
{
(item as IDisposable)?.Dispose();
}
this.instrumentations.Clear();
}
// Wait for up to 5 seconds grace period
this.reader?.Shutdown(5000);
this.reader?.Dispose();
this.listener.Dispose();
}
}
}
| 1 | 21,982 | we need to include the `metricStreamName` which is invalid, so users know whats causing the issue. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -93,8 +93,10 @@ namespace Microsoft.VisualStudio.TestPlatform.ObjectModel.Navigation
var pdbFilePath = Path.ChangeExtension(binaryPath, ".pdb");
using (var pdbReader = new PortablePdbReader(new FileHelper().GetStream(pdbFilePath, FileMode.Open, FileAccess.Read)))
{
- // Load assembly
- var asm = AssemblyLoadContext.Default.LoadFromAssemblyPath(binaryPath);
+ // At this point, the assembly should be already loaded into the load context. We query for a reference to
+ // find the types and cache the symbol information. Let the loader follow default lookup order instead of
+ // forcing load from a specific path.
+ var asm = Assembly.Load(AssemblyLoadContext.GetAssemblyName(binaryPath));
foreach (var type in asm.GetTypes())
{ | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace Microsoft.VisualStudio.TestPlatform.ObjectModel.Navigation
{
#if !NET46
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Reflection;
using System.Runtime.Loader;
using Microsoft.VisualStudio.TestPlatform.Utilities.Helpers;
/// <summary>
/// The portable symbol reader.
/// </summary>
internal class PortableSymbolReader : ISymbolReader
{
/// <summary>
/// Key in first dict is Type FullName
/// Key in second dict is method name
/// </summary>
private Dictionary<string, Dictionary<string, DiaNavigationData>> methodsNavigationDataForType =
new Dictionary<string, Dictionary<string, DiaNavigationData>>();
/// <summary>
/// The cache symbols.
/// </summary>
/// <param name="binaryPath">
/// The binary path.
/// </param>
/// <param name="searchPath">
/// The search path.
/// </param>
public void CacheSymbols(string binaryPath, string searchPath)
{
this.PopulateCacheForTypeAndMethodSymbols(binaryPath);
}
/// <summary>
/// The dispose.
/// </summary>
public void Dispose()
{
foreach (var methodsNavigationData in this.methodsNavigationDataForType.Values)
{
methodsNavigationData.Clear();
}
this.methodsNavigationDataForType.Clear();
this.methodsNavigationDataForType = null;
}
/// <summary>
/// The get navigation data.
/// </summary>
/// <param name="declaringTypeName">
/// The declaring type name.
/// </param>
/// <param name="methodName">
/// The method name.
/// </param>
/// <returns>
/// The <see cref="INavigationData"/>.
/// </returns>
public INavigationData GetNavigationData(string declaringTypeName, string methodName)
{
INavigationData navigationData = null;
if (this.methodsNavigationDataForType.ContainsKey(declaringTypeName))
{
var methodDict = this.methodsNavigationDataForType[declaringTypeName];
if (methodDict.ContainsKey(methodName))
{
navigationData = methodDict[methodName];
}
}
return navigationData;
}
/// <summary>
/// The populate cache for type and method symbols.
/// </summary>
/// <param name="binaryPath">
/// The binary path.
/// </param>
private void PopulateCacheForTypeAndMethodSymbols(string binaryPath)
{
try
{
var pdbFilePath = Path.ChangeExtension(binaryPath, ".pdb");
using (var pdbReader = new PortablePdbReader(new FileHelper().GetStream(pdbFilePath, FileMode.Open, FileAccess.Read)))
{
// Load assembly
var asm = AssemblyLoadContext.Default.LoadFromAssemblyPath(binaryPath);
foreach (var type in asm.GetTypes())
{
// Get declared method infos
var methodInfoList = ((TypeInfo)type.GetTypeInfo()).DeclaredMethods;
var methodsNavigationData = new Dictionary<string, DiaNavigationData>();
foreach (var methodInfo in methodInfoList)
{
var diaNavigationData = pdbReader.GetDiaNavigationData(methodInfo);
if (diaNavigationData != null)
{
methodsNavigationData[methodInfo.Name] = diaNavigationData;
}
else
{
EqtTrace.Error(
string.Format(
"Unable to find source information for method: {0} type: {1}",
methodInfo.Name,
type.FullName));
}
}
if (methodsNavigationData.Count != 0)
{
this.methodsNavigationDataForType[type.FullName] = methodsNavigationData;
}
}
}
}
catch (Exception)
{
this.Dispose();
throw;
}
}
}
#endif
} | 1 | 11,704 | Please run Platform tests `DiaSessionTests`. | microsoft-vstest | .cs |
@@ -741,6 +741,7 @@ read_evex(byte *pc, decode_info_t *di, byte instr_byte,
return pc;
}
*is_evex = true;
+ SYSLOG_INTERNAL_WARNING_ONCE(MSG_AVX_512_SUPPORT_INCOMPLETE_STRING " @" PFX, pc);
info = &evex_prefix_extensions[0][1];
} else {
/* not evex */ | 1 | /* **********************************************************
* Copyright (c) 2011-2019 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/* decode.c -- a full x86 decoder */
#include "../globals.h"
#include "arch.h"
#include "instr.h"
#include "decode.h"
#include "decode_fast.h"
#include "decode_private.h"
/*
* XXX i#431: consider cpuid features when deciding invalid instrs:
* for core DR, it doesn't really matter: the only bad thing is thinking
* a valid instr is invalid, esp decoding its size improperly.
* but for completeness and use as disassembly library might be nice.
*
* XXX (these are very old):
* 1) several opcodes gdb thinks bad/not bad -- changes to ISA?
* 2) why does gdb use Ex, Ed, & Ev for all float opcodes w/ modrm < 0xbf?
* a float instruction's modrm cannot specify a register, right?
* sizes are single-real => d, double-real => q, extended-real = 90 bits,
* 14/28 bytes, and 98/108 bytes!
* should address sizes all be 'v'?
* 3) there don't seem to be any immediate float values...?!?
* 4) fld (0xd9 0xc0-7) in Table A-10 has 2 operands in different order
* than expected, plus asm prints using just 2nd one
* 5) I don't see T...is there a T? gdb has it for 0f26mov
*/
/* N.B.: must justify each assert, since we do not want to assert on a bad
* instruction -- we want to fail gracefully and have the caller deal with it
*/
#ifdef DEBUG
/* case 10450: give messages to clients */
/* we can't undef ASSERT b/c of DYNAMO_OPTION */
# undef ASSERT_TRUNCATE
# undef ASSERT_BITFIELD_TRUNCATE
# undef ASSERT_NOT_REACHED
# define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
#endif
/* used for VEX decoding */
/* "xx" abbreviates an absent operand (TYPE_NONE, OPSZ_NA) in the sentinel
 * entries below.  These entries carry the mnemonic "(bad)" since they do not
 * describe complete instructions on their own: their first field (ESCAPE*,
 * XOP_*_EXT) identifies a follow-on opcode table to consult.
 */
#define xx TYPE_NONE, OPSZ_NA
static const instr_info_t escape_instr = { ESCAPE, 0x000000, "(bad)", xx, xx, xx,
                                           xx, xx, 0, 0, 0 };
static const instr_info_t escape_38_instr = {
    ESCAPE_3BYTE_38, 0x000000, "(bad)", xx, xx, xx, xx, xx, 0, 0, 0
};
static const instr_info_t escape_3a_instr = {
    ESCAPE_3BYTE_3a, 0x000000, "(bad)", xx, xx, xx, xx, xx, 0, 0, 0
};
/* used for XOP decoding */
static const instr_info_t xop_8_instr = { XOP_8_EXT, 0x000000, "(bad)", xx, xx, xx,
                                          xx, xx, 0, 0, 0 };
static const instr_info_t xop_9_instr = { XOP_9_EXT, 0x000000, "(bad)", xx, xx, xx,
                                          xx, xx, 0, 0, 0 };
static const instr_info_t xop_a_instr = { XOP_A_EXT, 0x000000, "(bad)", xx, xx, xx,
                                          xx, xx, 0, 0, 0 };
#undef xx
/* Reports whether the given ISA mode can be handled by this build.
 * IA-32 is always accepted; AMD64 additionally requires an x64 build.
 */
bool
is_isa_mode_legal(dr_isa_mode_t mode)
{
    if (mode == DR_ISA_IA32)
        return true;
#ifdef X64
    if (mode == DR_ISA_AMD64)
        return true;
#endif
    return false;
}
/* Returns the canonical form of a PC target.  On x86 there is only one
 * instruction encoding mode per PC value, so the address passes through
 * unchanged.
 */
app_pc
canonicalize_pc_target(dcontext_t *dcontext, app_pc pc)
{
    return pc;
}
#ifdef X64
/* Switches the dcontext between IA-32 ("x86") and AMD64 decoding modes.
 * Returns whether the previous mode was IA-32; also returns false when the
 * mode switch itself fails.
 */
bool
set_x86_mode(dcontext_t *dcontext, bool x86)
{
    dr_isa_mode_t target = x86 ? DR_ISA_IA32 : DR_ISA_AMD64;
    dr_isa_mode_t prior;
    if (dr_set_isa_mode(dcontext, target, &prior))
        return prior == DR_ISA_IA32;
    return false;
}
/* Reports whether the dcontext is currently in IA-32 ("x86") decoding mode. */
bool
get_x86_mode(dcontext_t *dcontext)
{
    dr_isa_mode_t cur = dr_get_isa_mode(dcontext);
    return cur == DR_ISA_IA32;
}
#endif
/****************************************************************************
* All code below based on tables in the ``Intel Architecture Software
* Developer's Manual,'' Volume 2: Instruction Set Reference, 2001.
*/
#if defined(DEBUG) && !defined(STANDALONE_DECODER) /* currently only used in ASSERTs */
/* Debug-only helper: reports whether sz is a variable operand-size code,
 * i.e. one whose concrete size depends on decode context (prefixes, mode,
 * vendor) and must go through resolve_variable_size().
 */
static bool
is_variable_size(opnd_size_t sz)
{
    switch (sz) {
    case OPSZ_2_short1:
    case OPSZ_4_short2:
    case OPSZ_4x8:
    case OPSZ_4x8_short2:
    case OPSZ_4x8_short2xi8:
    case OPSZ_4_short2xi4:
    case OPSZ_4_rex8_short2:
    case OPSZ_4_rex8:
    case OPSZ_6_irex10_short4:
    case OPSZ_6x10:
    case OPSZ_8_short2:
    case OPSZ_8_short4:
    case OPSZ_28_short14:
    case OPSZ_108_short94:
    case OPSZ_1_reg4:
    case OPSZ_2_reg4:
    case OPSZ_4_reg16:
    case OPSZ_32_short16:
    case OPSZ_8_rex16:
    case OPSZ_8_rex16_short4:
    case OPSZ_12_rex40_short6:
    case OPSZ_16_vex32:
    case OPSZ_16_vex32_evex64:
        /* All of the above vary with decode context. */
        break;
    default: return false;
    }
    return true;
}
#endif
/* Resolves the OPSZ_*_reg* size codes, which vary by whether the operand is
 * a register or a memory reference.  Other sizes pass through unchanged.
 */
opnd_size_t
resolve_var_reg_size(opnd_size_t sz, bool is_reg)
{
    if (sz == OPSZ_1_reg4)
        return is_reg ? OPSZ_4 : OPSZ_1;
    if (sz == OPSZ_2_reg4)
        return is_reg ? OPSZ_4 : OPSZ_2;
    if (sz == OPSZ_4_reg16) {
        /* i#1382: we distinguish sub-xmm now, so both forms resolve to 4 bytes. */
        return OPSZ_4;
    }
    return sz;
}
/* Like all our code, we assume cs specifies default data and address sizes.
 * This routine assumes the size varies by data, NOT by address!
 *
 * Resolves a variable operand-size code to a concrete size using the decode
 * context: the 0x66 data-size prefix, rex.w, vex.l / evex.ll', the 64-bit
 * mode flag, and (for a few size codes) the processor vendor.  Size codes
 * that are already concrete are returned unchanged.  is_reg selects the
 * register form for the OPSZ_*_reg* codes (see resolve_var_reg_size()).
 */
opnd_size_t
resolve_variable_size(decode_info_t *di /*IN: x86_mode, prefixes*/, opnd_size_t sz,
                      bool is_reg)
{
    switch (sz) {
    case OPSZ_2_short1: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_1 : OPSZ_2);
    case OPSZ_4_short2: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_4);
    case OPSZ_4x8: return (X64_MODE(di) ? OPSZ_8 : OPSZ_4);
    case OPSZ_4x8_short2:
        return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2
                                                : (X64_MODE(di) ? OPSZ_8 : OPSZ_4));
    case OPSZ_4x8_short2xi8:
        return (X64_MODE(di) ? (proc_get_vendor() == VENDOR_INTEL
                                    ? OPSZ_8
                                    : (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_8))
                             : (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_4));
    case OPSZ_4_short2xi4:
        return ((X64_MODE(di) && proc_get_vendor() == VENDOR_INTEL)
                    ? OPSZ_4
                    : (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_4));
    case OPSZ_4_rex8_short2: /* rex.w trumps data prefix */
        return (TEST(PREFIX_REX_W, di->prefixes)
                    ? OPSZ_8
                    : (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_4));
    case OPSZ_4_rex8: return (TEST(PREFIX_REX_W, di->prefixes) ? OPSZ_8 : OPSZ_4);
    case OPSZ_6_irex10_short4: /* rex.w trumps data prefix, but is ignored on AMD */
        DODEBUG({
            /* less annoying than a CURIOSITY assert when testing */
            if (TEST(PREFIX_REX_W, di->prefixes))
                SYSLOG_INTERNAL_INFO_ONCE("curiosity: rex.w on OPSZ_6_irex10_short4!");
        });
        return ((TEST(PREFIX_REX_W, di->prefixes) && proc_get_vendor() != VENDOR_AMD)
                    ? OPSZ_10
                    : (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_4 : OPSZ_6));
    case OPSZ_6x10: return (X64_MODE(di) ? OPSZ_10 : OPSZ_6);
    case OPSZ_8_short2: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_2 : OPSZ_8);
    case OPSZ_8_short4: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_4 : OPSZ_8);
    case OPSZ_8_rex16: return (TEST(PREFIX_REX_W, di->prefixes) ? OPSZ_16 : OPSZ_8);
    case OPSZ_8_rex16_short4: /* rex.w trumps data prefix */
        return (TEST(PREFIX_REX_W, di->prefixes)
                    ? OPSZ_16
                    : (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_4 : OPSZ_8));
    case OPSZ_12_rex40_short6: /* rex.w trumps data prefix */
        return (TEST(PREFIX_REX_W, di->prefixes)
                    ? OPSZ_40
                    : (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_6 : OPSZ_12));
    case OPSZ_16_vex32: return (TEST(PREFIX_VEX_L, di->prefixes) ? OPSZ_32 : OPSZ_16);
    case OPSZ_32_short16: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_16 : OPSZ_32);
    case OPSZ_28_short14: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_14 : OPSZ_28);
    case OPSZ_108_short94: return (TEST(PREFIX_DATA, di->prefixes) ? OPSZ_94 : OPSZ_108);
    case OPSZ_1_reg4:
    case OPSZ_2_reg4:
    case OPSZ_4_reg16: return resolve_var_reg_size(sz, is_reg);
    /* The _of_ types are not exposed to the user so convert here */
    case OPSZ_1_of_16: return OPSZ_1;
    case OPSZ_2_of_8:
    case OPSZ_2_of_16: return OPSZ_2;
    case OPSZ_4_of_8:
    case OPSZ_4_of_16: return OPSZ_4;
    case OPSZ_4_rex8_of_16: return (TEST(PREFIX_REX_W, di->prefixes) ? OPSZ_8 : OPSZ_4);
    case OPSZ_8_of_16: return OPSZ_8;
    case OPSZ_12_of_16: return OPSZ_12;
    case OPSZ_12_rex8_of_16: return (TEST(PREFIX_REX_W, di->prefixes) ? OPSZ_8 : OPSZ_12);
    case OPSZ_14_of_16: return OPSZ_14;
    case OPSZ_15_of_16: return OPSZ_15;
    case OPSZ_8_of_16_vex32: return (TEST(PREFIX_VEX_L, di->prefixes) ? OPSZ_32 : OPSZ_8);
    case OPSZ_16_of_32: return OPSZ_16;
    case OPSZ_16_vex32_evex64:
        /* XXX i#1312: There may be a conflict since LL' is also used for rounding
         * control in AVX-512 if used in combination.
         */
        return (TEST(PREFIX_EVEX_LL, di->prefixes)
                    ? OPSZ_64
                    : (TEST(PREFIX_VEX_L, di->prefixes) ? OPSZ_32 : OPSZ_16));
    case OPSZ_half_16_vex32: return (TEST(PREFIX_VEX_L, di->prefixes) ? OPSZ_16 : OPSZ_8);
    case OPSZ_half_16_vex32_evex64:
        return (TEST(PREFIX_EVEX_LL, di->prefixes)
                    ? OPSZ_32
                    : (TEST(PREFIX_VEX_L, di->prefixes) ? OPSZ_16 : OPSZ_8));
    }
    return sz;
}
/* Maps an OPSZ_*_of_* partial-register size code to the size of its
 * containing register; sizes that are not sub-register codes pass through
 * unchanged.
 */
opnd_size_t
expand_subreg_size(opnd_size_t sz)
{
    if (sz == OPSZ_2_of_8 || sz == OPSZ_4_of_8)
        return OPSZ_8;
    if (sz == OPSZ_1_of_16 || sz == OPSZ_2_of_16 || sz == OPSZ_4_of_16 ||
        sz == OPSZ_4_rex8_of_16 || sz == OPSZ_8_of_16 || sz == OPSZ_12_of_16 ||
        sz == OPSZ_12_rex8_of_16 || sz == OPSZ_14_of_16 || sz == OPSZ_15_of_16 ||
        sz == OPSZ_4_reg16)
        return OPSZ_16;
    if (sz == OPSZ_16_of_32)
        return OPSZ_32;
    if (sz == OPSZ_8_of_16_vex32 || sz == OPSZ_half_16_vex32)
        return OPSZ_16_vex32;
    if (sz == OPSZ_half_16_vex32_evex64)
        return OPSZ_16_vex32_evex64;
    return sz;
}
/* Convenience wrapper around resolve_variable_size() that builds the minimal
 * decode_info_t from a dcontext and an explicit prefix set.
 */
opnd_size_t
resolve_variable_size_dc(dcontext_t *dcontext, uint prefixes, opnd_size_t sz, bool is_reg)
{
    decode_info_t info;
    /* Only the fields size resolution consults are filled in. */
    info.prefixes = prefixes;
    IF_X64(info.x86_mode = get_x86_mode(dcontext));
    return resolve_variable_size(&info, sz, is_reg);
}
/* Returns the effective address size for the decode context: the addr-size
 * prefix (0x67) halves the default, which is 8 bytes in 64-bit mode and 4
 * bytes otherwise.
 */
opnd_size_t
resolve_addr_size(decode_info_t *di /*IN: x86_mode, prefixes*/)
{
    bool shrunk = TEST(PREFIX_ADDR, di->prefixes);
    if (X64_MODE(di))
        return shrunk ? OPSZ_4 : OPSZ_8;
    return shrunk ? OPSZ_2 : OPSZ_4;
}
/* Reports whether an operand type code is one of the TYPE_INDIR_VAR_*
 * variants, i.e. an implicit memory reference through a register.
 */
bool
optype_is_indir_reg(int optype)
{
    switch (optype) {
    case TYPE_INDIR_VAR_XREG:
    case TYPE_INDIR_VAR_XREG_OFFS_1:
    case TYPE_INDIR_VAR_XREG_OFFS_N:
    case TYPE_INDIR_VAR_XIREG:
    case TYPE_INDIR_VAR_XIREG_OFFS_1:
    case TYPE_INDIR_VAR_REG:
    case TYPE_INDIR_VAR_REG_OFFS_2:
    case TYPE_INDIR_VAR_REG_SIZEx2:
    case TYPE_INDIR_VAR_XREG_OFFS_8:
    case TYPE_INDIR_VAR_XREG_SIZEx8:
    case TYPE_INDIR_VAR_REG_SIZEx3x5:
        /* All indirect-through-register variants. */
        break;
    default: return false;
    }
    return true;
}
/* Returns the operand-size code for a TYPE_INDIR_VAR_* operand type.
 * Asserts (and returns OPSZ_0) on any other type.
 */
opnd_size_t
indir_var_reg_size(decode_info_t *di, int optype)
{
    if (optype == TYPE_INDIR_VAR_XREG || optype == TYPE_INDIR_VAR_XREG_OFFS_1 ||
        optype == TYPE_INDIR_VAR_XREG_OFFS_N) {
        /* non-zero immed int adds additional, but we require client to handle that
         * b/c our decoding and encoding can't see the rest of the operands
         */
        return OPSZ_VARSTACK;
    }
    if (optype == TYPE_INDIR_VAR_XIREG || optype == TYPE_INDIR_VAR_XIREG_OFFS_1)
        return OPSZ_ret;
    if (optype == TYPE_INDIR_VAR_REG)
        return OPSZ_REXVARSTACK;
    if (optype == TYPE_INDIR_VAR_REG_OFFS_2 || optype == TYPE_INDIR_VAR_REG_SIZEx2)
        return OPSZ_8_rex16_short4;
    if (optype == TYPE_INDIR_VAR_XREG_OFFS_8 || optype == TYPE_INDIR_VAR_XREG_SIZEx8)
        return OPSZ_32_short16;
    if (optype == TYPE_INDIR_VAR_REG_SIZEx3x5)
        return OPSZ_12_rex40_short6;
    CLIENT_ASSERT(false, "internal error: invalid indir reg type");
    return OPSZ_0;
}
/* Returns multiplier of the operand size to use as the base-disp offs */
int
indir_var_reg_offs_factor(int optype)
{
    /* The _OFFS_ variants use a factor of -1; every other type uses 0. */
    if (optype == TYPE_INDIR_VAR_XREG_OFFS_1 || optype == TYPE_INDIR_VAR_XREG_OFFS_8 ||
        optype == TYPE_INDIR_VAR_XREG_OFFS_N || optype == TYPE_INDIR_VAR_XIREG_OFFS_1 ||
        optype == TYPE_INDIR_VAR_REG_OFFS_2)
        return -1;
    return 0;
}
/****************************************************************************
* Reading all bytes of instruction
*/
static byte *
read_immed(byte *pc, decode_info_t *di, opnd_size_t size, ptr_int_t *result)
{
    /* Reads a single immediate of the given (possibly variable) size from pc
     * into *result and returns the pc just past it.
     */
    size = resolve_variable_size(di, size, false);
    /* all data immediates are sign-extended. we use the compiler's casts with
     * signed types to do our sign extensions for us.
     */
    if (size == OPSZ_1) {
        *result = (ptr_int_t)(char)*pc; /* sign-extend */
        return pc + 1;
    } else if (size == OPSZ_2) {
        *result = (ptr_int_t) * ((short *)pc); /* sign-extend */
        return pc + 2;
    } else if (size == OPSZ_4) {
        *result = (ptr_int_t) * ((int *)pc); /* sign-extend */
        return pc + 4;
    } else if (size == OPSZ_8) {
        CLIENT_ASSERT(X64_MODE(di), "decode immediate: invalid size");
        CLIENT_ASSERT(sizeof(ptr_int_t) == 8, "decode immediate: internal size error");
        *result = *((ptr_int_t *)pc);
        return pc + 8;
    }
    /* called internally w/ instr_info_t fields or hardcoded values,
     * so ok to assert */
    CLIENT_ASSERT(false, "decode immediate: unknown size");
    return pc;
}
/* reads any trailing immed bytes */
/* Decodes the trailing immediate/offset bytes (if any) for one operand of
 * type optype/opsize, storing the value in di->immed/di->size_immed or, if
 * that slot is taken (or for the two-part TYPE_A far address), in
 * di->immed2/di->size_immed2.  Returns the pc just past the consumed bytes;
 * operand types with no trailing bytes return pc unchanged.
 */
static byte *
read_operand(byte *pc, decode_info_t *di, byte optype, opnd_size_t opsize)
{
    ptr_int_t val = 0;
    opnd_size_t size = opsize;
    switch (optype) {
    case TYPE_A: {
        /* Far direct address (segment selector + offset); ia32 only. */
        CLIENT_ASSERT(!X64_MODE(di), "x64 has no type A instructions");
        /* ok b/c only instr_info_t fields passed */
        CLIENT_ASSERT(opsize == OPSZ_6_irex10_short4, "decode A operand error");
        if (TEST(PREFIX_DATA, di->prefixes)) {
            /* 4-byte immed */
            pc = read_immed(pc, di, OPSZ_4, &val);
#ifdef X64
            if (!X64_MODE(di)) {
                /* we do not want the sign extension that read_immed() applied */
                val &= (ptr_int_t)0x00000000ffffffff;
            }
#endif
            /* ok b/c only instr_info_t fields passed */
            CLIENT_ASSERT(di->size_immed == OPSZ_NA && di->size_immed2 == OPSZ_NA,
                          "decode A operand error");
            di->size_immed = resolve_variable_size(di, opsize, false);
            ASSERT(di->size_immed == OPSZ_4);
            di->immed = val;
        } else {
            /* 6-byte immed */
            ptr_int_t val2 = 0;
            /* little-endian: segment comes last */
            pc = read_immed(pc, di, OPSZ_4, &val2);
            pc = read_immed(pc, di, OPSZ_2, &val);
#ifdef X64
            if (!X64_MODE(di)) {
                /* we do not want the sign extension that read_immed() applied */
                val2 &= (ptr_int_t)0x00000000ffffffff;
            }
#endif
            /* ok b/c only instr_info_t fields passed */
            CLIENT_ASSERT(di->size_immed == OPSZ_NA && di->size_immed2 == OPSZ_NA,
                          "decode A operand error");
            di->size_immed = resolve_variable_size(di, opsize, false);
            ASSERT(di->size_immed == OPSZ_6);
            di->size_immed2 = resolve_variable_size(di, opsize, false);
            di->immed = val;
            di->immed2 = val2;
        }
        return pc;
    }
    case TYPE_I: {
        pc = read_immed(pc, di, opsize, &val);
        break;
    }
    case TYPE_J: {
        /* Relative branch offset: converted below into an absolute target. */
        byte *end_pc;
        pc = read_immed(pc, di, opsize, &val);
        if (di->orig_pc != di->start_pc) {
            /* Decoding a copy: offsets are relative to the original location. */
            CLIENT_ASSERT(di->start_pc != NULL,
                          "internal decode error: start pc not set");
            end_pc = di->orig_pc + (pc - di->start_pc);
        } else
            end_pc = pc;
        /* convert from relative offset to absolute target pc */
        val = ((ptr_int_t)end_pc) + val;
        if ((!X64_MODE(di) || proc_get_vendor() != VENDOR_INTEL) &&
            TEST(PREFIX_DATA, di->prefixes)) {
            /* need to clear upper 16 bits */
            val &= (ptr_int_t)0x0000ffff;
        } /* for x64 Intel, always 64-bit addr ("f64" in Intel table) */
        break;
    }
    case TYPE_L: {
        /* part of AVX: top 4 bits of 8-bit immed select xmm/ymm register */
        pc = read_immed(pc, di, OPSZ_1, &val);
        break;
    }
    case TYPE_O: {
        /* no modrm byte, offset follows directly. this is address-sized,
         * so 64-bit for x64, and addr prefix affects it. */
        size = resolve_addr_size(di);
        pc = read_immed(pc, di, size, &val);
        if (TEST(PREFIX_ADDR, di->prefixes)) {
            /* need to clear upper bits */
            if (X64_MODE(di))
                val &= (ptr_int_t)0xffffffff;
            else
                val &= (ptr_int_t)0x0000ffff;
        }
#ifdef X64
        if (!X64_MODE(di)) {
            /* we do not want the sign extension that read_immed() applied */
            val &= (ptr_int_t)0x00000000ffffffff;
        }
#endif
        break;
    }
    default: return pc; /* no trailing bytes for this operand type */
    }
    /* Record the value in the first free immed slot. */
    if (di->size_immed == OPSZ_NA) {
        di->size_immed = size;
        di->immed = val;
    } else {
        /* ok b/c only instr_info_t fields passed */
        CLIENT_ASSERT(di->size_immed2 == OPSZ_NA, "decode operand error");
        di->size_immed2 = size;
        di->immed2 = val;
    }
    return pc;
}
/* reads the modrm byte and any following sib and offset bytes */
/* Fills in di->modrm/mod/reg/rm, and, as applicable, di->has_sib (with
 * scale/index/base) and di->has_disp (with disp, sign- or zero-extended per
 * the addressing form).  Handles both 16-bit (addr-prefix in 32-bit mode)
 * and 32/64-bit addressing.  Returns the pc just past the consumed bytes.
 */
static byte *
read_modrm(byte *pc, decode_info_t *di)
{
    byte modrm = *pc;
    pc++;
    di->modrm = modrm;
    di->mod = (byte)((modrm >> 6) & 0x3); /* top 2 bits */
    di->reg = (byte)((modrm >> 3) & 0x7); /* middle 3 bits */
    di->rm = (byte)(modrm & 0x7);         /* bottom 3 bits */
    /* addr16 displacement */
    if (!X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes)) {
        /* 16-bit addressing: no SIB byte exists in this form. */
        di->has_sib = false;
        if ((di->mod == 0 && di->rm == 6) || di->mod == 2) {
            /* 2-byte disp */
            di->has_disp = true;
            if (di->mod == 0 && di->rm == 6) {
                /* treat absolute addr as unsigned */
                di->disp = (int)*((ushort *)pc); /* zero-extend */
            } else {
                /* treat relative addr as signed */
                di->disp = (int)*((short *)pc); /* sign-extend */
            }
            pc += 2;
        } else if (di->mod == 1) {
            /* 1-byte disp */
            di->has_disp = true;
            di->disp = (int)(char)*pc; /* sign-extend */
            pc++;
        } else {
            di->has_disp = false;
        }
    } else {
        /* 32-bit, which sometimes has a SIB */
        if (di->rm == 4 && di->mod != 3) {
            /* need SIB */
            byte sib = *pc;
            pc++;
            di->has_sib = true;
            di->scale = (byte)((sib >> 6) & 0x3); /* top 2 bits */
            di->index = (byte)((sib >> 3) & 0x7); /* middle 3 bits */
            di->base = (byte)(sib & 0x7);         /* bottom 3 bits */
        } else {
            di->has_sib = false;
        }
        /* displacement */
        if ((di->mod == 0 && di->rm == 5) ||
            (di->has_sib && di->mod == 0 && di->base == 5) || di->mod == 2) {
            /* 4-byte disp */
            di->has_disp = true;
            di->disp = *((int *)pc);
            IF_X64(di->disp_abs = pc); /* used to set instr->rip_rel_pos */
            pc += 4;
        } else if (di->mod == 1) {
            /* 1-byte disp */
            di->has_disp = true;
            di->disp = (int)(char)*pc; /* sign-extend */
            pc++;
        } else {
            di->has_disp = false;
        }
    }
    return pc;
}
/* Given the potential first vex byte at pc, reads any subsequent vex
 * bytes (and any prefix bytes) and sets the appropriate prefix flags in di.
 * Sets info to the entry for the first opcode byte, and pc to point past
 * the first opcode byte.
 * Also handles xop encodings, which are quite similar to vex.
 * On the non-vex path, *is_vex is set false and *ret_info is updated to the
 * non-vex table entry without consuming bytes.  Invalid combinations set
 * *ret_info to &invalid_instr.
 */
static byte *
read_vex(byte *pc, decode_info_t *di, byte instr_byte,
         const instr_info_t **ret_info INOUT, bool *is_vex /*or xop*/)
{
    int idx = 0;
    const instr_info_t *info;
    byte vex_last = 0, vex_pp;
    ASSERT(ret_info != NULL && *ret_info != NULL && is_vex != NULL);
    info = *ret_info;
    if (info->type == VEX_PREFIX_EXT) {
        /* If 32-bit mode and mod selects for memory, this is not vex */
        if (X64_MODE(di) || TESTALL(MODRM_BYTE(3, 0, 0), *pc))
            idx = 1;
        else
            idx = 0;
        info = &vex_prefix_extensions[info->code][idx];
    } else if (info->type == XOP_PREFIX_EXT) {
        /* If m-mmm (what AMD calls "map_select") < 8, this is not vex */
        if ((*pc & 0x1f) < 0x8)
            idx = 0;
        else
            idx = 1;
        info = &xop_prefix_extensions[info->code][idx];
    } else
        CLIENT_ASSERT(false, "internal vex decoding error");
    if (idx == 0) {
        /* not vex */
        *ret_info = info;
        *is_vex = false;
        return pc;
    }
    *is_vex = true;
    if (TESTANY(PREFIX_REX_ALL | PREFIX_LOCK, di->prefixes) || di->data_prefix ||
        di->rep_prefix || di->repne_prefix) {
        /* #UD if combined w/ VEX prefix */
        *ret_info = &invalid_instr;
        return pc;
    }
    /* read 2nd vex byte */
    instr_byte = *pc;
    pc++;
    if (info->code == PREFIX_VEX_2B) {
        CLIENT_ASSERT(info->type == PREFIX, "internal vex decoding error");
        /* fields are: R, vvvv, L, PP. R is inverted. */
        vex_last = instr_byte;
        if (!TEST(0x80, vex_last))
            di->prefixes |= PREFIX_REX_R;
        /* 2-byte vex implies leading 0x0f */
        *ret_info = &escape_instr;
        /* rest are shared w/ 3-byte form's final byte */
    } else if (info->code == PREFIX_VEX_3B || info->code == PREFIX_XOP) {
        byte vex_mm;
        CLIENT_ASSERT(info->type == PREFIX, "internal vex decoding error");
        /* fields are: R, X, B, m-mmmm. R, X, and B are inverted. */
        if (!TEST(0x80, instr_byte))
            di->prefixes |= PREFIX_REX_R;
        if (!TEST(0x40, instr_byte))
            di->prefixes |= PREFIX_REX_X;
        if (!TEST(0x20, instr_byte))
            di->prefixes |= PREFIX_REX_B;
        vex_mm = instr_byte & 0x1f;
        /* our strategy is to decode through the regular tables w/ a vex-encoded
         * flag, to match Intel manuals and vex implicit-prefix flags
         */
        if (info->code == PREFIX_VEX_3B) {
            if (vex_mm == 1) {
                *ret_info = &escape_instr;
            } else if (vex_mm == 2) {
                *ret_info = &escape_38_instr;
            } else if (vex_mm == 3) {
                *ret_info = &escape_3a_instr;
            } else {
                /* #UD: reserved for future use */
                *ret_info = &invalid_instr;
                return pc;
            }
        } else {
            /* xop */
            if (vex_mm == 0x8) {
                *ret_info = &xop_8_instr;
            } else if (vex_mm == 0x9) {
                *ret_info = &xop_9_instr;
            } else if (vex_mm == 0xa) {
                *ret_info = &xop_a_instr;
            } else {
                /* #UD: reserved for future use */
                *ret_info = &invalid_instr;
                return pc;
            }
        }
        /* read 3rd vex byte */
        vex_last = *pc;
        pc++;
        /* fields are: W, vvvv, L, PP */
        /* Intel docs say vex.W1 behaves just like rex.w except in cases
         * where rex.w is ignored, so no need for a PREFIX_VEX_W flag
         */
        if (TEST(0x80, vex_last))
            di->prefixes |= PREFIX_REX_W;
        /* rest are shared w/ 2-byte form's final byte */
    } else
        CLIENT_ASSERT(false, "internal vex decoding error");
    /* shared vex fields */
    vex_pp = vex_last & 0x03;
    di->vex_vvvv = (vex_last & 0x78) >> 3;
    if (TEST(0x04, vex_last))
        di->prefixes |= PREFIX_VEX_L;
    /* pp encodes an implied legacy prefix: 1=0x66, 2=0xf3, 3=0xf2 */
    if (vex_pp == 0x1)
        di->data_prefix = true;
    else if (vex_pp == 0x2)
        di->rep_prefix = true;
    else if (vex_pp == 0x3)
        di->repne_prefix = true;
    di->vex_encoded = true;
    return pc;
}
/* Given the potential first evex byte at pc, reads any subsequent evex
 * bytes (and any prefix bytes) and sets the appropriate prefix flags in di.
 * Sets info to the entry for the first opcode byte, and pc to point past
 * the first opcode byte.
 * On the non-evex path, *is_evex is set false and no bytes are consumed.
 * Invalid combinations set *ret_info to &invalid_instr.
 */
static byte *
read_evex(byte *pc, decode_info_t *di, byte instr_byte,
          const instr_info_t **ret_info INOUT, bool *is_evex)
{
    const instr_info_t *info;
    byte prefix_byte = 0, evex_pp = 0;
    ASSERT(ret_info != NULL && *ret_info != NULL && is_evex != NULL);
    info = *ret_info;
    CLIENT_ASSERT(info->type == EVEX_PREFIX_EXT, "internal evex decoding error");
    /* If 32-bit mode and mod selects for memory, this is not evex */
    if (X64_MODE(di) || TESTALL(MODRM_BYTE(3, 0, 0), *pc)) {
        /* P[3:2] must be 0 and P[10] must be 1, otherwise #UD */
        if (TEST(0xC, *pc) || !TEST(0x04, *(pc + 1))) {
            *ret_info = &invalid_instr;
            return pc;
        }
        *is_evex = true;
        info = &evex_prefix_extensions[0][1];
    } else {
        /* not evex */
        *is_evex = false;
        *ret_info = &evex_prefix_extensions[0][0];
        return pc;
    }
    CLIENT_ASSERT(info->code == PREFIX_EVEX, "internal evex decoding error");
    /* read 2nd evex byte */
    instr_byte = *pc;
    prefix_byte = instr_byte;
    pc++;
    if (TESTANY(PREFIX_REX_ALL | PREFIX_LOCK, di->prefixes) || di->data_prefix ||
        di->rep_prefix || di->repne_prefix) {
        /* #UD if combined w/ EVEX prefix */
        *ret_info = &invalid_instr;
        return pc;
    }
    CLIENT_ASSERT(info->type == PREFIX, "internal evex decoding error");
    /* Fields are: R, X, B, R', 00, mm. R, X, B and R' are inverted. Intel's
     * Software Developer's Manual Vol-2A 2.6 AVX-512 ENCODING fails to mention
     * explicitly the fact that the bits are inverted in order to make the prefix
     * distinct from the bound instruction in 32-bit mode. We experimentally
     * confirmed.
     */
    if (!TEST(0x80, prefix_byte))
        di->prefixes |= PREFIX_REX_R;
    if (!TEST(0x40, prefix_byte))
        di->prefixes |= PREFIX_REX_X;
    if (!TEST(0x20, prefix_byte))
        di->prefixes |= PREFIX_REX_B;
    if (!TEST(0x10, prefix_byte))
        di->prefixes |= PREFIX_EVEX_RR;
    /* mm selects the opcode map, like vex's m-mmmm (2 bits here). */
    byte evex_mm = instr_byte & 0x3;
    if (evex_mm == 1) {
        *ret_info = &escape_instr;
    } else if (evex_mm == 2) {
        *ret_info = &escape_38_instr;
    } else if (evex_mm == 3) {
        *ret_info = &escape_3a_instr;
    } else {
        /* #UD: reserved for future use */
        *ret_info = &invalid_instr;
        return pc;
    }
    /* read 3rd evex byte */
    prefix_byte = *pc;
    pc++;
    /* fields are: W, vvvv, 1, PP */
    if (TEST(0x80, prefix_byte)) {
        di->prefixes |= PREFIX_REX_W;
    }
    evex_pp = prefix_byte & 0x03;
    di->evex_vvvv = (prefix_byte & 0x78) >> 3;
    /* pp encodes an implied legacy prefix: 1=0x66, 2=0xf3, 3=0xf2 */
    if (evex_pp == 0x1)
        di->data_prefix = true;
    else if (evex_pp == 0x2)
        di->rep_prefix = true;
    else if (evex_pp == 0x3)
        di->repne_prefix = true;
    /* read 4th evex byte */
    prefix_byte = *pc;
    pc++;
    /* fields are: z, L', L, b, V' and aaa */
    if (TEST(0x80, prefix_byte))
        di->prefixes |= PREFIX_EVEX_z;
    if (TEST(0x40, prefix_byte))
        di->prefixes |= PREFIX_EVEX_LL;
    if (TEST(0x20, prefix_byte))
        di->prefixes |= PREFIX_VEX_L;
    if (TEST(0x10, prefix_byte))
        di->prefixes |= PREFIX_EVEX_b;
    if (!TEST(0x08, prefix_byte))
        di->prefixes |= PREFIX_EVEX_VV;
    di->evex_aaa = prefix_byte & 0x07;
    di->evex_encoded = true;
    return pc;
}
/* Given an instr_info_t PREFIX_EXT entry, reads the next entry based on the prefixes.
 * Note that this function does not initialize the opcode field in \p di but is set in
 * \p info->type.
 * Index layout of prefix_extensions[code]: 0=none, 1=0xf3, 2=0x66, 3=0xf2,
 * +4 for vex-encoded, +8 for evex-encoded.  Prefixes consumed as part of the
 * opcode are cleared from di.
 */
static inline const instr_info_t *
read_prefix_ext(const instr_info_t *info, decode_info_t *di)
{
    /* discard old info, get new one */
    int code = (int)info->code;
    int idx = (di->rep_prefix ? 1 : (di->data_prefix ? 2 : (di->repne_prefix ? 3 : 0)));
    if (di->vex_encoded)
        idx += 4;
    else if (di->evex_encoded)
        idx += 8;
    info = &prefix_extensions[code][idx];
    if (info->type == INVALID && !DYNAMO_OPTION(decode_strict)) {
        /* i#1118: some of these seem to not be invalid with
         * prefixes that land in blank slots in the decode tables.
         * Though it seems to only be btc, bsf, and bsr (the SSE*
         * instrs really do seem invalid when given unlisted prefixes),
         * we'd rather err on the side of treating as valid, which is
         * after all what gdb and dumpbin list.  Even if these
         * fault when executed, we know the length, so there's no
         * downside to listing as valid, for DR anyway.
         * Users of drdecodelib may want to be more aggressive: hence the
         * -decode_strict option.
         */
        /* Take the base entry w/o prefixes and keep the prefixes */
        CLIENT_ASSERT(!di->evex_encoded, "TODO i#1312: decode error: unsupported yet.");
        info = &prefix_extensions[code][0 + (di->vex_encoded ? 4 : 0)];
    } else if (di->rep_prefix)
        di->rep_prefix = false;
    else if (di->repne_prefix)
        di->repne_prefix = false;
    if (di->data_prefix &&
        /* Don't remove it if the entry doesn't list 0x66:
         * e.g., OP_bsr (i#1118).
         */
        (info->opcode >> 24) == DATA_PREFIX_OPCODE)
        di->data_prefix = false;
    if (info->type == REX_B_EXT) {
        /* discard old info, get new one */
        code = (int)info->code;
        idx = (TEST(PREFIX_REX_B, di->prefixes) ? 1 : 0);
        info = &rex_b_extensions[code][idx];
    }
    return info;
}
/* Disassembles the instruction at pc into the data structures ret_info
 * and di.  Does NOT set or read di->len.
 * Returns a pointer to the pc of the next instruction.
 * If just_opcode is true, does not decode the immeds and returns NULL
 * (you must call decode_next_pc to get the next pc, but that's faster
 * than decoding the immeds)
 * Returns NULL on an invalid instruction
 */
static byte *
read_instruction(byte *pc, byte *orig_pc, const instr_info_t **ret_info,
                 decode_info_t *di, bool just_opcode _IF_DEBUG(bool report_invalid))
{
    DEBUG_DECLARE(byte *post_suffix_pc = NULL;)
    byte instr_byte;
    const instr_info_t *info;
    bool vex_noprefix = false;
    bool evex_noprefix = false;
    /* initialize di */
    /* though we only need di->start_pc for full decode rip-rel (and
     * there only post-read_instruction()) and decode_from_copy(), and
     * di->orig_pc only for decode_from_copy(), we assume that
     * high-perf decoding uses decode_cti() and live w/ the extra
     * writes here for decode_opcode() and decode_eflags_usage().
     */
    di->start_pc = pc;
    di->orig_pc = orig_pc;
    di->size_immed = OPSZ_NA;
    di->size_immed2 = OPSZ_NA;
    di->seg_override = REG_NULL;
    di->data_prefix = false;
    di->rep_prefix = false;
    di->repne_prefix = false;
    di->vex_encoded = false;
    di->evex_encoded = false;
    /* FIXME: set data and addr sizes to current mode
     * for now I assume always 32-bit mode (or 64 for X64_MODE(di))!
     */
    di->prefixes = 0;
    /* Phase 1: consume prefix bytes (legacy, rex, vex/evex/xop) until we
     * reach the first opcode byte.
     */
    do {
        instr_byte = *pc;
        pc++;
        info = &first_byte[instr_byte];
        if (info->type == X64_EXT) {
            /* discard old info, get new one */
            info = &x64_extensions[info->code][X64_MODE(di) ? 1 : 0];
        } else if (info->type == VEX_PREFIX_EXT || info->type == XOP_PREFIX_EXT) {
            bool is_vex = false; /* or xop */
            pc = read_vex(pc, di, instr_byte, &info, &is_vex);
            /* if read_vex changes info, leave this loop */
            if (info->type != VEX_PREFIX_EXT && info->type != XOP_PREFIX_EXT)
                break;
            else {
                if (is_vex)
                    vex_noprefix = true; /* staying in loop, but ensure no prefixes */
                continue;
            }
        } else if (info->type == EVEX_PREFIX_EXT) {
            bool is_evex = false;
            pc = read_evex(pc, di, instr_byte, &info, &is_evex);
            /* if read_evex changes info, leave this loop */
            if (info->type != EVEX_PREFIX_EXT)
                break;
            else {
                if (is_evex)
                    evex_noprefix = true; /* staying in loop, but ensure no prefixes */
                continue;
            }
        }
        if (info->type == PREFIX) {
            if (vex_noprefix || evex_noprefix) {
                /* VEX/EVEX prefix must be last */
                info = &invalid_instr;
                break;
            }
            if (TESTANY(PREFIX_REX_ALL, di->prefixes)) {
                /* rex.* must come after all other prefixes (including those that are
                 * part of the opcode, xref PR 271878): so discard them if before
                 * matching the behavior of decode_sizeof(). This in effect nops
                 * improperly placed rex prefixes which (xref PR 241563 and Intel Manual
                 * 2A 2.2.1) is the correct thing to do. NOTE - windbg shows early bytes
                 * as ??, objdump as their prefix names, separate from the next instr.
                 */
                di->prefixes &= ~PREFIX_REX_ALL;
            }
            if (info->code == PREFIX_REP) {
                /* see if used as part of opcode before considering prefix */
                di->rep_prefix = true;
            } else if (info->code == PREFIX_REPNE) {
                /* see if used as part of opcode before considering prefix */
                di->repne_prefix = true;
            } else if (REG_START_SEGMENT <= info->code &&
                       info->code <= REG_STOP_SEGMENT) {
                CLIENT_ASSERT_TRUNCATE(di->seg_override, ushort, info->code,
                                       "decode error: invalid segment override");
                di->seg_override = (reg_id_t)info->code;
            } else if (info->code == PREFIX_DATA) {
                /* see if used as part of opcode before considering prefix */
                di->data_prefix = true;
            } else if (TESTANY(PREFIX_REX_ALL | PREFIX_ADDR | PREFIX_LOCK, info->code)) {
                di->prefixes |= info->code;
            }
        } else
            break;
    } while (true);
    /* Phase 2: escape bytes select the 0x0f two-byte and 0x0f38/0x0f3a
     * three-byte opcode tables.
     */
    if (info->type == ESCAPE) {
        /* discard first byte, move to second */
        instr_byte = *pc;
        pc++;
        info = &second_byte[instr_byte];
    }
    if (info->type == ESCAPE_3BYTE_38 || info->type == ESCAPE_3BYTE_3a) {
        /* discard second byte, move to third */
        instr_byte = *pc;
        pc++;
        if (info->type == ESCAPE_3BYTE_38)
            info = &third_byte_38[third_byte_38_index[instr_byte]];
        else
            info = &third_byte_3a[third_byte_3a_index[instr_byte]];
    } else if (info->type == XOP_8_EXT || info->type == XOP_9_EXT ||
               info->type == XOP_A_EXT) {
        /* discard second byte, move to third */
        int idx = 0;
        instr_byte = *pc;
        pc++;
        if (info->type == XOP_8_EXT)
            idx = xop_8_index[instr_byte];
        else if (info->type == XOP_9_EXT)
            idx = xop_9_index[instr_byte];
        else if (info->type == XOP_A_EXT)
            idx = xop_a_index[instr_byte];
        else
            CLIENT_ASSERT(false, "internal invalid XOP type");
        info = &xop_extensions[idx];
    }
    /* all FLOAT_EXT and PREFIX_EXT (except nop & pause) and EXTENSION need modrm,
     * get it now
     */
    if ((info->flags & HAS_MODRM) != 0)
        pc = read_modrm(pc, di);
    /* Phase 3: resolve the remaining table-indirection layers in the order
     * the encodings require (float, rep/repne, reg-extension, suffix,
     * vex/evex width and length, prefix, mod, etc.).
     */
    if (info->type == FLOAT_EXT) {
        if (di->modrm <= 0xbf) {
            int offs = (instr_byte - 0xd8) * 8 + di->reg;
            info = &float_low_modrm[offs];
        } else {
            int offs1 = (instr_byte - 0xd8);
            int offs2 = di->modrm - 0xc0;
            info = &float_high_modrm[offs1][offs2];
        }
    } else if (info->type == REP_EXT) {
        /* discard old info, get new one */
        int code = (int)info->code;
        int idx = (di->rep_prefix ? 2 : 0);
        info = &rep_extensions[code][idx];
        if (di->rep_prefix)
            di->rep_prefix = false;
    } else if (info->type == REPNE_EXT) {
        /* discard old info, get new one */
        int code = (int)info->code;
        int idx = (di->rep_prefix ? 2 : (di->repne_prefix ? 4 : 0));
        info = &repne_extensions[code][idx];
        di->rep_prefix = false;
        di->repne_prefix = false;
    } else if (info->type == EXTENSION) {
        /* discard old info, get new one */
        info = &base_extensions[info->code][di->reg];
        /* absurd cases of using prefix on top of reg opcode extension
         * (pslldq, psrldq) => PREFIX_EXT can happen after here,
         * and MOD_EXT after that
         */
    } else if (info->type == SUFFIX_EXT) {
        /* Discard old info, get new one for complete opcode, which includes
         * a suffix byte where an immed would be (yes, ugly!).
         * We should have already read in the modrm (+ sib).
         */
        CLIENT_ASSERT(TEST(HAS_MODRM, info->flags), "decode error on 3DNow instr");
        info = &suffix_extensions[suffix_index[*pc]];
        pc++;
        DEBUG_DECLARE(post_suffix_pc = pc;)
    } else if (info->type == VEX_L_EXT) {
        /* TODO i#1312: We probably need to extend this table for EVEX. In this case,
         * rename to e_vex_L_extensions or set up a new table?
         */
        /* discard old info, get new one */
        int code = (int)info->code;
        int idx = (di->vex_encoded) ? (TEST(PREFIX_VEX_L, di->prefixes) ? 2 : 1) : 0;
        info = &vex_L_extensions[code][idx];
    } else if (info->type == VEX_W_EXT) {
        /* discard old info, get new one */
        int code = (int)info->code;
        int idx = (TEST(PREFIX_REX_W, di->prefixes) ? 1 : 0);
        info = &vex_W_extensions[code][idx];
    } else if (info->type == EVEX_W_EXT) {
        /* discard old info, get new one */
        int code = (int)info->code;
        int idx = (TEST(PREFIX_REX_W, di->prefixes) ? 1 : 0);
        info = &evex_W_extensions[code][idx];
    }
    /* can occur AFTER above checks (EXTENSION, in particular) */
    if (info->type == PREFIX_EXT) {
        /* discard old info, get new one */
        info = read_prefix_ext(info, di);
    }
    /* can occur AFTER above checks (PREFIX_EXT, in particular) */
    if (info->type == MOD_EXT) {
        info = &mod_extensions[info->code][(di->mod == 3) ? 1 : 0];
        /* Yes, we have yet another layer, thanks to Intel's poor choice
         * in opcodes -- why didn't they fill out the PREFIX_EXT space?
         */
        if (info->type == RM_EXT) {
            info = &rm_extensions[info->code][di->rm];
        }
        /* We have to support prefix before mod, and mod before prefix */
        if (info->type == PREFIX_EXT) {
            info = read_prefix_ext(info, di);
        }
    }
    /* can occur AFTER above checks (MOD_EXT, in particular) */
    if (info->type == E_VEX_EXT) {
        /* discard old info, get new one */
        int code = (int)info->code;
        int idx = 0;
        if (di->vex_encoded)
            idx = 1;
        else if (di->evex_encoded)
            idx = 2;
        info = &e_vex_extensions[code][idx];
    }
    /* can occur AFTER above checks (EXTENSION, in particular) */
    if (info->type == PREFIX_EXT) {
        /* discard old info, get new one */
        info = read_prefix_ext(info, di);
    }
    /* can occur AFTER above checks (MOD_EXT, in particular) */
    if (info->type == REX_W_EXT) {
        /* discard old info, get new one */
        int code = (int)info->code;
        int idx = (TEST(PREFIX_REX_W, di->prefixes) ? 1 : 0);
        info = &rex_w_extensions[code][idx];
    } else if (info->type == VEX_L_EXT) {
        /* discard old info, get new one */
        int code = (int)info->code;
        int idx = (di->vex_encoded) ? (TEST(PREFIX_VEX_L, di->prefixes) ? 2 : 1) : 0;
        info = &vex_L_extensions[code][idx];
    } else if (info->type == VEX_W_EXT) {
        /* discard old info, get new one */
        int code = (int)info->code;
        int idx = (TEST(PREFIX_REX_W, di->prefixes) ? 1 : 0);
        info = &vex_W_extensions[code][idx];
    } else if (info->type == EVEX_W_EXT) {
        /* discard old info, get new one */
        int code = (int)info->code;
        int idx = (TEST(PREFIX_REX_W, di->prefixes) ? 1 : 0);
        info = &evex_W_extensions[code][idx];
    }
    if (TEST(REQUIRES_PREFIX, info->flags)) {
        byte required = (byte)(info->opcode >> 24);
        bool *prefix_var = NULL;
        if (required == 0) { /* cannot have a prefix */
            /* NOTE(review): prefix_var is still NULL here, so this inner
             * check can never fire -- looks vacuous; confirm intent.
             */
            if (prefix_var != NULL) {
                /* invalid instr */
                info = NULL;
            }
        } else {
            CLIENT_ASSERT(info->opcode > 0xffffff, "decode error in SSSE3/SSE4 instr");
            if (required == DATA_PREFIX_OPCODE)
                prefix_var = &di->data_prefix;
            else if (required == REPNE_PREFIX_OPCODE)
                prefix_var = &di->repne_prefix;
            else if (required == REP_PREFIX_OPCODE)
                prefix_var = &di->rep_prefix;
            else
                CLIENT_ASSERT(false, "internal required-prefix error");
            if (prefix_var == NULL || !*prefix_var) {
                /* Invalid instr.  TODO: have processor w/ SSE4, confirm that
                 * an exception really is raised.
                 */
                info = NULL;
            } else
                *prefix_var = false;
        }
    }
    /* we go through regular tables for vex but only some are valid w/ vex */
    if (info != NULL && di->vex_encoded) {
        if (!TEST(REQUIRES_VEX, info->flags))
            info = NULL; /* invalid encoding */
        else if (TEST(REQUIRES_VEX_L_0, info->flags) && TEST(PREFIX_VEX_L, di->prefixes))
            info = NULL; /* invalid encoding */
    } else if (info != NULL && !di->vex_encoded && TEST(REQUIRES_VEX, info->flags)) {
        info = NULL; /* invalid encoding */
    } else if (info != NULL && di->evex_encoded) {
        if (!TEST(REQUIRES_EVEX, info->flags))
            info = NULL; /* invalid encoding */
        else if (TEST(REQUIRES_VEX_L_0, info->flags) && TEST(PREFIX_VEX_L, di->prefixes))
            info = NULL; /* invalid encoding */
        else if (TEST(REQUIRES_EVEX_LL_0, info->flags) &&
                 TEST(PREFIX_EVEX_LL, di->prefixes))
            info = NULL; /* invalid encoding */
    } else if (info != NULL && !di->evex_encoded && TEST(REQUIRES_EVEX, info->flags))
        info = NULL; /* invalid encoding */
    /* XXX: not currently marking these cases as invalid instructions:
     * - if no TYPE_H:
     *   "Note: In VEX-encoded versions, VEX.vvvv is reserved and must be 1111b otherwise
     *   instructions will #UD."
     * - "an attempt to execute VTESTPS with VEX.W=1 will cause #UD."
     * and similar for VEX.W.
     */
    /* at this point should be an instruction, so type should be an OP_ constant */
    if (info == NULL || info == &invalid_instr || info->type < OP_FIRST ||
        info->type > OP_LAST || (X64_MODE(di) && TEST(X64_INVALID, info->flags)) ||
        (!X64_MODE(di) && TEST(X86_INVALID, info->flags))) {
        /* invalid instruction: up to caller to decide what to do with it */
        /* FIXME case 10672: provide a runtime option to specify new
         * instruction formats */
        DODEBUG({
            /* don't report when decoding DR addresses, as we sometimes try to
             * decode backward (e.g., interrupted_inlined_syscall(): PR 605161)
             * XXX: better to pass in a flag when decoding that we are
             * being speculative!
             */
            if (report_invalid && !is_dynamo_address(di->start_pc)) {
                SYSLOG_INTERNAL_WARNING_ONCE("Invalid opcode encountered");
                if (info != NULL && info->type == INVALID) {
                    LOG(THREAD_GET, LOG_ALL, 1, "Invalid opcode @" PFX ": 0x%x\n",
                        di->start_pc, info->opcode);
                } else {
                    int i;
                    dcontext_t *dcontext = get_thread_private_dcontext();
                    IF_X64(bool old_mode = set_x86_mode(dcontext, di->x86_mode);)
                    int sz = decode_sizeof(dcontext, di->start_pc, NULL _IF_X64(NULL));
                    IF_X64(set_x86_mode(dcontext, old_mode));
                    LOG(THREAD_GET, LOG_ALL, 1,
                        "Error decoding " PFX " == ", di->start_pc);
                    for (i = 0; i < sz; i++) {
                        LOG(THREAD_GET, LOG_ALL, 1, "0x%x ", *(di->start_pc + i));
                    }
                    LOG(THREAD_GET, LOG_ALL, 1, "\n");
                }
            }
        });
        *ret_info = &invalid_instr;
        return NULL;
    }
#ifdef INTERNAL
    DODEBUG({ /* rep & repne should have been completely handled by now */
        /* processor will typically ignore extra prefixes, but we log this
         * internally in case it's our decode messing up instead of weird app instrs
         */
        bool spurious = report_invalid && (di->rep_prefix || di->repne_prefix);
        if (spurious) {
            if (di->rep_prefix &&
                /* case 6861: AMD64 opt: "rep ret" used if br tgt or after cbr */
                pc == di->start_pc + 2 && *(di->start_pc + 1) == RAW_OPCODE_ret)
                spurious = false;
            if (di->repne_prefix) {
                /* i#1899: MPX puts repne prior to branches.  We ignore here until
                 * we have full MPX decoding support (i#1312).
                 */
                /* XXX: We assume the x86 instr_is_* routines only need the opcode.
                 * That is not true for ARM.
                 */
                instr_t inst;
                inst.opcode = info->type;
                if (instr_is_cti(&inst))
                    spurious = false;
            }
        }
        if (spurious) {
            char bytes[17 * 3];
            int i;
            dcontext_t *dcontext = get_thread_private_dcontext();
            IF_X64(bool old_mode = set_x86_mode(dcontext, di->x86_mode);)
            int sz = decode_sizeof(dcontext, di->start_pc, NULL _IF_X64(NULL));
            IF_X64(set_x86_mode(dcontext, old_mode));
            CLIENT_ASSERT(sz <= 17, "decode rep/repne error: unsupported opcode?");
            for (i = 0; i < sz; i++)
                snprintf(&bytes[i * 3], 3, "%02x ", *(di->start_pc + i));
            bytes[sz * 3 - 1] = '\0'; /* -1 to kill trailing space */
            SYSLOG_INTERNAL_WARNING_ONCE(
                "spurious rep/repne prefix @" PFX " (%s): ", di->start_pc, bytes);
        }
    });
#endif
    /* if just want opcode, stop here!  faster for caller to
     * separately call decode_next_pc than for us to decode immeds!
     */
    if (just_opcode) {
        *ret_info = info;
        return NULL;
    }
    if (di->data_prefix) {
        /* prefix was not part of opcode, it's a real prefix */
        /* From Intel manual:
         *   "For non-byte operations: if a 66H prefix is used with
         *   prefix (REX.W = 1), 66H is ignored."
         * That means non-byte-specific operations, for which 66H is
         * ignored as well, right?
         * Xref PR 593593.
         * Note that this means we could assert or remove some of
         * the "rex.w trumps data prefix" logic elsewhere in this file.
         */
        if (TEST(PREFIX_REX_W, di->prefixes)) {
            LOG(THREAD_GET, LOG_ALL, 3, "Ignoring 0x66 in presence of rex.w @" PFX "\n",
                di->start_pc);
        } else {
            di->prefixes |= PREFIX_DATA;
        }
    }
    if ((di->repne_prefix || di->rep_prefix) &&
        (TEST(PREFIX_LOCK, di->prefixes) ||
         /* xrelease can go on non-0xa3 mov_st w/o lock prefix */
         (di->repne_prefix && info->type == OP_mov_st &&
          (info->opcode & 0xa30000) != 0xa30000))) {
        /* we don't go so far as to ensure the mov_st is of the right type */
        if (di->repne_prefix)
            di->prefixes |= PREFIX_XACQUIRE;
        if (di->rep_prefix)
            di->prefixes |= PREFIX_XRELEASE;
    }
    /* read any trailing immediate bytes */
    if (info->dst1_type != TYPE_NONE)
        pc = read_operand(pc, di, info->dst1_type, info->dst1_size);
    if (info->dst2_type != TYPE_NONE)
        pc = read_operand(pc, di, info->dst2_type, info->dst2_size);
    if (info->src1_type != TYPE_NONE)
        pc = read_operand(pc, di, info->src1_type, info->src1_size);
    if (info->src2_type != TYPE_NONE)
        pc = read_operand(pc, di, info->src2_type, info->src2_size);
    if (info->src3_type != TYPE_NONE)
        pc = read_operand(pc, di, info->src3_type, info->src3_size);
    if (info->type == SUFFIX_EXT) {
        /* Shouldn't be any more bytes (immed bytes) read after the modrm+suffix! */
        DODEBUG({ CLIENT_ASSERT(pc == post_suffix_pc, "decode error on 3DNow instr"); });
    }
    /* return values */
    *ret_info = info;
    return pc;
}
/****************************************************************************
* Full decoding
*/
/* Caller must check for rex.{r,b} extensions before calling this routine */
static reg_id_t
reg8_alternative(decode_info_t *di, reg_id_t reg, uint prefixes)
{
    /* for x64, if any rex prefix exists, we use SPL...SDL instead of
     * AH..BH (this seems to be the only use of 0x40 == PREFIX_REX_GENERAL)
     */
    bool is_x86_high8 = (reg >= REG_START_x86_8 && reg <= REG_STOP_x86_8);
    if (!X64_MODE(di) || !is_x86_high8 || !TESTANY(PREFIX_REX_ALL, prefixes))
        return reg;
    return (reg - REG_START_x86_8 + REG_START_x64_8);
}
/* which register within modrm, vex or evex we're decoding */
typedef enum {
    DECODE_REG_REG,    /* modrm.reg field (di->reg) */
    DECODE_REG_BASE,   /* sib.base field (di->base) */
    DECODE_REG_INDEX,  /* sib.index field (di->index) */
    DECODE_REG_RM,     /* modrm.rm field (di->rm) */
    DECODE_REG_VEX,    /* vex.vvvv field (di->vex_vvvv, bit-inverted) */
    DECODE_REG_EVEX,   /* evex.vvvv field (di->evex_vvvv, bit-inverted) */
    DECODE_REG_OPMASK, /* evex.aaa opmask field (di->evex_aaa) */
} decode_reg_t;
/* Decodes the register selected by which_reg from the fields already parsed
 * into di (modrm.reg/rm, sib.base/index, vex/evex.vvvv, evex.aaa) and maps it
 * to a DR reg_id_t according to the operand type and (raw) operand size.
 * Returns REG_NULL for encodings that do not name a valid register.
 *
 * Pass in the raw opsize, NOT a size passed through resolve_variable_size(),
 * to avoid allowing OPSZ_6_irex10_short4 w/ data16.
 * To create a sub-sized register, caller must set size separately.
 */
static reg_id_t
decode_reg(decode_reg_t which_reg, decode_info_t *di, byte optype, opnd_size_t opsize)
{
    bool extend = false;        /* rex extension: +8 to the register number */
    bool avx512_extend = false; /* evex extension: +16 to the register number */
    byte reg = 0;
    switch (which_reg) {
    case DECODE_REG_REG:
        reg = di->reg;
        extend = X64_MODE(di) && TEST(PREFIX_REX_R, di->prefixes);
        avx512_extend = TEST(PREFIX_EVEX_RR, di->prefixes);
        break;
    case DECODE_REG_BASE:
        reg = di->base;
        extend = X64_MODE(di) && TEST(PREFIX_REX_B, di->prefixes);
        break;
    case DECODE_REG_INDEX:
        reg = di->index;
        extend = X64_MODE(di) && TEST(PREFIX_REX_X, di->prefixes);
        break;
    case DECODE_REG_RM:
        reg = di->rm;
        extend = X64_MODE(di) && TEST(PREFIX_REX_B, di->prefixes);
        /* evex reuses the rex.x bit to extend modrm.rm into regs 16-31 */
        if (di->evex_encoded)
            avx512_extend = TEST(PREFIX_REX_X, di->prefixes);
        break;
    case DECODE_REG_VEX:
        /* Part of XOP/AVX: vex.vvvv selects general-purpose register.
         * It has 4 bits so no separate prefix bit is needed to extend.
         */
        reg = (~di->vex_vvvv) & 0xf; /* bit-inverted */
        extend = false;
        avx512_extend = false;
        break;
    case DECODE_REG_EVEX:
        /* Part of AVX-512: evex.vvvv selects general-purpose register.
         * It has 4 bits so no separate prefix bit is needed to extend.
         * Intel's Software Developer's Manual Vol-2A 2.6 AVX-512 ENCODING fails to
         * mention the fact that the bits are inverted in the EVEX prefix. Experimentally
         * confirmed.
         */
        reg = (~di->evex_vvvv) & 0xf; /* bit-inverted */
        extend = false;
        avx512_extend = TEST(PREFIX_EVEX_VV, di->prefixes); /* bit-inverted */
        break;
    case DECODE_REG_OPMASK:
        /* Part of AVX-512: evex.aaa selects opmask register. */
        reg = di->evex_aaa & 0x7;
        break;
    default: CLIENT_ASSERT(false, "internal unknown reg error");
    }
    /* First dispatch on operand type for the non-GPR register files. */
    switch (optype) {
    case TYPE_P:
    case TYPE_Q:
    case TYPE_P_MODRM: return (REG_START_MMX + reg); /* no x64 extensions */
    case TYPE_V:
    case TYPE_W:
    case TYPE_V_MODRM:
    case TYPE_VSIB: {
        /* xmm/ymm/zmm: pick the register file from the vector-length prefixes */
        reg_id_t extend_reg = extend ? reg + 8 : reg;
        extend_reg = avx512_extend ? extend_reg + 16 : extend_reg;
        return (TEST(PREFIX_EVEX_LL, di->prefixes)
                    ? (DR_REG_START_ZMM + extend_reg)
                    : ((TEST(PREFIX_VEX_L, di->prefixes) &&
                        /* Not only do we use this for VEX .LIG (where raw reg is either
                         * OPSZ_32 or OPSZ_16_vex32) but also for VSIB which currently
                         * does not get up to OPSZ_16 so we can use this negative
                         * check.
                         * XXX i#1312: vgather/vscatter VSIB addressing may be OPSZ_16?
                         * For EVEX .LIG, raw reg will be able to be OPSZ_64 or
                         * OPSZ_16_vex32_evex64.
                         */
                        expand_subreg_size(opsize) != OPSZ_16)
                           ? (REG_START_YMM + extend_reg)
                           : (REG_START_XMM + extend_reg)));
    }
    case TYPE_S:
        /* only 6 segment registers exist */
        if (reg >= 6)
            return REG_NULL;
        return (REG_START_SEGMENT + reg);
    case TYPE_C: return (extend ? (REG_START_CR + 8 + reg) : (REG_START_CR + reg));
    case TYPE_D: return (extend ? (REG_START_DR + 8 + reg) : (REG_START_DR + reg));
    case TYPE_K_REG:
    case TYPE_K_MODRM:
    case TYPE_K_MODRM_R:
    case TYPE_K_VEX:
    case TYPE_K_EVEX: return DR_REG_START_OPMASK + reg;
    case TYPE_E:
    case TYPE_G:
    case TYPE_R:
    case TYPE_B:
    case TYPE_M:
    case TYPE_INDIR_E:
    case TYPE_FLOATMEM:
        /* GPR: fall-through since variable subset of full register */
        break;
    default: CLIENT_ASSERT(false, "internal unknown reg error");
    }
    /* Do not allow a register for 'p' or 'a' types. FIXME: maybe *_far_ind_* should
     * use TYPE_INDIR_M instead of TYPE_INDIR_E? What other things are going to turn
     * into asserts or crashes instead of invalid instrs based on events as fragile
     * as these decode routines moving sizes around?
     */
    if (opsize != OPSZ_6_irex10_short4 && opsize != OPSZ_8_short4)
        opsize = resolve_variable_size(di, opsize, true);
    /* GPR path: the resolved size selects the 8/16/32/64-bit register file */
    switch (opsize) {
    case OPSZ_1:
        if (extend)
            return (REG_START_8 + 8 + reg);
        else
            return reg8_alternative(di, REG_START_8 + reg, di->prefixes);
    case OPSZ_2: return (extend ? (REG_START_16 + 8 + reg) : (REG_START_16 + reg));
    case OPSZ_4: return (extend ? (REG_START_32 + 8 + reg) : (REG_START_32 + reg));
    case OPSZ_8: return (extend ? (REG_START_64 + 8 + reg) : (REG_START_64 + reg));
    case OPSZ_6:
    case OPSZ_6_irex10_short4:
    case OPSZ_8_short4:
        /* invalid: no register of size p */
        return REG_NULL;
    default:
        /* ok to assert since params controlled by us */
        CLIENT_ASSERT(false, "decode error: unknown register size");
        return REG_NULL;
    }
}
/* Decodes the modrm byte (and sib byte / displacement, if present) whose
 * fields were already parsed into di.  If reg_opnd is non-NULL it is filled
 * from modrm.reg; if rm_opnd is non-NULL it is filled from the modrm.rm (plus
 * sib) encoding, producing either a register or a memory operand.
 * Returns false if the encoding does not form a valid operand of the
 * requested type (no assert for some cases that occur in real byte streams).
 */
static bool
decode_modrm(decode_info_t *di, byte optype, opnd_size_t opsize, opnd_t *reg_opnd,
             opnd_t *rm_opnd)
{
    /* for x64, addr prefix affects only base/index and truncates final addr:
     * modrm + sib table is the same
     */
    bool addr16 = !X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes);

    if (reg_opnd != NULL) {
        reg_id_t reg = decode_reg(DECODE_REG_REG, di, optype, opsize);
        if (reg == REG_NULL)
            return false;
        *reg_opnd = opnd_create_reg(reg);
        opnd_set_size(reg_opnd, resolve_variable_size(di, opsize, true /*is reg*/));
    }

    if (rm_opnd != NULL) {
        reg_id_t base_reg = REG_NULL;
        int disp = 0;
        reg_id_t index_reg = REG_NULL;
        int scale = 0;
        /* VSIB memory operands decode their index register differently */
        char memtype = (optype == TYPE_VSIB ? TYPE_VSIB : TYPE_M);
        opnd_size_t memsize = resolve_addr_size(di);
        bool encode_zero_disp, force_full_disp;
        if (di->has_disp)
            disp = di->disp;
        else
            disp = 0;
        if (di->has_sib) {
            CLIENT_ASSERT(!addr16, "decode error: x86 addr16 cannot have a SIB byte");
            if (di->index == 4 &&
                /* rex.x enables r12 as index */
                (!X64_MODE(di) || !TEST(PREFIX_REX_X, di->prefixes)) &&
                optype != TYPE_VSIB) {
                /* no scale/index */
                index_reg = REG_NULL;
            } else {
                index_reg = decode_reg(DECODE_REG_INDEX, di, memtype, memsize);
                if (index_reg == REG_NULL) {
                    CLIENT_ASSERT(false, "decode error: !index: internal modrm error");
                    return false;
                }
                /* 2-bit sib.scale field encodes scale factors 1/2/4/8 */
                if (di->scale == 0)
                    scale = 1;
                else if (di->scale == 1)
                    scale = 2;
                else if (di->scale == 2)
                    scale = 4;
                else if (di->scale == 3)
                    scale = 8;
            }
            if (di->base == 5 && di->mod == 0) {
                /* no base */
                base_reg = REG_NULL;
            } else {
                base_reg = decode_reg(DECODE_REG_BASE, di, TYPE_M, memsize);
                if (base_reg == REG_NULL) {
                    CLIENT_ASSERT(false, "decode error: internal modrm decode error");
                    return false;
                }
            }
        } else {
            if (optype == TYPE_VSIB)
                return false; /* invalid w/o vsib byte */
            if ((!addr16 && di->mod == 0 && di->rm == 5) ||
                (addr16 && di->mod == 0 && di->rm == 6)) {
                /* just absolute displacement, or rip-relative for x64 */
#ifdef X64
                if (X64_MODE(di)) {
                    /* rip-relative: convert from relative offset to absolute target pc */
                    byte *addr;
                    CLIENT_ASSERT(di->start_pc != NULL,
                                  "internal decode error: start pc not set");
                    /* when decoding from a copy, the target is relative to the
                     * original pc, not the copy's */
                    if (di->orig_pc != di->start_pc)
                        addr = di->orig_pc + di->len + di->disp;
                    else
                        addr = di->start_pc + di->len + di->disp;
                    if (TEST(PREFIX_ADDR, di->prefixes)) {
                        /* Need to clear upper 32 bits.
                         * Debuggers do not display this truncation, though
                         * both Intel and AMD manuals describe it.
                         * I did verify it w/ actual execution.
                         */
                        ASSERT_NOT_TESTED();
                        addr = (byte *)((ptr_uint_t)addr & 0xffffffff);
                    }
                    *rm_opnd = opnd_create_far_rel_addr(
                        di->seg_override, (void *)addr,
                        resolve_variable_size(di, opsize, false));
                    return true;
                } else
#endif
                    base_reg = REG_NULL;
                index_reg = REG_NULL;
            } else if (di->mod == 3) {
                /* register */
                reg_id_t rm_reg = decode_reg(DECODE_REG_RM, di, optype, opsize);
                if (rm_reg == REG_NULL) /* no assert since happens, e.g., ff d9 */
                    return false;
                else {
                    *rm_opnd = opnd_create_reg(rm_reg);
                    opnd_set_size(rm_opnd,
                                  resolve_variable_size(di, opsize, true /*is reg*/));
                    return true;
                }
            } else {
                /* non-sib reg-based memory address */
                if (addr16) {
                    /* funny order requiring custom decode */
                    switch (di->rm) {
                    case 0:
                        base_reg = REG_BX;
                        index_reg = REG_SI;
                        scale = 1;
                        break;
                    case 1:
                        base_reg = REG_BX;
                        index_reg = REG_DI;
                        scale = 1;
                        break;
                    case 2:
                        base_reg = REG_BP;
                        index_reg = REG_SI;
                        scale = 1;
                        break;
                    case 3:
                        base_reg = REG_BP;
                        index_reg = REG_DI;
                        scale = 1;
                        break;
                    case 4: base_reg = REG_SI; break;
                    case 5: base_reg = REG_DI; break;
                    case 6:
                        base_reg = REG_BP;
                        CLIENT_ASSERT(di->mod != 0,
                                      "decode error: %bp cannot have mod 0");
                        break;
                    case 7: base_reg = REG_BX; break;
                    default:
                        CLIENT_ASSERT(false, "decode error: unknown modrm rm");
                        break;
                    }
                } else {
                    /* single base reg */
                    base_reg = decode_reg(DECODE_REG_RM, di, memtype, memsize);
                    if (base_reg == REG_NULL) {
                        CLIENT_ASSERT(false,
                                      "decode error: !base: internal modrm decode error");
                        return false;
                    }
                }
            }
        }
        /* We go ahead and preserve the force bools if the original really had a 0
         * disp; up to user to unset bools when changing disp value (FIXME: should
         * we auto-unset on first mod?)
         */
        encode_zero_disp = di->has_disp && disp == 0 &&
            /* there is no bp base without a disp */
            (!addr16 || base_reg != REG_BP);
        force_full_disp =
            di->has_disp && disp >= INT8_MIN && disp <= INT8_MAX && di->mod == 2;
        if (di->seg_override != REG_NULL) {
            *rm_opnd = opnd_create_far_base_disp_ex(
                di->seg_override, base_reg, index_reg, scale, disp,
                resolve_variable_size(di, opsize, false), encode_zero_disp,
                force_full_disp, TEST(PREFIX_ADDR, di->prefixes));
        } else {
            /* Note that OP_{jmp,call}_far_ind does NOT have a far base disp
             * operand: it is a regular base disp containing 6 bytes that
             * specify a segment selector and address. The opcode must be
             * examined to know how to interpret those 6 bytes.
             */
            *rm_opnd = opnd_create_base_disp_ex(base_reg, index_reg, scale, disp,
                                                resolve_variable_size(di, opsize, false),
                                                encode_zero_disp, force_full_disp,
                                                TEST(PREFIX_ADDR, di->prefixes));
        }
    }
    return true;
}
/* Consumes one stored immediate from di: the first slot unless it has already
 * been used up, in which case the second slot.  Marks the slot it reads as
 * used by setting its size to OPSZ_NA.
 */
static ptr_int_t
get_immed(decode_info_t *di, opnd_size_t opsize)
{
    ptr_int_t val = 0;
    if (di->size_immed != OPSZ_NA) {
        /* ok b/c only instr_info_t fields passed */
        CLIENT_ASSERT(di->size_immed != OPSZ_NA, "decode immediate size error");
        val = di->immed;
        di->size_immed = OPSZ_NA; /* mark as used up */
    } else {
        /* first slot already consumed: fall back to the second immediate */
        CLIENT_ASSERT(di->size_immed2 != OPSZ_NA, "decode immediate size error");
        val = di->immed2;
        di->size_immed2 = OPSZ_NA; /* mark as used up */
    }
    return val;
}
/* Resolves a variable-sized register, given as its 32-bit variant reg32, to
 * the actual register for the current mode and prefixes in di:
 *   addr       - size follows the address size (addr prefix) not the data size
 *   can_shrink - an addr16/data16 prefix shrinks the register to 16 bits
 * x64-only parameters:
 *   default_64 - operand defaults to 64 bits in 64-bit mode
 *   can_grow   - rex.w promotes the register to 64 bits
 *   extendable - the +r register number can be extended by rex.b
 * Also takes in reg8 for TYPE_REG_EX mov_imm */
reg_id_t
resolve_var_reg(decode_info_t *di /*IN: x86_mode, prefixes*/, reg_id_t reg32, bool addr,
                bool can_shrink _IF_X64(bool default_64) _IF_X64(bool can_grow)
                    _IF_X64(bool extendable))
{
#ifdef X64
    if (extendable && X64_MODE(di) && di->prefixes != 0 /*optimization*/) {
        /* Note that Intel's table 3-1 on +r possibilities is incorrect:
         * it lists rex.r, while Table 2-4 lists rex.b which is correct.
         */
        if (TEST(PREFIX_REX_B, di->prefixes))
            reg32 = reg32 + 8;
        else
            reg32 = reg8_alternative(di, reg32, di->prefixes);
    }
#endif
    if (addr) {
#ifdef X64
        if (X64_MODE(di)) {
            CLIENT_ASSERT(default_64, "addr-based size must be default 64");
            if (!can_shrink || !TEST(PREFIX_ADDR, di->prefixes))
                return reg_32_to_64(reg32);
            /* else leave 32 (it's addr32 not addr16) */
        } else
#endif
            if (can_shrink && TEST(PREFIX_ADDR, di->prefixes))
            return reg_32_to_16(reg32);
    } else {
#ifdef X64
        /* rex.w trumps data prefix */
        if (X64_MODE(di) &&
            ((can_grow && TEST(PREFIX_REX_W, di->prefixes)) ||
             (default_64 && (!can_shrink || !TEST(PREFIX_DATA, di->prefixes)))))
            return reg_32_to_64(reg32);
        else
#endif
            if (can_shrink && TEST(PREFIX_DATA, di->prefixes))
            return reg_32_to_16(reg32);
    }
    return reg32;
}
/* Returns the effective data segment: the segment-override prefix if one was
 * seen, otherwise the default DS.
 */
static reg_id_t
ds_seg(decode_info_t *di)
{
    reg_id_t seg = SEG_DS;
    if (di->seg_override != REG_NULL) {
#ifdef X64
        /* Although the AMD docs say that es,cs,ss,ds prefixes are NOT treated as
         * segment override prefixes and instead as NULL prefixes, Intel docs do not
         * say that, and both gdb and windbg disassemble as though the prefixes are
         * taking effect. We therefore do not suppress those prefixes.
         */
#endif
        seg = di->seg_override;
    }
    return seg;
}
/* Decodes a single operand of type optype with raw size opsize from the state
 * in di into *opnd.  Dispatches to decode_modrm for modrm-encoded operands,
 * consumes stored immediates for immediate/pc operands, and synthesizes the
 * implicit operands (TYPE_X/Y/XLAT/etc.).  Returns false if the encoding is
 * invalid for the requested operand type.
 */
static bool
decode_operand(decode_info_t *di, byte optype, opnd_size_t opsize, opnd_t *opnd)
{
    /* resolving here, for non-reg, makes for simpler code: though the
     * most common types don't need this.
     */
    opnd_size_t ressize = resolve_variable_size(di, opsize, false);
    switch (optype) {
    case TYPE_NONE: *opnd = opnd_create_null(); return true;
    case TYPE_REG:
        /* NOTE(review): for TYPE_REG the opsize field appears to carry a
         * hardcoded register id rather than a size -- confirm against the
         * decode table definitions. */
        *opnd = opnd_create_reg(opsize);
        /* here and below, for all TYPE_*REG*: no need to set size as it's a GPR */
        return true;
    case TYPE_XREG:
        *opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
                                                false /*!shrinkable*/
                                                _IF_X64(true /*d64*/)
                                                    _IF_X64(false /*!growable*/)
                                                        _IF_X64(false /*!extendable*/)));
        return true;
    case TYPE_VAR_REG:
        *opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
                                                true /*shrinkable*/
                                                _IF_X64(false /*d32*/)
                                                    _IF_X64(true /*growable*/)
                                                        _IF_X64(false /*!extendable*/)));
        return true;
    case TYPE_VARZ_REG:
        *opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
                                                true /*shrinkable*/
                                                _IF_X64(false /*d32*/)
                                                    _IF_X64(false /*!growable*/)
                                                        _IF_X64(false /*!extendable*/)));
        return true;
    case TYPE_VAR_XREG:
        *opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
                                                true /*shrinkable*/
                                                _IF_X64(true /*d64*/)
                                                    _IF_X64(false /*!growable*/)
                                                        _IF_X64(false /*!extendable*/)));
        return true;
    case TYPE_VAR_REGX:
        *opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
                                                false /*!shrinkable*/
                                                _IF_X64(false /*!d64*/)
                                                    _IF_X64(true /*growable*/)
                                                        _IF_X64(false /*!extendable*/)));
        return true;
    case TYPE_VAR_ADDR_XREG:
        *opnd = opnd_create_reg(resolve_var_reg(di, opsize, true /*addr*/,
                                                true /*shrinkable*/
                                                _IF_X64(true /*d64*/)
                                                    _IF_X64(false /*!growable*/)
                                                        _IF_X64(false /*!extendable*/)));
        return true;
    case TYPE_REG_EX:
        *opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
                                                false /*!shrink*/
                                                _IF_X64(false /*d32*/)
                                                    _IF_X64(false /*!growable*/)
                                                        _IF_X64(true /*extendable*/)));
        return true;
    case TYPE_VAR_REG_EX:
        *opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
                                                true /*shrinkable*/
                                                _IF_X64(false /*d32*/)
                                                    _IF_X64(true /*growable*/)
                                                        _IF_X64(true /*extendable*/)));
        return true;
    case TYPE_VAR_XREG_EX:
        *opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
                                                true /*shrinkable*/
                                                _IF_X64(true /*d64*/)
                                                    _IF_X64(false /*!growable*/)
                                                        _IF_X64(true /*extendable*/)));
        return true;
    case TYPE_VAR_REGX_EX:
        *opnd = opnd_create_reg(resolve_var_reg(di, opsize, false /*!addr*/,
                                                false /*!shrink*/
                                                _IF_X64(false /*d64*/)
                                                    _IF_X64(true /*growable*/)
                                                        _IF_X64(true /*extendable*/)));
        return true;
    case TYPE_FLOATMEM:
    case TYPE_M:
    case TYPE_VSIB:
        /* ensure referencing memory */
        if (di->mod >= 3)
            return false;
        /* fall through */
    case TYPE_E:
    case TYPE_Q:
    case TYPE_W: return decode_modrm(di, optype, opsize, NULL, opnd);
    case TYPE_R:
    case TYPE_P_MODRM:
    case TYPE_V_MODRM:
        /* ensure referencing a register */
        if (di->mod != 3)
            return false;
        return decode_modrm(di, optype, opsize, NULL, opnd);
    case TYPE_G:
    case TYPE_P:
    case TYPE_V:
    case TYPE_S:
    case TYPE_C:
    case TYPE_D: return decode_modrm(di, optype, opsize, opnd, NULL);
    case TYPE_I:
        *opnd = opnd_create_immed_int(get_immed(di, opsize), ressize);
        return true;
    case TYPE_1:
        /* implicit constant 1 (e.g. shift-by-1 forms): no bytes consumed */
        CLIENT_ASSERT(opsize == OPSZ_0, "internal decode inconsistency");
        *opnd = opnd_create_immed_int(1, ressize);
        return true;
    case TYPE_FLOATCONST:
        CLIENT_ASSERT(opsize == OPSZ_0, "internal decode inconsistency");
        /* i#386: avoid floating-point instructions */
        *opnd = opnd_create_immed_float_for_opcode(di->opcode);
        return true;
    case TYPE_J:
        if (di->seg_override == SEG_JCC_NOT_TAKEN || di->seg_override == SEG_JCC_TAKEN) {
            /* SEG_DS - taken, pt */
            /* SEG_CS - not taken, pn */
            /* starting from RH9 I see code using this */
            LOG(THREAD_GET, LOG_EMIT, 5, "disassemble: branch hint %s:\n",
                di->seg_override == SEG_JCC_TAKEN ? "pt" : "pn");
            if (di->seg_override == SEG_JCC_NOT_TAKEN)
                di->prefixes |= PREFIX_JCC_NOT_TAKEN;
            else
                di->prefixes |= PREFIX_JCC_TAKEN;
            di->seg_override = REG_NULL;
            STATS_INC(num_branch_hints);
        }
        /* just ignore other segment prefixes -- don't assert */
        *opnd = opnd_create_pc((app_pc)get_immed(di, opsize));
        return true;
    case TYPE_A: {
        /* far pc: selector:offset immediate */
        /* ok since instr_info_t fields */
        CLIENT_ASSERT(!X64_MODE(di), "x64 has no type A instructions");
        CLIENT_ASSERT(opsize == OPSZ_6_irex10_short4, "decode A operand error");
        /* just ignore segment prefixes -- don't assert */
        if (TEST(PREFIX_DATA, di->prefixes)) {
            /* 4-byte immed */
            ptr_int_t val = get_immed(di, opsize);
            *opnd = opnd_create_far_pc((ushort)(((ptr_int_t)val & 0xffff0000) >> 16),
                                       (app_pc)((ptr_int_t)val & 0x0000ffff));
        } else {
            /* 6-byte immed */
            /* ok since instr_info_t fields */
            CLIENT_ASSERT(di->size_immed == OPSZ_6 && di->size_immed2 == OPSZ_6,
                          "decode A operand 6-byte immed error");
            ASSERT(CHECK_TRUNCATE_TYPE_short(di->immed));
            *opnd = opnd_create_far_pc((ushort)(short)di->immed, (app_pc)di->immed2);
            di->size_immed = OPSZ_NA;
            di->size_immed2 = OPSZ_NA;
        }
        return true;
    }
    case TYPE_O: {
        /* no modrm byte, offset follows directly */
        ptr_int_t immed = get_immed(di, resolve_addr_size(di));
        *opnd = opnd_create_far_abs_addr(di->seg_override, (void *)immed, ressize);
        return true;
    }
    case TYPE_X:
        /* this means the memory address DS:(E)SI */
        if (!X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes)) {
            *opnd =
                opnd_create_far_base_disp(ds_seg(di), REG_SI, REG_NULL, 0, 0, ressize);
        } else if (!X64_MODE(di) || TEST(PREFIX_ADDR, di->prefixes)) {
            *opnd =
                opnd_create_far_base_disp(ds_seg(di), REG_ESI, REG_NULL, 0, 0, ressize);
        } else {
            *opnd =
                opnd_create_far_base_disp(ds_seg(di), REG_RSI, REG_NULL, 0, 0, ressize);
        }
        return true;
    case TYPE_Y:
        /* this means the memory address ES:(E)DI */
        if (!X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes))
            *opnd = opnd_create_far_base_disp(SEG_ES, REG_DI, REG_NULL, 0, 0, ressize);
        else if (!X64_MODE(di) || TEST(PREFIX_ADDR, di->prefixes))
            *opnd = opnd_create_far_base_disp(SEG_ES, REG_EDI, REG_NULL, 0, 0, ressize);
        else
            *opnd = opnd_create_far_base_disp(SEG_ES, REG_RDI, REG_NULL, 0, 0, ressize);
        return true;
    case TYPE_XLAT:
        /* this means the memory address DS:(E)BX+AL */
        if (!X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes))
            *opnd = opnd_create_far_base_disp(ds_seg(di), REG_BX, REG_AL, 1, 0, ressize);
        else if (!X64_MODE(di) || TEST(PREFIX_ADDR, di->prefixes))
            *opnd = opnd_create_far_base_disp(ds_seg(di), REG_EBX, REG_AL, 1, 0, ressize);
        else
            *opnd = opnd_create_far_base_disp(ds_seg(di), REG_RBX, REG_AL, 1, 0, ressize);
        return true;
    case TYPE_MASKMOVQ:
        /* this means the memory address DS:(E)DI */
        if (!X64_MODE(di) && TEST(PREFIX_ADDR, di->prefixes)) {
            *opnd =
                opnd_create_far_base_disp(ds_seg(di), REG_DI, REG_NULL, 0, 0, ressize);
        } else if (!X64_MODE(di) || TEST(PREFIX_ADDR, di->prefixes)) {
            *opnd =
                opnd_create_far_base_disp(ds_seg(di), REG_EDI, REG_NULL, 0, 0, ressize);
        } else {
            *opnd =
                opnd_create_far_base_disp(ds_seg(di), REG_RDI, REG_NULL, 0, 0, ressize);
        }
        return true;
    case TYPE_INDIR_REG:
        /* FIXME: how know data size? for now just use reg size: our only use
         * of this does not have a varying hardcoded reg, fortunately. */
        *opnd = opnd_create_base_disp(opsize, REG_NULL, 0, 0, reg_get_size(opsize));
        return true;
    case TYPE_INDIR_VAR_XREG:          /* indirect reg varies by ss only, base is 4x8,
                                        * opsize varies by data16 */
    case TYPE_INDIR_VAR_REG:           /* indirect reg varies by ss only, base is 4x8,
                                        * opsize varies by rex and data16 */
    case TYPE_INDIR_VAR_XIREG:         /* indirect reg varies by ss only, base is 4x8,
                                        * opsize varies by data16 except on 64-bit Intel */
    case TYPE_INDIR_VAR_XREG_OFFS_1:   /* TYPE_INDIR_VAR_XREG + an offset */
    case TYPE_INDIR_VAR_XREG_OFFS_8:   /* TYPE_INDIR_VAR_XREG + an offset + scale */
    case TYPE_INDIR_VAR_XREG_OFFS_N:   /* TYPE_INDIR_VAR_XREG + an offset + scale */
    case TYPE_INDIR_VAR_XIREG_OFFS_1:  /* TYPE_INDIR_VAR_XIREG + an offset + scale */
    case TYPE_INDIR_VAR_REG_OFFS_2:    /* TYPE_INDIR_VAR_REG + offset + scale */
    case TYPE_INDIR_VAR_XREG_SIZEx8:   /* TYPE_INDIR_VAR_XREG + scale */
    case TYPE_INDIR_VAR_REG_SIZEx2:    /* TYPE_INDIR_VAR_REG + scale */
    case TYPE_INDIR_VAR_REG_SIZEx3x5:  /* TYPE_INDIR_VAR_REG + scale */
    {
        reg_id_t reg = resolve_var_reg(di, opsize, true /*doesn't matter*/,
                                       false /*!shrinkable*/
                                       _IF_X64(true /*d64*/) _IF_X64(false /*!growable*/)
                                           _IF_X64(false /*!extendable*/));
        opnd_size_t sz =
            resolve_variable_size(di, indir_var_reg_size(di, optype), false /*not reg*/);
        /* NOTE - needs to match size in opnd_type_ok() and instr_create.h */
        *opnd = opnd_create_base_disp(
            reg, REG_NULL, 0, indir_var_reg_offs_factor(optype) * opnd_size_in_bytes(sz),
            sz);
        return true;
    }
    case TYPE_INDIR_E:
        /* how best mark as indirect?
         * in current usage decode_modrm will be treated as indirect, becoming
         * a base_disp operand, vs. an immed, which becomes a pc operand
         * besides, Ap is just as indirect as i_Ep!
         */
        return decode_operand(di, TYPE_E, opsize, opnd);
    case TYPE_L: {
        CLIENT_ASSERT(!TEST(PREFIX_EVEX_LL, di->prefixes), "XXX i#1312: unsupported.");
        /* part of AVX: top 4 bits of 8-bit immed select xmm/ymm register */
        ptr_int_t immed = get_immed(di, OPSZ_1);
        reg_id_t reg = (reg_id_t)(immed & 0xf0) >> 4;
        *opnd = opnd_create_reg(((TEST(PREFIX_VEX_L, di->prefixes) &&
                                  /* see .LIG notes above */
                                  expand_subreg_size(opsize) != OPSZ_16)
                                     ? REG_START_YMM
                                     : REG_START_XMM) +
                                reg);
        opnd_set_size(opnd, resolve_variable_size(di, opsize, true /*is reg*/));
        return true;
    }
    case TYPE_H: {
        /* As part of AVX and AVX-512, vex.vvvv selects xmm/ymm/zmm register. Note that
         * vex.vvvv and evex.vvvv are a union.
         */
        reg_id_t reg = (~di->vex_vvvv) & 0xf; /* bit-inverted */
        if (TEST(PREFIX_EVEX_VV, di->prefixes)) {
            /* This assumes that the register ranges of DR_REG_XMM, DR_REG_YMM, and
             * DR_REG_ZMM are contiguous.
             */
            reg += 16;
        }
        if (TEST(PREFIX_EVEX_LL, di->prefixes)) {
            reg += DR_REG_START_ZMM;
        } else if (TEST(PREFIX_VEX_L, di->prefixes) &&
                   /* see .LIG notes above */
                   expand_subreg_size(opsize) != OPSZ_16) {
            reg += DR_REG_START_YMM;
        } else {
            reg += DR_REG_START_XMM;
        }
        *opnd = opnd_create_reg(reg);
        opnd_set_size(opnd, resolve_variable_size(di, opsize, true /*is reg*/));
        return true;
    }
    case TYPE_B: {
        /* Part of XOP/AVX/AVX-512: vex.vvvv or evex.vvvv selects general-purpose
         * register.
         */
        if (di->evex_encoded)
            *opnd = opnd_create_reg(decode_reg(DECODE_REG_EVEX, di, optype, opsize));
        else
            *opnd = opnd_create_reg(decode_reg(DECODE_REG_VEX, di, optype, opsize));
        /* no need to set size as it's a GPR */
        return true;
    }
    case TYPE_K_MODRM: {
        /* part of AVX-512: modrm.rm selects opmask register or mem addr */
        if (di->mod != 3) {
            return decode_modrm(di, optype, opsize, NULL, opnd);
        }
        /* fall through*/
    }
    case TYPE_K_MODRM_R: {
        /* part of AVX-512: modrm.rm selects opmask register */
        *opnd = opnd_create_reg(decode_reg(DECODE_REG_RM, di, optype, opsize));
        return true;
    }
    case TYPE_K_REG: {
        /* part of AVX-512: modrm.reg selects opmask register */
        *opnd = opnd_create_reg(decode_reg(DECODE_REG_REG, di, optype, opsize));
        return true;
    }
    case TYPE_K_VEX: {
        /* part of AVX-512: vex.vvvv selects opmask register */
        *opnd = opnd_create_reg(decode_reg(DECODE_REG_VEX, di, optype, opsize));
        return true;
    }
    case TYPE_K_EVEX: {
        /* part of AVX-512: evex.aaa selects opmask register */
        *opnd = opnd_create_reg(decode_reg(DECODE_REG_OPMASK, di, optype, opsize));
        return true;
    }
    default:
        /* ok to assert, types coming only from instr_info_t */
        CLIENT_ASSERT(false, "decode error: unknown operand type");
    }
    return false;
}
/* Maps an instruction's predication flags to its predicate type:
 * cc-predicated instructions yield the condition derived from their cmovcc
 * opcode, complex-predicated ones yield DR_PRED_COMPLEX, all others NONE.
 */
dr_pred_type_t
decode_predicate_from_instr_info(uint opcode, const instr_info_t *info)
{
    if (TEST(HAS_PRED_CC, info->flags)) {
        /* translate the cmovcc opcode into its jcc condition offset */
        return DR_PRED_O + instr_cmovcc_to_jcc(opcode) - OP_jo;
    }
    if (TEST(HAS_PRED_COMPLEX, info->flags))
        return DR_PRED_COMPLEX;
    return DR_PRED_NONE;
}
/****************************************************************************
* Exported routines
*/
/* Decodes only enough of the instruction at address pc to determine its
 * eflags usage, returned in *usage as EFLAGS_ constants or'ed together.
 * This sits halfway between Level 1 and Level 2: a Level 1 decoding plus
 * eflags information (usually only available at Level 2).
 * Returns the address of the next byte after the decoded instruction, or
 * NULL on an invalid instruction.
 *
 * N.B.: an instruction with an "undefined" effect on eflags is treated as
 * writing eflags.  That is safe for programs that follow the architecture
 * contract, though a program relying on a specific undefined eflags value
 * might behave differently under dynamo than natively.
 */
byte *
decode_eflags_usage(dcontext_t *dcontext, byte *pc, uint *usage,
                    dr_opnd_query_flags_t flags)
{
    decode_info_t di;
    const instr_info_t *info;
    IF_X64(di.x86_mode = get_x86_mode(dcontext));
    /* don't decode immeds, instead use decode_next_pc, it's faster */
    read_instruction(pc, pc, &info, &di, true /* just opcode */ _IF_DEBUG(true));
    *usage = instr_eflags_conditionally(
        info->eflags, decode_predicate_from_instr_info(info->type, info), flags);
    /* a NULL from decode_next_pc (decode failure) is returned as-is */
    return decode_next_pc(dcontext, pc);
}
/* Decodes the opcode and eflags usage of instruction at address pc
 * into instr.
 * This corresponds to a Level 2 decoding.
 * Assumes that instr is already initialized, but uses the x86/x64 mode
 * for the current thread rather than that set in instr.
 * If caller is re-using same instr struct over multiple decodings,
 * should call instr_reset or instr_reuse.
 * Returns the address of the next byte after the decoded instruction.
 * Returns NULL on decoding an invalid instruction.
 */
byte *
decode_opcode(dcontext_t *dcontext, byte *pc, instr_t *instr)
{
    const instr_info_t *info;
    decode_info_t di;
    int sz;
#ifdef X64
    /* PR 251479: we need to know about all rip-relative addresses.
     * Since change/setting raw bits invalidates, we must set this
     * on every return. */
    uint rip_rel_pos;
#endif
    IF_X64(di.x86_mode = instr_get_x86_mode(instr));
    /* when pass true to read_instruction it doesn't decode immeds,
     * so have to call decode_next_pc, but that ends up being faster
     * than decoding immeds!
     */
    read_instruction(pc, pc, &info, &di,
                     true /* just opcode */
                     _IF_DEBUG(!TEST(INSTR_IGNORE_INVALID, instr->flags)));
    sz = decode_sizeof(dcontext, pc, NULL _IF_X64(&rip_rel_pos));
    IF_X64(instr_set_x86_mode(instr, get_x86_mode(dcontext)));
    instr_set_opcode(instr, info->type);
    /* read_instruction sets opcode to OP_INVALID for illegal instr.
     * decode_sizeof will return 0 for _some_ illegal instrs, so we
     * check it first since it's faster than instr_valid, but we have to
     * also check instr_valid to catch all illegal instrs.
     */
    if (sz == 0 || !instr_valid(instr)) {
        CLIENT_ASSERT(!instr_valid(instr), "decode_opcode: invalid instr");
        return NULL;
    }
    /* eflags are valid at Level 2 but operands are not decoded */
    instr->eflags = info->eflags;
    instr_set_eflags_valid(instr, true);
    /* operands are NOT set */
    instr_set_operands_valid(instr, false);
    /* raw bits are valid though and crucial for encoding */
    instr_set_raw_bits(instr, pc, sz);
    /* must set rip_rel_pos after setting raw bits */
    IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
    return pc + sz;
}
#if defined(DEBUG) && !defined(STANDALONE_DECODER)
/* PR 215143: we must resolve variable sizes at decode time.
 * Debug check: returns true when op's size is fully resolved (i.e. no longer
 * a variable size), or when op has no size worth checking.
 */
static bool
check_is_variable_size(opnd_t op)
{
    /* Only memory references and non-fp registers carry a checkable size;
     * reg_get_size() fails on fp registers since there is no OPSZ for them.
     */
    bool size_checkable = opnd_is_memory_reference(op) ||
        (opnd_is_reg(op) && !reg_is_fp(opnd_get_reg(op)));
    if (!size_checkable)
        return true;
    return !is_variable_size(opnd_get_size(op));
}
#endif
/* Decodes the instruction at address pc into instr, filling in the
 * instruction's opcode, eflags usage, prefixes, and operands.
 * This corresponds to a Level 3 decoding.
 * Assumes that instr is already initialized, but uses the x86/x64 mode
 * for the current thread rather than that set in instr.
 * If caller is re-using same instr struct over multiple decodings,
 * should call instr_reset or instr_reuse.
 * orig_pc is the address the bytes originally occupied; when decoding from a
 * copy it differs from pc and is recorded as the translation instead of
 * setting raw bits.
 * Returns the address of the next byte after the decoded instruction.
 * Returns NULL on decoding an invalid instruction.
 */
static byte *
decode_common(dcontext_t *dcontext, byte *pc, byte *orig_pc, instr_t *instr)
{
    const instr_info_t *info;
    decode_info_t di;
    byte *next_pc;
    int instr_num_dsts = 0, instr_num_srcs = 0;
    opnd_t dsts[8];
    opnd_t srcs[8];
    CLIENT_ASSERT(instr->opcode == OP_INVALID || instr->opcode == OP_UNDECODED,
                  "decode: instr is already decoded, may need to call instr_reset()");
    IF_X64(di.x86_mode = get_x86_mode(dcontext));
    next_pc = read_instruction(pc, orig_pc, &info, &di,
                               false /* not just opcode,
                                        decode operands too */
                               _IF_DEBUG(!TEST(INSTR_IGNORE_INVALID, instr->flags)));
    instr_set_opcode(instr, info->type);
    IF_X64(instr_set_x86_mode(instr, di.x86_mode));
    /* failure up to this point handled fine -- we set opcode to OP_INVALID */
    if (next_pc == NULL) {
        LOG(THREAD, LOG_INTERP, 3, "decode: invalid instr at " PFX "\n", pc);
        CLIENT_ASSERT(!instr_valid(instr), "decode: invalid instr");
        return NULL;
    }
    instr->eflags = info->eflags;
    instr_set_eflags_valid(instr, true);
    /* since we don't use set_src/set_dst we must explicitly say they're valid */
    instr_set_operands_valid(instr, true);
    /* read_instruction doesn't set di.len since only needed for rip-relative opnds */
    IF_X64(
        CLIENT_ASSERT_TRUNCATE(di.len, int, next_pc - pc, "internal truncation error"));
    di.len = (int)(next_pc - pc);
    di.opcode = info->type; /* used for opnd_create_immed_float_for_opcode */

    instr->prefixes |= di.prefixes;

    /* operands: decode up to 2 dsts and 3 srcs per table entry, chaining to
     * continuation entries for instructions with more operands */
    do {
        if (info->dst1_type != TYPE_NONE) {
            if (!decode_operand(&di, info->dst1_type, info->dst1_size,
                                &(dsts[instr_num_dsts++])))
                goto decode_invalid;
            ASSERT(check_is_variable_size(dsts[instr_num_dsts - 1]));
        }
        if (info->dst2_type != TYPE_NONE) {
            if (!decode_operand(&di, info->dst2_type, info->dst2_size,
                                &(dsts[instr_num_dsts++])))
                goto decode_invalid;
            ASSERT(check_is_variable_size(dsts[instr_num_dsts - 1]));
        }
        if (info->src1_type != TYPE_NONE) {
            if (!decode_operand(&di, info->src1_type, info->src1_size,
                                &(srcs[instr_num_srcs++])))
                goto decode_invalid;
            ASSERT(check_is_variable_size(srcs[instr_num_srcs - 1]));
        }
        if (info->src2_type != TYPE_NONE) {
            if (!decode_operand(&di, info->src2_type, info->src2_size,
                                &(srcs[instr_num_srcs++])))
                goto decode_invalid;
            ASSERT(check_is_variable_size(srcs[instr_num_srcs - 1]));
        }
        if (info->src3_type != TYPE_NONE) {
            if (!decode_operand(&di, info->src3_type, info->src3_size,
                                &(srcs[instr_num_srcs++])))
                goto decode_invalid;
            ASSERT(check_is_variable_size(srcs[instr_num_srcs - 1]));
        }
        /* extra operands:
         * we take advantage of the fact that all instructions that need extra
         * operands have only one encoding, so the code field points to instr_info_t
         * structures containing the extra operands
         */
        if ((info->flags & HAS_EXTRA_OPERANDS) != 0) {
            if ((info->flags & EXTRAS_IN_CODE_FIELD) != 0)
                info = (const instr_info_t *)(info->code);
            else /* extra operands are in next entry */
                info = info + 1;
        } else
            break;
    } while (true);

    /* some operands add to di.prefixes so we copy again */
    instr->prefixes |= di.prefixes;
    if (di.seg_override == SEG_FS)
        instr->prefixes |= PREFIX_SEG_FS;
    if (di.seg_override == SEG_GS)
        instr->prefixes |= PREFIX_SEG_GS;

    /* now copy operands into their real slots */
    instr_set_num_opnds(dcontext, instr, instr_num_dsts, instr_num_srcs);
    if (instr_num_dsts > 0) {
        memcpy(instr->dsts, dsts, instr_num_dsts * sizeof(opnd_t));
    }
    if (instr_num_srcs > 0) {
        /* remember that src0 is static */
        instr->src0 = srcs[0];
        if (instr_num_srcs > 1) {
            memcpy(instr->srcs, &(srcs[1]), (instr_num_srcs - 1) * sizeof(opnd_t));
        }
    }

    if (TESTANY(HAS_PRED_CC | HAS_PRED_COMPLEX, info->flags))
        instr_set_predicate(instr, decode_predicate_from_instr_info(di.opcode, info));

    /* check for invalid prefixes that depend on operand types */
    if (TEST(PREFIX_LOCK, di.prefixes)) {
        /* check for invalid opcode, list on p3-397 of IA-32 vol 2 */
        switch (instr_get_opcode(instr)) {
        case OP_add:
        case OP_adc:
        case OP_and:
        case OP_btc:
        case OP_btr:
        case OP_bts:
        case OP_cmpxchg:
        case OP_cmpxchg8b:
        case OP_dec:
        case OP_inc:
        case OP_neg:
        case OP_not:
        case OP_or:
        case OP_sbb:
        case OP_sub:
        case OP_xor:
        case OP_xadd:
        case OP_xchg: {
            /* still illegal unless dest is mem op rather than src */
            CLIENT_ASSERT(instr->num_dsts > 0, "internal lock prefix check error");
            if (!opnd_is_memory_reference(instr->dsts[0])) {
                LOG(THREAD, LOG_INTERP, 3, "decode: invalid lock prefix at " PFX "\n",
                    pc);
                goto decode_invalid;
            }
            break;
        }
        default: {
            LOG(THREAD, LOG_INTERP, 3, "decode: invalid lock prefix at " PFX "\n", pc);
            goto decode_invalid;
        }
        }
    }
    /* PREFIX_XRELEASE is allowed w/o LOCK on mov_st, but use of it or PREFIX_XACQUIRE
     * in other situations does not result in #UD so we ignore.
     */

    if (orig_pc != pc) {
        /* We do not want to copy when encoding and condone an invalid
         * relative target
         */
        instr_set_raw_bits_valid(instr, false);
        instr_set_translation(instr, orig_pc);
    } else {
        /* we set raw bits AFTER setting all srcs and dsts b/c setting
         * a src or dst marks instr as having invalid raw bits
         */
        IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(next_pc - pc)));
        instr_set_raw_bits(instr, pc, (uint)(next_pc - pc));
#ifdef X64
        if (X64_MODE(&di) && TEST(HAS_MODRM, info->flags) && di.mod == 0 && di.rm == 5) {
            CLIENT_ASSERT(di.disp_abs > di.start_pc, "decode: internal rip-rel error");
            CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_int(di.disp_abs - di.start_pc),
                          "decode: internal rip-rel error");
            /* must do this AFTER setting raw bits to avoid being invalidated */
            instr_set_rip_rel_pos(instr, (int)(di.disp_abs - di.start_pc));
        }
#endif
    }

    return next_pc;

decode_invalid:
    instr_set_operands_valid(instr, false);
    instr_set_opcode(instr, OP_INVALID);

    return NULL;
}
byte *
decode(dcontext_t *dcontext, byte *pc, instr_t *instr)
{
return decode_common(dcontext, pc, pc, instr);
}
byte *
decode_from_copy(dcontext_t *dcontext, byte *copy_pc, byte *orig_pc, instr_t *instr)
{
return decode_common(dcontext, copy_pc, orig_pc, instr);
}
const instr_info_t *
get_next_instr_info(const instr_info_t *info)
{
return (const instr_info_t *)(info->code);
}
byte
decode_first_opcode_byte(int opcode)
{
const instr_info_t *info = op_instr[opcode];
return (byte)((info->opcode & 0x00ff0000) >> 16);
}
DR_API
const char *
decode_opcode_name(int opcode)
{
const instr_info_t *info = op_instr[opcode];
return info->name;
}
const instr_info_t *
opcode_to_encoding_info(uint opc, dr_isa_mode_t isa_mode)
{
return op_instr[opc];
}
app_pc
dr_app_pc_as_jump_target(dr_isa_mode_t isa_mode, app_pc pc)
{
return pc;
}
app_pc
dr_app_pc_as_load_target(dr_isa_mode_t isa_mode, app_pc pc)
{
return pc;
}
#ifdef DEBUG
void
decode_debug_checks_arch(void)
{
/* empty */
}
#endif
#ifdef DECODE_UNIT_TEST
# include "instr_create.h"
/* FIXME: Tried putting this inside a separate unit-decode.c file, but
* required creating a unit-decode_table.c file. Since the
* infrastructure is not fully set up, currently leaving this here
* FIXME: beef up to check if something went wrong
*/
static bool
unit_check_decode_ff_opcode()
{
static int do_once = 0;
instr_t instr;
byte modrm, sib;
byte raw_bytes[] = {
0xff, 0x0, 0x0, 0xaa, 0xbb, 0xcc, 0xdd, 0xee,
0xff, 0xab, 0xbc, 0xcd, 0xde, 0xef, 0xfa,
};
app_pc next_pc = NULL;
for (modrm = 0x0; modrm < 0xff; modrm++) {
raw_bytes[1] = modrm;
for (sib = 0x0; sib < 0xff; sib++) {
raw_bytes[2] = sib;
/* set up instr for decode_opcode */
instr_init(GLOBAL_DCONTEXT, &instr);
instr.bytes = raw_bytes;
instr.length = 15;
instr_set_raw_bits_valid(&instr, true);
instr_set_operands_valid(&instr, false);
next_pc = decode_opcode(GLOBAL_DCONTEXT, instr.bytes, &instr);
if (next_pc != NULL && instr.opcode != OP_INVALID &&
instr.opcode != OP_UNDECODED) {
print_file(STDERR, "## %02x %02x %02x len=%d\n", instr.bytes[0],
instr.bytes[1], instr.bytes[2], instr.length);
}
}
}
return 0;
}
/* Standalone building is still broken so I tested this by calling
* from a real DR build.
*/
# define CHECK_ENCODE_OPCODE(dcontext, instr, pc, opc, ...) \
instr = INSTR_CREATE_##opc(dcontext, ##__VA_ARGS__); \
instr_encode(dcontext, instr, pc); \
instr_reset(dcontext, instr); \
decode(dcontext, pc, instr); \
/* FIXME: use EXPECT */ \
CLIENT_ASSERT(instr_get_opcode(instr) == OP_##opc, "unit test"); \
instr_destroy(dcontext, instr);
/* FIXME: case 8212: add checks for every single instr type */
static bool
unit_check_sse3()
{
dcontext_t *dcontext = get_thread_private_dcontext();
byte buf[32];
instr_t *instr;
CHECK_ENCODE_OPCODE(dcontext, instr, buf, mwait);
CHECK_ENCODE_OPCODE(dcontext, instr, buf, monitor);
CHECK_ENCODE_OPCODE(dcontext, instr, buf, haddpd, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, haddps, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, hsubpd, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, hsubps, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, addsubpd, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, addsubps, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, lddqu, opnd_create_reg(REG_XMM7),
opnd_create_base_disp(REG_NULL, REG_NULL, 0, 0, OPSZ_16));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, movsldup, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, movshdup, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
CHECK_ENCODE_OPCODE(dcontext, instr, buf, movddup, opnd_create_reg(REG_XMM7),
opnd_create_reg(REG_XMM2));
/* not sse3 but I fixed it at same time so here to test */
CHECK_ENCODE_OPCODE(dcontext, instr, buf, cmpxchg8b,
opnd_create_base_disp(REG_NULL, REG_NULL, 0, 0, OPSZ_8));
return true;
}
int
main()
{
bool res;
standalone_init();
res = unit_check_sse3();
res = unit_check_decode_ff_opcode() && res;
return res;
}
#endif /* DECODE_UNIT_TEST */
| 1 | 16,978 | This is debug-build-only: we want a release-build notice. SYSLOG takes the name w/o the MSG and no _STRING. Args are via events.mc specifiers. | DynamoRIO-dynamorio | c |
@@ -37,6 +37,10 @@ function matchesParentDomain(srvAddress, parentDomain) {
function parseSrvConnectionString(uri, options, callback) {
const result = URL.parse(uri, true);
+ if (options.directConnection || options.directconnection) {
+ return callback(new MongoParseError('directConnection not supported with SRV URI'));
+ }
+
if (result.hostname.split('.').length < 3) {
return callback(new MongoParseError('URI does not have hostname, domain name and tld'));
} | 1 | 'use strict';
const URL = require('url');
const qs = require('querystring');
const dns = require('dns');
const ReadPreference = require('./read_preference');
const { MongoParseError } = require('./error');
/**
* The following regular expression validates a connection string and breaks the
* provide string into the following capture groups: [protocol, username, password, hosts]
*/
const HOSTS_RX = /(mongodb(?:\+srv|)):\/\/(?: (?:[^:]*) (?: : ([^@]*) )? @ )?([^/?]*)(?:\/|)(.*)/;
/**
* Determines whether a provided address matches the provided parent domain in order
* to avoid certain attack vectors.
*
* @param {string} srvAddress The address to check against a domain
* @param {string} parentDomain The domain to check the provided address against
* @returns {boolean} Whether the provided address matches the parent domain
*/
function matchesParentDomain(srvAddress, parentDomain) {
const regex = /^.*?\./;
const srv = `.${srvAddress.replace(regex, '')}`;
const parent = `.${parentDomain.replace(regex, '')}`;
return srv.endsWith(parent);
}
/**
* Lookup a `mongodb+srv` connection string, combine the parts and reparse it as a normal
* connection string.
*
* @param {string} uri The connection string to parse
* @param {object} options Optional user provided connection string options
* @param {Function} callback
*/
function parseSrvConnectionString(uri, options, callback) {
const result = URL.parse(uri, true);
if (result.hostname.split('.').length < 3) {
return callback(new MongoParseError('URI does not have hostname, domain name and tld'));
}
result.domainLength = result.hostname.split('.').length;
if (result.pathname && result.pathname.match(',')) {
return callback(new MongoParseError('Invalid URI, cannot contain multiple hostnames'));
}
if (result.port) {
return callback(new MongoParseError(`Ports not accepted with '${PROTOCOL_MONGODB_SRV}' URIs`));
}
// Resolve the SRV record and use the result as the list of hosts to connect to.
const lookupAddress = result.host;
dns.resolveSrv(`_mongodb._tcp.${lookupAddress}`, (err, addresses) => {
if (err) return callback(err);
if (addresses.length === 0) {
return callback(new MongoParseError('No addresses found at host'));
}
for (let i = 0; i < addresses.length; i++) {
if (!matchesParentDomain(addresses[i].name, result.hostname, result.domainLength)) {
return callback(
new MongoParseError('Server record does not share hostname with parent URI')
);
}
}
// Convert the original URL to a non-SRV URL.
result.protocol = 'mongodb';
result.host = addresses.map(address => `${address.name}:${address.port}`).join(',');
// Default to SSL true if it's not specified.
if (
!('ssl' in options) &&
(!result.search || !('ssl' in result.query) || result.query.ssl === null)
) {
result.query.ssl = true;
}
// Resolve TXT record and add options from there if they exist.
dns.resolveTxt(lookupAddress, (err, record) => {
if (err) {
if (err.code !== 'ENODATA') {
return callback(err);
}
record = null;
}
if (record) {
if (record.length > 1) {
return callback(new MongoParseError('Multiple text records not allowed'));
}
record = qs.parse(record[0].join(''));
if (Object.keys(record).some(key => key !== 'authSource' && key !== 'replicaSet')) {
return callback(
new MongoParseError('Text record must only set `authSource` or `replicaSet`')
);
}
result.query = Object.assign({}, record, result.query);
}
// Set completed options back into the URL object.
result.search = qs.stringify(result.query);
const finalString = URL.format(result);
parseConnectionString(finalString, options, (err, ret) => {
if (err) {
callback(err);
return;
}
callback(null, Object.assign({}, ret, { srvHost: lookupAddress }));
});
});
});
}
/**
* Parses a query string item according to the connection string spec
*
* @param {string} key The key for the parsed value
* @param {Array|string} value The value to parse
* @returns {Array|object|string} The parsed value
*/
function parseQueryStringItemValue(key, value) {
if (Array.isArray(value)) {
// deduplicate and simplify arrays
value = value.filter((v, idx) => value.indexOf(v) === idx);
if (value.length === 1) value = value[0];
} else if (value.indexOf(':') > 0) {
value = value.split(',').reduce((result, pair) => {
const parts = pair.split(':');
result[parts[0]] = parseQueryStringItemValue(key, parts[1]);
return result;
}, {});
} else if (value.indexOf(',') > 0) {
value = value.split(',').map(v => {
return parseQueryStringItemValue(key, v);
});
} else if (value.toLowerCase() === 'true' || value.toLowerCase() === 'false') {
value = value.toLowerCase() === 'true';
} else if (!Number.isNaN(value) && !STRING_OPTIONS.has(key)) {
const numericValue = parseFloat(value);
if (!Number.isNaN(numericValue)) {
value = parseFloat(value);
}
}
return value;
}
// Options that are known boolean types
const BOOLEAN_OPTIONS = new Set([
'slaveok',
'slave_ok',
'sslvalidate',
'fsync',
'safe',
'retrywrites',
'j'
]);
// Known string options, only used to bypass Number coercion in `parseQueryStringItemValue`
const STRING_OPTIONS = new Set(['authsource', 'replicaset']);
// Supported text representations of auth mechanisms
// NOTE: this list exists in native already, if it is merged here we should deduplicate
const AUTH_MECHANISMS = new Set([
'GSSAPI',
'MONGODB-AWS',
'MONGODB-X509',
'MONGODB-CR',
'DEFAULT',
'SCRAM-SHA-1',
'SCRAM-SHA-256',
'PLAIN'
]);
// Lookup table used to translate normalized (lower-cased) forms of connection string
// options to their expected camelCase version
const CASE_TRANSLATION = {
replicaset: 'replicaSet',
connecttimeoutms: 'connectTimeoutMS',
sockettimeoutms: 'socketTimeoutMS',
maxpoolsize: 'maxPoolSize',
minpoolsize: 'minPoolSize',
maxidletimems: 'maxIdleTimeMS',
waitqueuemultiple: 'waitQueueMultiple',
waitqueuetimeoutms: 'waitQueueTimeoutMS',
wtimeoutms: 'wtimeoutMS',
readconcern: 'readConcern',
readconcernlevel: 'readConcernLevel',
readpreference: 'readPreference',
maxstalenessseconds: 'maxStalenessSeconds',
readpreferencetags: 'readPreferenceTags',
authsource: 'authSource',
authmechanism: 'authMechanism',
authmechanismproperties: 'authMechanismProperties',
gssapiservicename: 'gssapiServiceName',
localthresholdms: 'localThresholdMS',
serverselectiontimeoutms: 'serverSelectionTimeoutMS',
serverselectiontryonce: 'serverSelectionTryOnce',
heartbeatfrequencyms: 'heartbeatFrequencyMS',
retrywrites: 'retryWrites',
uuidrepresentation: 'uuidRepresentation',
zlibcompressionlevel: 'zlibCompressionLevel',
tlsallowinvalidcertificates: 'tlsAllowInvalidCertificates',
tlsallowinvalidhostnames: 'tlsAllowInvalidHostnames',
tlsinsecure: 'tlsInsecure',
tlsdisablecertificaterevocationcheck: 'tlsDisableCertificateRevocationCheck',
tlsdisableocspendpointcheck: 'tlsDisableOCSPEndpointCheck',
tlscafile: 'tlsCAFile',
tlscertificatekeyfile: 'tlsCertificateKeyFile',
tlscertificatekeyfilepassword: 'tlsCertificateKeyFilePassword',
wtimeout: 'wTimeoutMS',
j: 'journal'
};
/**
* Sets the value for `key`, allowing for any required translation
*
* @param {object} obj The object to set the key on
* @param {string} key The key to set the value for
* @param {any} value The value to set
* @param {object} options The options used for option parsing
*/
function applyConnectionStringOption(obj, key, value, options) {
// simple key translation
if (key === 'journal') {
key = 'j';
} else if (key === 'wtimeoutms') {
key = 'wtimeout';
}
// more complicated translation
if (BOOLEAN_OPTIONS.has(key)) {
value = value === 'true' || value === true;
} else if (key === 'appname') {
value = decodeURIComponent(value);
} else if (key === 'readconcernlevel') {
obj['readConcernLevel'] = value;
key = 'readconcern';
value = { level: value };
}
// simple validation
if (key === 'compressors') {
value = Array.isArray(value) ? value : [value];
if (!value.every(c => c === 'snappy' || c === 'zlib')) {
throw new MongoParseError(
'Value for `compressors` must be at least one of: `snappy`, `zlib`'
);
}
}
if (key === 'authmechanism' && !AUTH_MECHANISMS.has(value)) {
throw new MongoParseError(
`Value for authMechanism must be one of: ${Array.from(AUTH_MECHANISMS).join(
', '
)}, found: ${value}`
);
}
if (key === 'readpreference' && !ReadPreference.isValid(value)) {
throw new MongoParseError(
'Value for `readPreference` must be one of: `primary`, `primaryPreferred`, `secondary`, `secondaryPreferred`, `nearest`'
);
}
if (key === 'zlibcompressionlevel' && (value < -1 || value > 9)) {
throw new MongoParseError('zlibCompressionLevel must be an integer between -1 and 9');
}
// special cases
if (key === 'compressors' || key === 'zlibcompressionlevel') {
obj.compression = obj.compression || {};
obj = obj.compression;
}
if (key === 'authmechanismproperties') {
if (typeof value.SERVICE_NAME === 'string') obj.gssapiServiceName = value.SERVICE_NAME;
if (typeof value.SERVICE_REALM === 'string') obj.gssapiServiceRealm = value.SERVICE_REALM;
if (typeof value.CANONICALIZE_HOST_NAME !== 'undefined') {
obj.gssapiCanonicalizeHostName = value.CANONICALIZE_HOST_NAME;
}
}
if (key === 'readpreferencetags') {
value = Array.isArray(value) ? splitArrayOfMultipleReadPreferenceTags(value) : [value];
}
// set the actual value
if (options.caseTranslate && CASE_TRANSLATION[key]) {
obj[CASE_TRANSLATION[key]] = value;
return;
}
obj[key] = value;
}
const USERNAME_REQUIRED_MECHANISMS = new Set([
'GSSAPI',
'MONGODB-CR',
'PLAIN',
'SCRAM-SHA-1',
'SCRAM-SHA-256'
]);
function splitArrayOfMultipleReadPreferenceTags(value) {
const parsedTags = [];
for (let i = 0; i < value.length; i++) {
parsedTags[i] = {};
value[i].split(',').forEach(individualTag => {
const splitTag = individualTag.split(':');
parsedTags[i][splitTag[0]] = splitTag[1];
});
}
return parsedTags;
}
/**
* Modifies the parsed connection string object taking into account expectations we
* have for authentication-related options.
*
* @param {object} parsed The parsed connection string result
* @returns The parsed connection string result possibly modified for auth expectations
*/
function applyAuthExpectations(parsed) {
if (parsed.options == null) {
return;
}
const options = parsed.options;
const authSource = options.authsource || options.authSource;
if (authSource != null) {
parsed.auth = Object.assign({}, parsed.auth, { db: authSource });
}
const authMechanism = options.authmechanism || options.authMechanism;
if (authMechanism != null) {
if (
USERNAME_REQUIRED_MECHANISMS.has(authMechanism) &&
(!parsed.auth || parsed.auth.username == null)
) {
throw new MongoParseError(`Username required for mechanism \`${authMechanism}\``);
}
if (authMechanism === 'GSSAPI') {
if (authSource != null && authSource !== '$external') {
throw new MongoParseError(
`Invalid source \`${authSource}\` for mechanism \`${authMechanism}\` specified.`
);
}
parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
}
if (authMechanism === 'MONGODB-AWS') {
if (authSource != null && authSource !== '$external') {
throw new MongoParseError(
`Invalid source \`${authSource}\` for mechanism \`${authMechanism}\` specified.`
);
}
parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
}
if (authMechanism === 'MONGODB-X509') {
if (parsed.auth && parsed.auth.password != null) {
throw new MongoParseError(`Password not allowed for mechanism \`${authMechanism}\``);
}
if (authSource != null && authSource !== '$external') {
throw new MongoParseError(
`Invalid source \`${authSource}\` for mechanism \`${authMechanism}\` specified.`
);
}
parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
}
if (authMechanism === 'PLAIN') {
if (parsed.auth && parsed.auth.db == null) {
parsed.auth = Object.assign({}, parsed.auth, { db: '$external' });
}
}
}
// default to `admin` if nothing else was resolved
if (parsed.auth && parsed.auth.db == null) {
parsed.auth = Object.assign({}, parsed.auth, { db: 'admin' });
}
return parsed;
}
/**
* Parses a query string according the connection string spec.
*
* @param {string} query The query string to parse
* @param {object} [options] The options used for options parsing
* @returns {object|Error} The parsed query string as an object, or an error if one was encountered
*/
function parseQueryString(query, options) {
const result = {};
let parsedQueryString = qs.parse(query);
checkTLSQueryString(parsedQueryString);
for (const key in parsedQueryString) {
const value = parsedQueryString[key];
if (value === '' || value == null) {
throw new MongoParseError('Incomplete key value pair for option');
}
const normalizedKey = key.toLowerCase();
const parsedValue = parseQueryStringItemValue(normalizedKey, value);
applyConnectionStringOption(result, normalizedKey, parsedValue, options);
}
// special cases for known deprecated options
if (result.wtimeout && result.wtimeoutms) {
delete result.wtimeout;
console.warn('Unsupported option `wtimeout` specified');
}
return Object.keys(result).length ? result : null;
}
/// Adds support for modern `tls` variants of out `ssl` options
function translateTLSOptions(queryString) {
if (queryString.tls) {
queryString.ssl = queryString.tls;
}
if (queryString.tlsInsecure) {
queryString.checkServerIdentity = false;
queryString.sslValidate = false;
} else {
Object.assign(queryString, {
checkServerIdentity: queryString.tlsAllowInvalidHostnames ? false : true,
sslValidate: queryString.tlsAllowInvalidCertificates ? false : true
});
}
if (queryString.tlsCAFile) {
queryString.ssl = true;
queryString.sslCA = queryString.tlsCAFile;
}
if (queryString.tlsCertificateKeyFile) {
queryString.ssl = true;
if (queryString.tlsCertificateFile) {
queryString.sslCert = queryString.tlsCertificateFile;
queryString.sslKey = queryString.tlsCertificateKeyFile;
} else {
queryString.sslKey = queryString.tlsCertificateKeyFile;
queryString.sslCert = queryString.tlsCertificateKeyFile;
}
}
if (queryString.tlsCertificateKeyFilePassword) {
queryString.ssl = true;
queryString.sslPass = queryString.tlsCertificateKeyFilePassword;
}
return queryString;
}
/**
* Checks a query string for invalid tls options according to the URI options spec.
*
* @param {string} queryString The query string to check
* @throws {MongoParseError}
*/
function checkTLSQueryString(queryString) {
const queryStringKeys = Object.keys(queryString);
const tlsValue = assertTlsOptionsAreEqual('tls', queryString, queryStringKeys);
const sslValue = assertTlsOptionsAreEqual('ssl', queryString, queryStringKeys);
if (tlsValue != null && sslValue != null) {
if (tlsValue !== sslValue) {
throw new MongoParseError('All values of `tls` and `ssl` must be the same.');
}
}
}
/**
* Checks options object if both options are present (any value) will throw an error.
*
* @param {object} options The options used for options parsing
* @param {string} optionKeyA A options key
* @param {string} optionKeyB B options key
* @throws {MongoParseError}
*/
function assertRepelOptions(options, optionKeyA, optionKeyB) {
if (
Object.prototype.hasOwnProperty.call(options, optionKeyA) &&
Object.prototype.hasOwnProperty.call(options, optionKeyB)
) {
throw new MongoParseError(`The \`${optionKeyA}\` option cannot be used with \`${optionKeyB}\``);
}
}
/**
* Checks if TLS options are valid
*
* @param {object} options The options used for options parsing
* @throws {MongoParseError}
*/
function checkTLSOptions(options) {
if (!options) return null;
const check = (a, b) => assertRepelOptions(options, a, b);
check('tlsInsecure', 'tlsAllowInvalidCertificates');
check('tlsInsecure', 'tlsAllowInvalidHostnames');
check('tlsInsecure', 'tlsDisableCertificateRevocationCheck');
check('tlsInsecure', 'tlsDisableOCSPEndpointCheck');
check('tlsAllowInvalidCertificates', 'tlsDisableCertificateRevocationCheck');
check('tlsAllowInvalidCertificates', 'tlsDisableOCSPEndpointCheck');
check('tlsDisableCertificateRevocationCheck', 'tlsDisableOCSPEndpointCheck');
}
/**
* Checks a query string to ensure all tls/ssl options are the same.
*
* @param {string} optionName The key (tls or ssl) to check
* @param {string} queryString The query string to check
* @param {any} queryStringKeys
* @throws {MongoParseError}
* @returns The value of the tls/ssl option
*/
function assertTlsOptionsAreEqual(optionName, queryString, queryStringKeys) {
const queryStringHasTLSOption = queryStringKeys.indexOf(optionName) !== -1;
let optionValue;
if (Array.isArray(queryString[optionName])) {
optionValue = queryString[optionName][0];
} else {
optionValue = queryString[optionName];
}
if (queryStringHasTLSOption) {
if (Array.isArray(queryString[optionName])) {
const firstValue = queryString[optionName][0];
queryString[optionName].forEach(tlsValue => {
if (tlsValue !== firstValue) {
throw new MongoParseError(`All values of ${optionName} must be the same.`);
}
});
}
}
return optionValue;
}
const PROTOCOL_MONGODB = 'mongodb';
const PROTOCOL_MONGODB_SRV = 'mongodb+srv';
const SUPPORTED_PROTOCOLS = [PROTOCOL_MONGODB, PROTOCOL_MONGODB_SRV];
/**
* Parses a MongoDB connection string
*
* @param {string} uri the MongoDB connection string to parse
* @param {object} [options] Optional settings.
* @param {boolean} [options.caseTranslate] Whether the parser should translate options back into camelCase after normalization
* @param {parseCallback} callback
*/
function parseConnectionString(uri, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = Object.assign({}, { caseTranslate: true }, options);
// Check for bad uris before we parse
try {
URL.parse(uri);
} catch (e) {
return callback(new MongoParseError('URI malformed, cannot be parsed'));
}
const cap = uri.match(HOSTS_RX);
if (!cap) {
return callback(new MongoParseError('Invalid connection string'));
}
const protocol = cap[1];
if (SUPPORTED_PROTOCOLS.indexOf(protocol) === -1) {
return callback(new MongoParseError('Invalid protocol provided'));
}
if (protocol === PROTOCOL_MONGODB_SRV) {
return parseSrvConnectionString(uri, options, callback);
}
const dbAndQuery = cap[4].split('?');
const db = dbAndQuery.length > 0 ? dbAndQuery[0] : null;
const query = dbAndQuery.length > 1 ? dbAndQuery[1] : null;
let parsedOptions;
try {
// this just parses the query string NOT the connection options object
parsedOptions = parseQueryString(query, options);
// this merges the options object with the query string object above
parsedOptions = Object.assign({}, parsedOptions, options);
checkTLSOptions(parsedOptions);
} catch (parseError) {
return callback(parseError);
}
const auth = { username: null, password: null, db: db && db !== '' ? qs.unescape(db) : null };
if (parsedOptions.auth) {
// maintain support for legacy options passed into `MongoClient`
if (parsedOptions.auth.username) auth.username = parsedOptions.auth.username;
if (parsedOptions.auth.user) auth.username = parsedOptions.auth.user;
if (parsedOptions.auth.password) auth.password = parsedOptions.auth.password;
} else {
if (parsedOptions.username) auth.username = parsedOptions.username;
if (parsedOptions.user) auth.username = parsedOptions.user;
if (parsedOptions.password) auth.password = parsedOptions.password;
}
if (cap[4].split('?')[0].indexOf('@') !== -1) {
return callback(new MongoParseError('Unescaped slash in userinfo section'));
}
const authorityParts = cap[3].split('@');
if (authorityParts.length > 2) {
return callback(new MongoParseError('Unescaped at-sign in authority section'));
}
if (authorityParts[0] == null || authorityParts[0] === '') {
return callback(new MongoParseError('No username provided in authority section'));
}
if (authorityParts.length > 1) {
const authParts = authorityParts.shift().split(':');
if (authParts.length > 2) {
return callback(new MongoParseError('Unescaped colon in authority section'));
}
if (authParts[0] === '') {
return callback(new MongoParseError('Invalid empty username provided'));
}
if (!auth.username) auth.username = qs.unescape(authParts[0]);
if (!auth.password) auth.password = authParts[1] ? qs.unescape(authParts[1]) : null;
}
let hostParsingError = null;
const hosts = authorityParts
.shift()
.split(',')
.map(host => {
let parsedHost = URL.parse(`mongodb://${host}`);
if (parsedHost.path === '/:') {
hostParsingError = new MongoParseError('Double colon in host identifier');
return null;
}
// heuristically determine if we're working with a domain socket
if (host.match(/\.sock/)) {
parsedHost.hostname = qs.unescape(host);
parsedHost.port = null;
}
if (Number.isNaN(parsedHost.port)) {
hostParsingError = new MongoParseError('Invalid port (non-numeric string)');
return;
}
const result = {
host: parsedHost.hostname,
port: parsedHost.port ? parseInt(parsedHost.port) : 27017
};
if (result.port === 0) {
hostParsingError = new MongoParseError('Invalid port (zero) with hostname');
return;
}
if (result.port > 65535) {
hostParsingError = new MongoParseError('Invalid port (larger than 65535) with hostname');
return;
}
if (result.port < 0) {
hostParsingError = new MongoParseError('Invalid port (negative number)');
return;
}
return result;
})
.filter(host => !!host);
if (hostParsingError) {
return callback(hostParsingError);
}
if (hosts.length === 0 || hosts[0].host === '' || hosts[0].host === null) {
return callback(new MongoParseError('No hostname or hostnames provided in connection string'));
}
const result = {
hosts: hosts,
auth: auth.db || auth.username ? auth : null,
options: Object.keys(parsedOptions).length ? parsedOptions : null
};
if (result.auth && result.auth.db) {
result.defaultDatabase = result.auth.db;
} else {
result.defaultDatabase = 'test';
}
// support modern `tls` variants to SSL options
result.options = translateTLSOptions(result.options);
try {
applyAuthExpectations(result);
} catch (authError) {
return callback(authError);
}
callback(null, result);
}
module.exports = {
parseConnectionString
};
| 1 | 17,596 | is it possible to have both forms here? I was hoping we wouldn't be introducing more cases where we had to check the upper and lowercase version of URI options. | mongodb-node-mongodb-native | js |
@@ -230,11 +230,11 @@ func BenchmarkFloat64LastValueAdd(b *testing.B) {
// Histograms
-func benchmarkInt64HistogramAdd(b *testing.B, name string) {
+func BenchmarkInt64HistogramAdd(b *testing.B) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
- mea := fix.meterMust().NewInt64Histogram(name)
+ mea := fix.meterMust().NewInt64Histogram("int64.histogram")
b.ResetTimer()
| 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric_test
import (
"context"
"fmt"
"math/rand"
"testing"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/global"
"go.opentelemetry.io/otel/metric/sdkapi"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/export"
"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
)
type benchFixture struct {
meter metric.Meter
accumulator *sdk.Accumulator
B *testing.B
export.AggregatorSelector
}
func newFixture(b *testing.B) *benchFixture {
b.ReportAllocs()
bf := &benchFixture{
B: b,
AggregatorSelector: processortest.AggregatorSelector(),
}
bf.accumulator = sdk.NewAccumulator(bf)
bf.meter = metric.WrapMeterImpl(bf.accumulator)
return bf
}
func (f *benchFixture) Process(export.Accumulation) error {
return nil
}
func (f *benchFixture) Meter(_ string, _ ...metric.MeterOption) metric.Meter {
return f.meter
}
func (f *benchFixture) meterMust() metric.MeterMust {
return metric.Must(f.meter)
}
func makeLabels(n int) []attribute.KeyValue {
used := map[string]bool{}
l := make([]attribute.KeyValue, n)
for i := 0; i < n; i++ {
var k string
for {
k = fmt.Sprint("k", rand.Intn(1000000000))
if !used[k] {
used[k] = true
break
}
}
l[i] = attribute.String(k, fmt.Sprint("v", rand.Intn(1000000000)))
}
return l
}
func benchmarkLabels(b *testing.B, n int) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(n)
cnt := fix.meterMust().NewInt64Counter("int64.sum")
b.ResetTimer()
for i := 0; i < b.N; i++ {
cnt.Add(ctx, 1, labs...)
}
}
func BenchmarkInt64CounterAddWithLabels_1(b *testing.B) {
benchmarkLabels(b, 1)
}
func BenchmarkInt64CounterAddWithLabels_2(b *testing.B) {
benchmarkLabels(b, 2)
}
func BenchmarkInt64CounterAddWithLabels_4(b *testing.B) {
benchmarkLabels(b, 4)
}
func BenchmarkInt64CounterAddWithLabels_8(b *testing.B) {
benchmarkLabels(b, 8)
}
func BenchmarkInt64CounterAddWithLabels_16(b *testing.B) {
benchmarkLabels(b, 16)
}
// Note: performance does not depend on label set size for the
// benchmarks below--all are benchmarked for a single attribute.
// Iterators
var benchmarkIteratorVar attribute.KeyValue
func benchmarkIterator(b *testing.B, n int) {
labels := attribute.NewSet(makeLabels(n)...)
b.ResetTimer()
for i := 0; i < b.N; i++ {
iter := labels.Iter()
for iter.Next() {
benchmarkIteratorVar = iter.Label()
}
}
}
func BenchmarkIterator_0(b *testing.B) {
benchmarkIterator(b, 0)
}
func BenchmarkIterator_1(b *testing.B) {
benchmarkIterator(b, 1)
}
func BenchmarkIterator_2(b *testing.B) {
benchmarkIterator(b, 2)
}
func BenchmarkIterator_4(b *testing.B) {
benchmarkIterator(b, 4)
}
func BenchmarkIterator_8(b *testing.B) {
benchmarkIterator(b, 8)
}
func BenchmarkIterator_16(b *testing.B) {
benchmarkIterator(b, 16)
}
// Counters
func BenchmarkGlobalInt64CounterAddWithSDK(b *testing.B) {
// Compare with BenchmarkInt64CounterAdd() to see overhead of global
// package. This is in the SDK to avoid the API from depending on the
// SDK.
ctx := context.Background()
fix := newFixture(b)
sdk := global.Meter("test")
global.SetMeterProvider(fix)
labs := []attribute.KeyValue{attribute.String("A", "B")}
cnt := Must(sdk).NewInt64Counter("int64.sum")
b.ResetTimer()
for i := 0; i < b.N; i++ {
cnt.Add(ctx, 1, labs...)
}
}
func BenchmarkInt64CounterAdd(b *testing.B) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
cnt := fix.meterMust().NewInt64Counter("int64.sum")
b.ResetTimer()
for i := 0; i < b.N; i++ {
cnt.Add(ctx, 1, labs...)
}
}
func BenchmarkFloat64CounterAdd(b *testing.B) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
cnt := fix.meterMust().NewFloat64Counter("float64.sum")
b.ResetTimer()
for i := 0; i < b.N; i++ {
cnt.Add(ctx, 1.1, labs...)
}
}
// LastValue

// NOTE(review): despite the "lastvalue" instrument names, both benchmarks
// below create histogram instruments; confirm whether the instrument name
// alone is meant to select the last-value aggregation here.
func BenchmarkInt64LastValueAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meterMust().NewInt64Histogram("int64.lastvalue")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mea.Record(ctx, int64(i), labs...)
	}
}

func BenchmarkFloat64LastValueAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meterMust().NewFloat64Histogram("float64.lastvalue")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mea.Record(ctx, float64(i), labs...)
	}
}
// Histograms

// benchmarkInt64HistogramAdd records b.N int64 measurements (0..b.N-1) on a
// histogram instrument with the given name, using a single attribute.
func benchmarkInt64HistogramAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	attrs := makeLabels(1)
	hist := fix.meterMust().NewInt64Histogram(name)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		hist.Record(ctx, int64(n), attrs...)
	}
}
// benchmarkFloat64HistogramAdd records b.N float64 measurements (0..b.N-1) on
// a histogram instrument with the given name, using a single attribute.
func benchmarkFloat64HistogramAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	attrs := makeLabels(1)
	hist := fix.meterMust().NewFloat64Histogram(name)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		hist.Record(ctx, float64(n), attrs...)
	}
}
// Observers

// BenchmarkObserverRegistration measures the cost of registering b.N gauge
// observer instruments; name generation happens before the timer reset.
func BenchmarkObserverRegistration(b *testing.B) {
	fix := newFixture(b)
	instrumentNames := make([]string, 0, b.N)
	for n := 0; n < b.N; n++ {
		instrumentNames = append(instrumentNames, fmt.Sprintf("test.%d.lastvalue", n))
	}
	noop := func(_ context.Context, _ metric.Int64ObserverResult) {}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		fix.meterMust().NewInt64GaugeObserver(instrumentNames[n], noop)
	}
}
// BenchmarkGaugeObserverObservationInt64 measures b.N int64 observations made
// from inside a gauge observer callback; the observations occur when
// fix.accumulator.Collect runs the registered callback after the timer reset.
func BenchmarkGaugeObserverObservationInt64(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	_ = fix.meterMust().NewInt64GaugeObserver("test.lastvalue", func(_ context.Context, result metric.Int64ObserverResult) {
		for i := 0; i < b.N; i++ {
			result.Observe((int64)(i), labs...)
		}
	})
	b.ResetTimer()
	fix.accumulator.Collect(ctx)
}

// BenchmarkGaugeObserverObservationFloat64 is the float64 counterpart of the
// benchmark above.
func BenchmarkGaugeObserverObservationFloat64(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	_ = fix.meterMust().NewFloat64GaugeObserver("test.lastvalue", func(_ context.Context, result metric.Float64ObserverResult) {
		for i := 0; i < b.N; i++ {
			result.Observe((float64)(i), labs...)
		}
	})
	b.ResetTimer()
	fix.accumulator.Collect(ctx)
}
// Exact

// The *ExactAdd benchmarks reuse the histogram helpers with "exact"-named
// instruments.
func BenchmarkInt64ExactAdd(b *testing.B) {
	benchmarkInt64HistogramAdd(b, "int64.exact")
}

func BenchmarkFloat64ExactAdd(b *testing.B) {
	benchmarkFloat64HistogramAdd(b, "float64.exact")
}
// BatchRecord

// benchmarkBatchRecord8Labels measures RecordBatch with numInst counter
// measurements sharing a single 8-attribute label set.
func benchmarkBatchRecord8Labels(b *testing.B, numInst int) {
	const numLabels = 8
	ctx := context.Background()
	fix := newFixture(b)
	attrs := makeLabels(numLabels)
	measurements := make([]sdkapi.Measurement, 0, numInst)
	for n := 0; n < numInst; n++ {
		counter := fix.meterMust().NewInt64Counter(fmt.Sprintf("int64.%d.sum", n))
		measurements = append(measurements, counter.Measurement(1))
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		fix.accumulator.RecordBatch(ctx, attrs, measurements...)
	}
}
// NOTE(review): the first benchmark's name omits the underscore before
// "8Labels" unlike its siblings; renaming would break comparisons against
// historical benchmark results, so the inconsistency is only flagged here.
func BenchmarkBatchRecord8Labels_1Instrument(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 1)
}

func BenchmarkBatchRecord_8Labels_2Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 2)
}

func BenchmarkBatchRecord_8Labels_4Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 4)
}

func BenchmarkBatchRecord_8Labels_8Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 8)
}
// Record creation

// BenchmarkRepeatedDirectCalls measures an Add immediately followed by a
// Collect on every iteration, exercising record creation and reuse.
func BenchmarkRepeatedDirectCalls(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	counter := fix.meterMust().NewInt64Counter("int64.sum")
	attr := attribute.String("bench", "true")
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		counter.Add(ctx, 1, attr)
		fix.accumulator.Collect(ctx)
	}
}
| 1 | 17,198 | Does the name suffix hardcode something? Not clear how changing the name fixes this. | open-telemetry-opentelemetry-go | go |
@@ -39,4 +39,14 @@ public interface JmxExecutorManagerMBean {
@DisplayName("OPERATION: getPrimaryExecutorHostPorts")
public List<String> getPrimaryExecutorHostPorts();
+
+ @DisplayName("OPERATION: isQueueProcessorActive")
+ public boolean isQueueProcessorActive();
+
+ @DisplayName("OPERATION: getUndispatchedFlows")
+ public String getUndispatchedFlows();
+
+ @DisplayName("OPERATION: getQueueProcessorThreadState")
+ public String getQueueProcessorThreadState();
+
} | 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.jmx;
import java.util.List;
/**
 * JMX MBean interface exposing runtime state of the executor manager for
 * monitoring purposes.
 */
public interface JmxExecutorManagerMBean {

  /** @return the number of currently running flows. */
  @DisplayName("OPERATION: getNumRunningFlows")
  public int getNumRunningFlows();

  /** @return a printable summary of the currently running flows. */
  @DisplayName("OPERATION: getRunningFlows")
  public String getRunningFlows();

  /** @return the state of the executor manager thread. */
  @DisplayName("OPERATION: getExecutorThreadState")
  public String getExecutorThreadState();

  /** @return the stage the executor manager thread is currently working on. */
  @DisplayName("OPERATION: getExecutorThreadStage")
  public String getExecutorThreadStage();

  /** @return whether the executor manager thread is active. */
  @DisplayName("OPERATION: isThreadActive")
  public boolean isThreadActive();

  /** @return the time of the last thread liveness check (presumably epoch millis — confirm). */
  @DisplayName("OPERATION: getLastThreadCheckTime")
  public Long getLastThreadCheckTime();

  /** @return host:port identifiers of the primary executors. */
  @DisplayName("OPERATION: getPrimaryExecutorHostPorts")
  public List<String> getPrimaryExecutorHostPorts();
}
| 1 | 10,615 | Is undispatched same as queued? getQueuedFlows? | azkaban-azkaban | java |
@@ -1,5 +1,5 @@
/**
- * core/site data store: connection info tests.
+ * Core site data store: connection info tests.
*
* Site Kit by Google, Copyright 2020 Google LLC
* | 1 | /**
* core/site data store: connection info tests.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import API from 'googlesitekit-api';
import {
createTestRegistry,
muteConsole,
muteFetch,
subscribeUntil,
unsubscribeFromAll,
} from '../../../../../tests/js/utils';
import { STORE_NAME } from './constants';
// Tests for the core/site connection datastore: its actions, selectors, and
// the resolvers that fetch connection info from the REST endpoint.
describe( 'core/site connection', () => {
	// Canonical successful response from the connection endpoint.
	const responseConnected = { connected: true, resettable: true, setupCompleted: true };
	let registry;
	let select;
	let store;

	beforeAll( () => {
		API.setUsingCache( false );
	} );

	beforeEach( () => {
		registry = createTestRegistry();
		store = registry.stores[ STORE_NAME ].store;
		select = registry.select( STORE_NAME );
	} );

	afterAll( () => {
		API.setUsingCache( true );
	} );

	afterEach( () => {
		unsubscribeFromAll( registry );
	} );

	describe( 'actions', () => {
		describe( 'fetchGetConnection', () => {
			it( 'does not require any params', () => {
				expect( () => {
					muteFetch( /^\/google-site-kit\/v1\/core\/site\/data\/connection/ );
					registry.dispatch( STORE_NAME ).fetchGetConnection();
				} ).not.toThrow();
			} );
		} );

		describe( 'receiveGetConnection', () => {
			it( 'requires the response param', () => {
				expect( () => {
					registry.dispatch( STORE_NAME ).receiveGetConnection();
				} ).toThrow( 'response is required.' );
			} );

			it( 'receives and sets connection ', async () => {
				const connection = { coolSite: true };
				await registry.dispatch( STORE_NAME ).receiveGetConnection( connection );

				const state = store.getState();

				expect( state ).toMatchObject( { connection } );
			} );
		} );
	} );

	describe( 'selectors', () => {
		describe( 'getConnection', () => {
			it( 'uses a resolver to make a network request', async () => {
				fetchMock.getOnce(
					/^\/google-site-kit\/v1\/core\/site\/data\/connection/,
					{ body: responseConnected, status: 200 }
				);

				const initialConnection = select.getConnection();
				// The connection info will be its initial value while the connection
				// info is fetched.
				expect( initialConnection ).toEqual( undefined );
				await subscribeUntil( registry,
					() => (
						select.getConnection() !== undefined
					),
				);

				const connection = select.getConnection();

				expect( fetchMock ).toHaveFetchedTimes( 1 );
				expect( connection ).toEqual( responseConnected );

				// A second select must be served from state, not the network.
				const connectionSelect = select.getConnection();
				expect( fetchMock ).toHaveFetchedTimes( 1 );
				expect( connectionSelect ).toEqual( connection );
			} );

			it( 'does not make a network request if data is already in state', async () => {
				registry.dispatch( STORE_NAME ).receiveGetConnection( responseConnected, {} );

				const connection = select.getConnection();

				await subscribeUntil( registry, () => registry
					.select( STORE_NAME )
					.hasFinishedResolution( 'getConnection' )
				);

				expect( fetchMock ).not.toHaveFetched();
				expect( connection ).toEqual( responseConnected );
			} );

			it( 'dispatches an error if the request fails', async () => {
				const response = {
					code: 'internal_server_error',
					message: 'Internal server error',
					data: { status: 500 },
				};
				fetchMock.getOnce(
					/^\/google-site-kit\/v1\/core\/site\/data\/connection/,
					{ body: response, status: 500 }
				);

				// Suppress the expected console error from the failed request.
				muteConsole( 'error' );
				select.getConnection();
				await subscribeUntil( registry,
					// TODO: We may want a selector for this, but for now this is fine
					// because it's internal-only.
					() => select.isFetchingGetConnection() === false,
				);

				const connection = select.getConnection();

				expect( fetchMock ).toHaveFetchedTimes( 1 );
				expect( connection ).toEqual( undefined );
			} );
		} );

		describe( 'isConnected', () => {
			it( 'uses a resolver get all connection info', async () => {
				fetchMock.getOnce(
					/^\/google-site-kit\/v1\/core\/site\/data\/connection/,
					{ body: responseConnected, status: 200 }
				);

				const initialIsConnected = select.isConnected();
				// The connection info will be its initial value while the connection
				// info is fetched.
				expect( initialIsConnected ).toEqual( undefined );
				await subscribeUntil( registry,
					() => (
						select.isConnected() !== undefined
					),
				);

				const isConnected = select.isConnected();

				expect( fetchMock ).toHaveFetchedTimes( 1 );
				expect( isConnected ).toEqual( responseConnected.connected );
			} );

			it( 'dispatches an error if the request fails', async () => {
				const response = {
					code: 'internal_server_error',
					message: 'Internal server error',
					data: { status: 500 },
				};
				fetchMock.getOnce(
					/^\/google-site-kit\/v1\/core\/site\/data\/connection/,
					{ body: response, status: 500 }
				);

				muteConsole( 'error' );
				select.isConnected();
				await subscribeUntil( registry,
					// TODO: We may want a selector for this, but for now this is fine
					// because it's internal-only.
					() => select.isFetchingGetConnection() === false,
				);

				const isConnected = select.isConnected();

				expect( fetchMock ).toHaveFetchedTimes( 1 );
				expect( isConnected ).toEqual( undefined );
			} );

			it( 'returns undefined if connection info is not available', async () => {
				muteFetch( /^\/google-site-kit\/v1\/core\/site\/data\/connection/ );
				const isConnected = select.isConnected();

				expect( isConnected ).toEqual( undefined );
			} );
		} );

		describe( 'isResettable', () => {
			it( 'uses a resolver get all connection info', async () => {
				fetchMock.getOnce(
					/^\/google-site-kit\/v1\/core\/site\/data\/connection/,
					{ body: responseConnected, status: 200 }
				);

				const initialIsResettable = select.isResettable();
				// The connection info will be its initial value while the connection
				// info is fetched.
				expect( initialIsResettable ).toEqual( undefined );
				await subscribeUntil( registry,
					() => (
						select.isResettable() !== undefined
					),
				);

				const isResettable = select.isResettable();

				expect( fetchMock ).toHaveFetchedTimes( 1 );
				expect( isResettable ).toEqual( responseConnected.resettable );
			} );

			it( 'dispatches an error if the request fails', async () => {
				const response = {
					code: 'internal_server_error',
					message: 'Internal server error',
					data: { status: 500 },
				};
				fetchMock.getOnce(
					/^\/google-site-kit\/v1\/core\/site\/data\/connection/,
					{ body: response, status: 500 }
				);

				muteConsole( 'error' );
				select.isResettable();
				await subscribeUntil( registry,
					// TODO: We may want a selector for this, but for now this is fine
					// because it's internal-only.
					() => select.isFetchingGetConnection() === false,
				);

				const isResettable = select.isResettable();

				expect( fetchMock ).toHaveFetchedTimes( 1 );
				expect( isResettable ).toEqual( undefined );
			} );

			it( 'returns undefined if connection info is not available', async () => {
				muteFetch( /^\/google-site-kit\/v1\/core\/site\/data\/connection/ );
				const isResettable = select.isResettable();

				expect( isResettable ).toEqual( undefined );
			} );
		} );

		describe( 'isSetupCompleted', () => {
			it( 'uses a resolver get all connection info', async () => {
				fetchMock.getOnce(
					/^\/google-site-kit\/v1\/core\/site\/data\/connection/,
					{ body: responseConnected, status: 200 }
				);

				const initialIsSetupCompleted = select.isSetupCompleted();
				// The connection info will be its initial value while the connection
				// info is fetched.
				expect( initialIsSetupCompleted ).toEqual( undefined );
				await subscribeUntil( registry,
					() => (
						select.isSetupCompleted() !== undefined
					),
				);

				const isSetupCompleted = select.isSetupCompleted();

				expect( fetchMock ).toHaveFetchedTimes( 1 );
				expect( isSetupCompleted ).toEqual( responseConnected.setupCompleted );
			} );

			it( 'dispatches an error if the request fails', async () => {
				const response = {
					code: 'internal_server_error',
					message: 'Internal server error',
					data: { status: 500 },
				};
				fetchMock.getOnce(
					/^\/google-site-kit\/v1\/core\/site\/data\/connection/,
					{ body: response, status: 500 }
				);

				muteConsole( 'error' );
				select.isSetupCompleted();
				await subscribeUntil( registry,
					// TODO: We may want a selector for this, but for now this is fine
					// because it's internal-only.
					() => select.isFetchingGetConnection() === false,
				);

				const isSetupCompleted = select.isSetupCompleted();

				expect( fetchMock ).toHaveFetchedTimes( 1 );
				expect( isSetupCompleted ).toEqual( undefined );
			} );

			it( 'returns undefined if connection info is not available', async () => {
				muteFetch( /^\/google-site-kit\/v1\/core\/site\/data\/connection/ );
				const isSetupCompleted = select.isSetupCompleted();

				expect( isSetupCompleted ).toEqual( undefined );
			} );
		} );
	} );
} );
| 1 | 32,207 | See above, same for all similar cases below. | google-site-kit-wp | js |
@@ -80,13 +80,14 @@ func TestSingleImageImportLoggableBuilder(t *testing.T) {
uefiBootable: isUEFIDetectedValue,
biosBootable: biosBootableValue,
},
- traceLogs: traceLogs,
+ traceLogs: append(traceLogs, traceLogs...),
}
assert.Equal(t, expected, NewSingleImageImportLoggableBuilder().
SetDiskAttributes(format, sourceGb, targetGb).
SetUEFIMetrics(isUEFICompatibleImageValue, isUEFIDetectedValue, biosBootableValue, bootFSValue).
SetInflationAttributes(matchResultValue, inflationTypeValue, inflationTimeValue, shadowInflationTimeValue).
- SetTraceLogs(traceLogs).
+ AppendTraceLogs(traceLogs).
+ AppendTraceLogs(traceLogs).
Build())
}
} | 1 | // Copyright 2020 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestLiteralLoggable_GetValueAsInt64Slice verifies that int64-slice values
// are returned for known keys and that missing keys yield an empty slice.
func TestLiteralLoggable_GetValueAsInt64Slice(t *testing.T) {
	subject := literalLoggable{
		int64s: map[string][]int64{
			"gb": {1, 2, 3},
		},
	}

	assert.Equal(t, []int64{1, 2, 3}, subject.GetValueAsInt64Slice("gb"))
	assert.Empty(t, subject.GetValueAsInt64Slice("not-there"))
}
// TestLiteralLoggable_GetValue verifies that string values are returned for
// known keys and that unknown keys yield the empty string.
func TestLiteralLoggable_GetValue(t *testing.T) {
	subject := literalLoggable{
		strings: map[string]string{"hello": "world"},
	}

	assert.Equal(t, "world", subject.GetValue("hello"))
	assert.Empty(t, subject.GetValue("not-there"))
}
// TestLiteralLoggable_ReadSerialPortLogs verifies trace logs are returned
// verbatim.
func TestLiteralLoggable_ReadSerialPortLogs(t *testing.T) {
	loggable := literalLoggable{
		traceLogs: []string{"log-a", "log-b"},
	}

	assert.Equal(t, []string{"log-a", "log-b"}, loggable.ReadSerialPortLogs())
}
// TestSingleImageImportLoggableBuilder verifies that the builder maps every
// setter argument onto the expected literalLoggable fields, across all
// combinations of the three UEFI/BIOS boolean inputs.
func TestSingleImageImportLoggableBuilder(t *testing.T) {
	format := "vmdk"
	sourceGb := int64(12)
	targetGb := int64(100)
	traceLogs := []string{"log-a", "log-b"}
	inflationTypeValue := "qemu"
	inflationTimeValue := int64(10000)
	shadowInflationTimeValue := int64(5000)
	matchResultValue := "true"
	bootFSValue := "btrfs"
	for _, isUEFICompatibleImageValue := range []bool{true, false} {
		for _, isUEFIDetectedValue := range []bool{true, false} {
			for _, biosBootableValue := range []bool{true, false} {
				expected := literalLoggable{
					strings: map[string]string{
						importFileFormat:      format,
						inflationType:         inflationTypeValue,
						shadowDiskMatchResult: matchResultValue,
						rootFS:                bootFSValue,
					},
					int64s: map[string][]int64{
						sourceSizeGb:        {sourceGb},
						targetSizeGb:        {targetGb},
						inflationTime:       {inflationTimeValue},
						shadowInflationTime: {shadowInflationTimeValue},
					},
					bools: map[string]bool{
						isUEFICompatibleImage: isUEFICompatibleImageValue,
						isUEFIDetected:        isUEFIDetectedValue,
						// NOTE(review): uefiBootable mirrors isUEFIDetectedValue
						// rather than having its own input; confirm this matches
						// the builder's intended mapping.
						uefiBootable: isUEFIDetectedValue,
						biosBootable: biosBootableValue,
					},
					traceLogs: traceLogs,
				}
				assert.Equal(t, expected, NewSingleImageImportLoggableBuilder().
					SetDiskAttributes(format, sourceGb, targetGb).
					SetUEFIMetrics(isUEFICompatibleImageValue, isUEFIDetectedValue, biosBootableValue, bootFSValue).
					SetInflationAttributes(matchResultValue, inflationTypeValue, inflationTimeValue, shadowInflationTimeValue).
					SetTraceLogs(traceLogs).
					Build())
			}
		}
	}
}
| 1 | 12,505 | Why is this done twice? | GoogleCloudPlatform-compute-image-tools | go |
@@ -55,6 +55,11 @@ func (it *DeadlineReconciler) Reconcile(request reconcile.Request) (reconcile.Re
return reconcile.Result{}, client.IgnoreNotFound(err)
}
+ if ConditionEqualsTo(node.Status, v1alpha1.ConditionDeadlineExceed, corev1.ConditionTrue) {
+ // if this node deadline is exceed, try propagating to children node
+ return reconcile.Result{}, it.propagateDeadlineToChildren(ctx, &node)
+ }
+
if node.Spec.Deadline == nil {
return reconcile.Result{}, nil
} | 1 | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package controllers
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/utils/recorder"
)
// DeadlineReconciler watches WorkflowNodes and maintains their
// ConditionDeadlineExceed condition based on Spec.Deadline.
type DeadlineReconciler struct {
	*ChildNodesFetcher
	kubeClient    client.Client
	eventRecorder recorder.ChaosRecorder
	logger        logr.Logger
}

// NewDeadlineReconciler constructs a DeadlineReconciler whose child-node
// fetcher is wired to the same client and logger.
func NewDeadlineReconciler(kubeClient client.Client, eventRecorder recorder.ChaosRecorder, logger logr.Logger) *DeadlineReconciler {
	return &DeadlineReconciler{
		ChildNodesFetcher: NewChildNodesFetcher(kubeClient, logger),
		kubeClient:        kubeClient,
		eventRecorder:     eventRecorder,
		logger:            logger}
}
// Reconcile drives deadline handling for a single WorkflowNode: when the
// node's deadline has passed it sets the ConditionDeadlineExceed condition to
// ConditionTrue (emitting an event unless the node already accomplished its
// work); otherwise it marks the condition ConditionFalse and requeues the
// node so it is revisited exactly when the deadline arrives. A node whose
// fetched status already carries ConditionDeadlineExceed=ConditionTrue has
// the deadline propagated to its children.
func (it *DeadlineReconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	ctx := context.TODO()

	node := v1alpha1.WorkflowNode{}
	err := it.kubeClient.Get(ctx, request.NamespacedName, &node)
	if err != nil {
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}

	if node.Spec.Deadline == nil {
		// Nodes without a deadline can never expire.
		return reconcile.Result{}, nil
	}

	now := metav1.NewTime(time.Now())
	if node.Spec.Deadline.Before(&now) {
		// Deadline already passed: mark the condition, retrying on conflicts
		// against a freshly fetched copy of the node.
		updateError := retry.RetryOnConflict(retry.DefaultRetry, func() error {
			nodeNeedUpdate := v1alpha1.WorkflowNode{}
			err := it.kubeClient.Get(ctx, request.NamespacedName, &nodeNeedUpdate)
			if err != nil {
				return err
			}
			if ConditionEqualsTo(nodeNeedUpdate.Status, v1alpha1.ConditionDeadlineExceed, corev1.ConditionTrue) {
				// Condition already recorded; nothing to update.
				return nil
			}
			// A node that finished its work before the deadline is treated as
			// "omitted" rather than "exceeded".
			var reason string
			if ConditionEqualsTo(nodeNeedUpdate.Status, v1alpha1.ConditionAccomplished, corev1.ConditionTrue) {
				reason = v1alpha1.NodeDeadlineOmitted
			} else {
				reason = v1alpha1.NodeDeadlineExceed
			}
			// The early return above guarantees the condition is not yet
			// ConditionTrue, so only the reason needs checking before
			// emitting the event.
			if reason == v1alpha1.NodeDeadlineExceed {
				// NOTE(review): the event is recorded against the originally
				// fetched node rather than the re-fetched nodeNeedUpdate;
				// confirm that is intended.
				it.eventRecorder.Event(&node, recorder.DeadlineExceed{})
			}
			SetCondition(&nodeNeedUpdate.Status, v1alpha1.WorkflowNodeCondition{
				Type:   v1alpha1.ConditionDeadlineExceed,
				Status: corev1.ConditionTrue,
				Reason: reason,
			})
			return it.kubeClient.Status().Update(ctx, &nodeNeedUpdate)
		})
		if updateError != nil {
			return reconcile.Result{}, updateError
		}
		it.logger.Info("deadline exceed", "key", request.NamespacedName, "deadline", node.Spec.Deadline.Time)
	} else {
		// Deadline not reached yet: record the condition as false and requeue
		// for exactly when the deadline arrives.
		updateError := retry.RetryOnConflict(retry.DefaultRetry, func() error {
			nodeNeedUpdate := v1alpha1.WorkflowNode{}
			err := it.kubeClient.Get(ctx, request.NamespacedName, &nodeNeedUpdate)
			if err != nil {
				return err
			}
			if ConditionEqualsTo(nodeNeedUpdate.Status, v1alpha1.ConditionDeadlineExceed, corev1.ConditionFalse) {
				// no need to update
				return nil
			}
			SetCondition(&nodeNeedUpdate.Status, v1alpha1.WorkflowNodeCondition{
				Type:   v1alpha1.ConditionDeadlineExceed,
				Status: corev1.ConditionFalse,
				Reason: v1alpha1.NodeDeadlineNotExceed,
			})
			return it.kubeClient.Status().Update(ctx, &nodeNeedUpdate)
		})
		if updateError != nil {
			return reconcile.Result{}, updateError
		}
		duration := node.Spec.Deadline.Time.Sub(now.Time)
		it.logger.Info("deadline not exceed, requeue after a while", "key", request.NamespacedName, "deadline", node.Spec.Deadline.Time,
			"duration", duration)
		return reconcile.Result{
			RequeueAfter: duration,
		}, nil
	}

	if ConditionEqualsTo(node.Status, v1alpha1.ConditionDeadlineExceed, corev1.ConditionTrue) {
		// The node was already marked when fetched at the start of this pass;
		// try propagating the deadline to its children.
		return reconcile.Result{}, it.propagateDeadlineToChildren(ctx, &node)
	}

	return reconcile.Result{}, nil
}
// propagateDeadlineToChildren marks the ConditionDeadlineExceed condition
// (with reason ParentNodeDeadlineExceed) on all active children of a
// composite (serial/parallel/task) node. Children that already finished are
// skipped; other node types are a no-op.
func (it *DeadlineReconciler) propagateDeadlineToChildren(ctx context.Context, parent *v1alpha1.WorkflowNode) error {
	switch parent.Spec.Type {
	case v1alpha1.TypeSerial, v1alpha1.TypeParallel, v1alpha1.TypeTask:
		activeChildNodes, _, err := it.ChildNodesFetcher.fetchChildNodes(ctx, *parent)
		if err != nil {
			return err
		}
		for _, childNode := range activeChildNodes {
			// Re-declare to take a per-iteration copy (the loop variable is
			// reused across iterations and captured by the closure below).
			childNode := childNode
			if WorkflowNodeFinished(childNode.Status) {
				it.logger.V(4).Info("child node already finished, skip for propagate deadline", "node", fmt.Sprintf("%s/%s", childNode.Namespace, childNode.Name))
				continue
			}
			// Update each child with conflict retries, re-fetching the latest
			// copy before every attempt.
			err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
				nodeNeedUpdate := v1alpha1.WorkflowNode{}
				err := it.kubeClient.Get(ctx, types.NamespacedName{
					Namespace: childNode.Namespace,
					Name:      childNode.Name,
				}, &nodeNeedUpdate)
				if err != nil {
					return err
				}
				SetCondition(&nodeNeedUpdate.Status, v1alpha1.WorkflowNodeCondition{
					Type:   v1alpha1.ConditionDeadlineExceed,
					Status: corev1.ConditionTrue,
					Reason: v1alpha1.ParentNodeDeadlineExceed,
				})
				it.eventRecorder.Event(&nodeNeedUpdate, recorder.ParentNodeDeadlineExceed{ParentNodeName: parent.Name})
				return it.kubeClient.Status().Update(ctx, &nodeNeedUpdate)
			})
			if err != nil {
				return err
			}
			it.logger.Info("propagate deadline for child node",
				"child node", fmt.Sprintf("%s/%s", childNode.Namespace, childNode.Name),
				"parent node", fmt.Sprintf("%s/%s", parent.Namespace, parent.Name),
			)
		}
		return nil
	default:
		it.logger.V(4).Info("no need to propagate with this type of workflow node", "type", parent.Spec.Type)
		return nil
	}
}
| 1 | 23,320 | This sync ensures the child node's condition stays consistent with the parent's, but I still don't understand when the child would be overwritten — is that behavior actually expected? | chaos-mesh-chaos-mesh | go |
@@ -175,8 +175,8 @@ func TestEnvironment(state *BuildState, target *BuildTarget, testDir string) Bui
if target.HasLabel("cc") {
env = append(env, "GCNO_DIR="+path.Join(RepoRoot, GenDir, target.Label.PackageName))
}
- if state.DebugTests {
- env = append(env, "DEBUG=true")
+ if state.DebugFailingTests {
+ env = append(env, "DEBUG_TEST_FAILURE=true")
}
if target.Test.Sandbox && len(state.Config.Sandbox.Dir) > 0 {
env = append(env, "SANDBOX_DIRS="+strings.Join(state.Config.Sandbox.Dir, ",")) | 1 | package core
import (
"encoding/base64"
"fmt"
"os"
"path"
"runtime"
"strings"
"sync"
"github.com/thought-machine/please/src/fs"
"github.com/thought-machine/please/src/scm"
)
// A BuildEnv is a representation of the build environment that also knows how to log itself.
type BuildEnv []string

// GeneralBuildEnvironment creates the shell env vars used for a command, not based
// on any specific target etc.
func GeneralBuildEnvironment(state *BuildState) BuildEnv {
	env := BuildEnv{
		// Need this for certain tools, for example sass
		"LANG=" + state.Config.Build.Lang,
		// Need to know these for certain rules.
		"ARCH=" + state.Arch.Arch,
		"OS=" + state.Arch.OS,
		// These are slightly modified forms that are more convenient for some things.
		"XARCH=" + state.Arch.XArch(),
		"XOS=" + state.Arch.XOS(),
		// It's easier to just make these available for Go-based rules.
		"GOARCH=" + state.Arch.GoArch(),
		"GOOS=" + state.Arch.OS,
	}
	// Only exported when explicitly configured.
	if state.Config.Cpp.PkgConfigPath != "" {
		env = append(env, "PKG_CONFIG_PATH="+state.Config.Cpp.PkgConfigPath)
	}
	// User-configured build env vars are appended last.
	return append(env, state.Config.GetBuildEnv()...)
}
// TargetEnvironment returns the basic parts of the build environment.
// It extends GeneralBuildEnvironment with per-target package/name variables
// and any host env vars the target asked to pass through.
func TargetEnvironment(state *BuildState, target *BuildTarget) BuildEnv {
	env := append(GeneralBuildEnvironment(state),
		"PKG="+target.Label.PackageName,
		"PKG_DIR="+target.Label.PackageDir(),
		"NAME="+target.Label.Name,
	)
	if state.Config.Remote.URL == "" || target.Local {
		// Expose the requested build config, but it is not available for remote execution.
		// TODO(peterebden): Investigate removing these env vars completely.
		env = append(env, "BUILD_CONFIG="+state.Config.Build.Config, "CONFIG="+state.Config.Build.Config)
	}
	// NOTE(review): "unsafe" presumably means these host vars bypass some
	// hermeticity guarantee compared to PassEnv; confirm the distinction.
	if target.PassUnsafeEnv != nil {
		for _, e := range *target.PassUnsafeEnv {
			env = append(env, e+"="+os.Getenv(e))
		}
	}
	// Pass through the named host environment variables verbatim.
	if target.PassEnv != nil {
		for _, e := range *target.PassEnv {
			env = append(env, e+"="+os.Getenv(e))
		}
	}
	return env
}
// BuildEnvironment creates the shell env vars to be passed into the exec.Command calls made by plz.
// Note that we lie about the location of HOME in order to keep some tools happy.
// We read this as being slightly more POSIX-compliant than not having it set at all...
func BuildEnvironment(state *BuildState, target *BuildTarget, tmpDir string) BuildEnv {
	env := TargetEnvironment(state, target)
	sources := target.AllSourcePaths(state.Graph)
	outEnv := target.GetTmpOutputAll(target.Outputs())
	abs := path.IsAbs(tmpDir)
	// Core per-target variables; HOME deliberately points at the temp dir.
	env = append(env,
		"TMP_DIR="+tmpDir,
		"TMPDIR="+tmpDir,
		"SRCS="+strings.Join(sources, " "),
		"OUTS="+strings.Join(outEnv, " "),
		"HOME="+tmpDir,
		// Set a consistent hash seed for Python. Important for build determinism.
		"PYTHONHASHSEED=42",
	)
	// The OUT variable is only available on rules that have a single output.
	if len(outEnv) == 1 {
		env = append(env, "OUT="+resolveOut(outEnv[0], tmpDir, target.Sandbox))
	}
	// The SRC variable is only available on rules that have a single source file.
	if len(sources) == 1 {
		env = append(env, "SRC="+sources[0])
	}
	// Named source groups if the target declared any.
	for name, srcs := range target.NamedSources {
		paths := target.SourcePaths(state.Graph, srcs)
		// TODO(macripps): Quote these to prevent spaces from breaking everything (consider joining with NUL or sth?)
		env = append(env, "SRCS_"+strings.ToUpper(name)+"="+strings.Join(paths, " "))
	}
	// Named output groups similarly.
	for name, outs := range target.DeclaredNamedOutputs() {
		outs = target.GetTmpOutputAll(outs)
		env = append(env, "OUTS_"+strings.ToUpper(name)+"="+strings.Join(outs, " "))
	}
	// Tools
	env = append(env, toolsEnv(state, target.AllTools(), target.namedTools, "", abs)...)
	// Secrets, again only if they declared any.
	if len(target.Secrets) > 0 {
		secrets := "SECRETS=" + fs.ExpandHomePath(strings.Join(target.Secrets, ":"))
		secrets = strings.ReplaceAll(secrets, ":", " ")
		env = append(env, secrets)
	}
	// NamedSecrets, if they declared any.
	for name, secrets := range target.NamedSecrets {
		secrets := "SECRETS_" + strings.ToUpper(name) + "=" + fs.ExpandHomePath(strings.Join(secrets, ":"))
		secrets = strings.ReplaceAll(secrets, ":", " ")
		env = append(env, secrets)
	}
	if target.Sandbox && len(state.Config.Sandbox.Dir) > 0 {
		env = append(env, "SANDBOX_DIRS="+strings.Join(state.Config.Sandbox.Dir, ","))
	}
	if state.Config.Bazel.Compatibility {
		// Obviously this is only a subset of the variables Bazel would expose, but there's
		// no point populating ones that we literally have no clue what they should be.
		// To be honest I don't terribly like these, I'm pretty sure that using $GENDIR in
		// your genrule is not a good sign.
		env = append(env, "GENDIR="+path.Join(RepoRoot, GenDir))
		env = append(env, "BINDIR="+path.Join(RepoRoot, BinDir))
	}
	return withUserProvidedEnv(target, env)
}
// userEnv adds the env variables passed to the build rule to the build env
// Sadly this can't be done as part of TargetEnv() target env as this requires the other
// env vars are set so they can be substituted.
//
// NOTE(review): Go map iteration order is randomized and each processed user
// var is appended to env before the next is handled, so one user var
// substituting into another is order-dependent and therefore nondeterministic;
// confirm whether cross-referencing user vars is supported.
// NOTE(review): substitution is a plain match on "$KEY", so e.g. $OS would
// also rewrite the front of a literal $OSTYPE; confirm this is acceptable.
func withUserProvidedEnv(target *BuildTarget, env BuildEnv) BuildEnv {
	for k, v := range target.Env {
		// Substitute every already-known env var into the user-provided value.
		for _, kv := range env {
			i := strings.Index(kv, "=")
			key, value := kv[:i], kv[(i+1):]
			v = strings.ReplaceAll(v, "$"+key, value)
		}
		env = append(env, fmt.Sprintf("%s=%s", k, v))
	}
	return env
}
// TestEnvironment creates the environment variables for a test.
// testDir is the directory the test runs in; TMP_DIR, HOME, RESULTS_FILE and
// friends are all rooted there.
func TestEnvironment(state *BuildState, target *BuildTarget, testDir string) BuildEnv {
	env := RuntimeEnvironment(state, target, path.IsAbs(testDir), true)
	resultsFile := path.Join(testDir, TestResultsFile)
	env = append(env,
		"TEST_DIR="+testDir,
		"TMP_DIR="+testDir,
		"TMPDIR="+testDir,
		"HOME="+testDir,
		"TEST_ARGS="+strings.Join(state.TestArgs, ","),
		"RESULTS_FILE="+resultsFile,
		// We shouldn't really have specific things like this here, but it really is just easier to set it.
		"GTEST_OUTPUT=xml:"+resultsFile,
		"PEX_NOCACHE=true",
	)
	// Coverage vars are only set when coverage is requested and not disabled
	// for this target via labels.
	if state.NeedCoverage && !target.HasAnyLabel(state.Config.Test.DisableCoverage) {
		env = append(env,
			"COVERAGE=true",
			"COVERAGE_FILE="+path.Join(testDir, CoverageFile),
		)
	}
	// TEST points at the first declared output, if there are any.
	if len(target.Outputs()) > 0 {
		env = append(env, "TEST="+resolveOut(target.Outputs()[0], testDir, target.Test.Sandbox))
	}
	// Bit of a hack for gcov which needs access to its .gcno files.
	if target.HasLabel("cc") {
		env = append(env, "GCNO_DIR="+path.Join(RepoRoot, GenDir, target.Label.PackageName))
	}
	if state.DebugTests {
		env = append(env, "DEBUG=true")
	}
	if target.Test.Sandbox && len(state.Config.Sandbox.Dir) > 0 {
		env = append(env, "SANDBOX_DIRS="+strings.Join(state.Config.Sandbox.Dir, ","))
	}
	return withUserProvidedEnv(target, env)
}
// RunEnvironment creates the environment variables for a `plz run --env`.
func RunEnvironment(state *BuildState, target *BuildTarget, inTmpDir bool) BuildEnv {
	env := RuntimeEnvironment(state, target, true, inTmpDir)

	outs := target.Outputs()
	env = append(env, "OUTS="+strings.Join(outs, " "))
	if len(outs) == 1 {
		// OUT is only defined when the target has exactly one output.
		env = append(env, "OUT="+resolveOut(outs[0], ".", false))
	}
	return withUserProvidedEnv(target, env)
}
// ExecEnvironment creates the environment variables for a `plz exec`.
// execDir is used as the working/temp/home directory of the executed process.
func ExecEnvironment(state *BuildState, target *BuildTarget, execDir string) BuildEnv {
	env := append(RuntimeEnvironment(state, target, true, true),
		"TMP_DIR="+execDir,
		"TMPDIR="+execDir,
		"HOME="+execDir,
		// This is used by programs that use display terminals for correct handling
		// of input and output in the terminal where the program is run.
		"TERM="+os.Getenv("TERM"),
	)

	outEnv := target.Outputs()
	// OUTS/OUT environment variables being always set is for backwards-compatibility.
	// Ideally, if the target is a test these variables shouldn't be set.
	env = append(env, "OUTS="+strings.Join(outEnv, " "))
	if len(outEnv) == 1 {
		env = append(env, "OUT="+resolveOut(outEnv[0], ".", target.Sandbox))
		// Tests additionally get TEST pointing at the same single output.
		if target.IsTest() {
			env = append(env, "TEST="+resolveOut(outEnv[0], ".", target.Test.Sandbox))
		}
	}
	return withUserProvidedEnv(target, env)
}
// RuntimeEnvironment is the base environment for runtime-based environments.
// Tools and data env variables are made available.
// abs controls whether tool paths are absolute; inTmpDir controls whether data
// paths are relative to the temp dir or the repo.
func RuntimeEnvironment(state *BuildState, target *BuildTarget, abs, inTmpDir bool) BuildEnv {
	env := TargetEnvironment(state, target)
	// Data
	env = append(env, dataEnv(state, target.AllData(), target.NamedData, "", inTmpDir)...)
	if target.IsTest() {
		// Test tools
		env = append(env, toolsEnv(state, target.AllTestTools(), target.NamedTestTools(), "", abs)...)
	}
	if target.Debug != nil {
		// Debug variables are namespaced with a DEBUG_ prefix to avoid clashes.
		prefix := "DEBUG_"
		// Debug data
		env = append(env, dataEnv(state, target.AllDebugData(), target.DebugNamedData(), prefix, inTmpDir)...)
		// Debug tools
		env = append(env, toolsEnv(state, target.AllDebugTools(), target.Debug.namedTools, prefix, abs)...)
	}
	return env
}
// resolveOut maps an output file name to the path it will have at runtime.
func resolveOut(out string, dir string, sandbox bool) string {
	// Bit of a hack; ideally we would be unaware of the sandbox here.
	useSandbox := sandbox && runtime.GOOS == "linux" && dir != "." &&
		!strings.HasPrefix(RepoRoot, "/tmp/")
	if useSandbox {
		return path.Join(SandboxDir, out)
	}
	return path.Join(dir, out)
}
// toolsEnv creates the tool-related env variables (TOOLS, TOOL, TOOLS_<NAME>),
// optionally namespaced with the given prefix.
func toolsEnv(state *BuildState, allTools []BuildInput, namedTools map[string][]BuildInput, prefix string, abs bool) BuildEnv {
	ret := BuildEnv{
		prefix + "TOOLS=" + strings.Join(toolPaths(state, allTools, abs), " "),
	}
	// TOOL is only defined when there's exactly one tool.
	if len(allTools) == 1 {
		ret = append(ret, prefix+"TOOL="+toolPath(state, allTools[0], abs))
	}
	for name, tools := range namedTools {
		ret = append(ret, prefix+"TOOLS_"+strings.ToUpper(name)+"="+strings.Join(toolPaths(state, tools, abs), " "))
	}
	return ret
}
// dataEnv creates the data-related env variables (DATA, DATA_<NAME>),
// optionally namespaced with the given prefix.
func dataEnv(state *BuildState, allData []BuildInput, namedData map[string][]BuildInput, prefix string, inTmpDir bool) BuildEnv {
	// Outside the temp dir, data files must be referenced by full path.
	fullPath := !inTmpDir
	ret := BuildEnv{
		prefix + "DATA=" + strings.Join(runtimeDataPaths(state.Graph, allData, fullPath), " "),
	}
	for name, data := range namedData {
		ret = append(ret, prefix+"DATA_"+strings.ToUpper(name)+"="+strings.Join(runtimeDataPaths(state.Graph, data, fullPath), " "))
	}
	return ret
}
// runtimeDataPaths flattens the given data inputs into their runtime paths,
// using full (repo-relative) paths when fullPath is set.
func runtimeDataPaths(graph *BuildGraph, data []BuildInput, fullPath bool) []string {
	ret := make([]string, 0, len(data))
	for _, input := range data {
		var ps []string
		if fullPath {
			ps = input.FullPaths(graph)
		} else {
			ps = input.Paths(graph)
		}
		ret = append(ret, ps...)
	}
	return ret
}
// StampedBuildEnvironment returns the shell env vars to be passed into exec.Command.
// Optionally includes a stamp if asked.
func StampedBuildEnvironment(state *BuildState, target *BuildTarget, stamp []byte, tmpDir string, shouldStamp bool) BuildEnv {
	env := BuildEnvironment(state, target, tmpDir)
	encStamp := base64.RawURLEncoding.EncodeToString(stamp)
	if shouldStamp {
		// SCM-derived vars are computed once per process; see initStampEnv.
		stampEnvOnce.Do(initStampEnv)
		env = append(env, stampEnv...)
		env = append(env, "STAMP_FILE="+target.StampFileName())
		env = append(env, "STAMP="+encStamp)
	}
	// RULE_HASH is always present, stamped or not.
	return append(env, "RULE_HASH="+encStamp)
}
// stampEnv is the generic (i.e. non-target-specific) environment variables we pass to a
// build rule marked with stamp=True.
var stampEnv BuildEnv

// stampEnvOnce guards the one-time initialisation of stampEnv.
var stampEnvOnce sync.Once

// initStampEnv queries the repo's SCM for revision information and caches it
// in stampEnv. The two SCM queries are independent, so they run concurrently.
func initStampEnv() {
	repoScm := scm.NewFallback(RepoRoot)
	var wg sync.WaitGroup
	var revision, commitDate, describe string
	wg.Add(2)
	go func() {
		revision = repoScm.CurrentRevIdentifier()
		describe = repoScm.DescribeIdentifier(revision)
		wg.Done()
	}()
	go func() {
		commitDate = repoScm.CurrentRevDate("20060102")
		wg.Done()
	}()
	wg.Wait()
	stampEnv = BuildEnv{
		"SCM_COMMIT_DATE=" + commitDate,
		"SCM_REVISION=" + revision,
		"SCM_DESCRIBE=" + describe,
	}
}
// toolPath returns the runtime path for a single tool, which may be a build
// label (possibly with an entry-point annotation) or a plain file.
func toolPath(state *BuildState, tool BuildInput, abs bool) string {
	label, ok := tool.Label()
	if !ok {
		// Not a build label, just a file in the repo.
		if abs {
			return tool.Paths(state.Graph)[0]
		}
		return tool.LocalPaths(state.Graph)[0]
	}
	entryPoint := ""
	if o, ok := tool.(AnnotatedOutputLabel); ok {
		entryPoint = o.Annotation
	}
	p := state.Graph.TargetOrDie(label).toolPath(abs, entryPoint)
	// Ensure a bare relative name is still executable from the cwd.
	if !strings.Contains(p, "/") {
		p = "./" + p
	}
	return p
}
// toolPaths maps toolPath over a slice of tools.
func toolPaths(state *BuildState, tools []BuildInput, abs bool) []string {
	paths := make([]string, 0, len(tools))
	for _, tool := range tools {
		paths = append(paths, toolPath(state, tool, abs))
	}
	return paths
}
// ReplaceEnvironment is a function suitable for passing to os.Expand to replace environment
// variables from this BuildEnv.
func (env BuildEnv) ReplaceEnvironment(s string) string {
	prefix := s + "="
	for _, entry := range env {
		if strings.HasPrefix(entry, prefix) {
			return strings.TrimPrefix(entry, prefix)
		}
	}
	return "" // unknown variables expand to nothing
}
// Replace replaces the value of the given variable in this BuildEnv.
func (env BuildEnv) Replace(key, value string) {
	prefix := key + "="
	for i, entry := range env {
		if strings.HasPrefix(entry, prefix) {
			env[i] = prefix + value
		}
	}
}
// Redacted implements the interface for our logging implementation; variables
// whose names contain SECRET or PASSWORD have their values masked.
func (env BuildEnv) Redacted() interface{} {
	redacted := make(BuildEnv, len(env))
	for i, entry := range env {
		redacted[i] = entry
		if parts := strings.SplitN(entry, "=", 2); len(parts) == 2 {
			if strings.Contains(parts[0], "SECRET") || strings.Contains(parts[0], "PASSWORD") {
				redacted[i] = parts[0] + "=" + "************"
			}
		}
	}
	return redacted
}
// String implements the fmt.Stringer interface
// by joining all variables, one per line.
func (env BuildEnv) String() string {
	return strings.Join(env, "\n")
}
| 1 | 10,277 | This was renamed to avoid any confusion with the more general case of debugging via `plz debug` | thought-machine-please | go |
@@ -88,9 +88,11 @@ public class Converter {
static byte[] convertUtf8ToBytes(Object val, int prefixLength) {
requireNonNull(val, "val is null");
if (val instanceof byte[]) {
- return new String((byte[]) val).substring(0, prefixLength).getBytes(StandardCharsets.UTF_8);
+ return new String((byte[]) val).substring(0, Math.min(((byte[]) val).length, prefixLength))
+ .getBytes(StandardCharsets.UTF_8);
} else if (val instanceof String) {
- return ((String) val).substring(0, prefixLength).getBytes(StandardCharsets.UTF_8);
+ return ((String) val).substring(0, Math.min(((String) val).length(), prefixLength))
+ .getBytes(StandardCharsets.UTF_8);
}
throw new TypeException(
String.format("Cannot cast %s to bytes", val.getClass().getSimpleName())); | 1 | /*
*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.pingcap.tikv.types;
import static com.pingcap.tikv.types.TimeType.HOUR;
import static com.pingcap.tikv.types.TimeType.MICROSECOND;
import static com.pingcap.tikv.types.TimeType.MINUTE;
import static com.pingcap.tikv.types.TimeType.SECOND;
import static java.util.Objects.requireNonNull;
import com.pingcap.tikv.exception.TypeException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.sql.Date;
import java.sql.Timestamp;
import java.util.Arrays;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
/**
 * Converter provides conversions between raw Java objects and the primitive
 * representations used by the key/value encoding layer.
 */
public class Converter {

  /** Converts any {@link Number} or numeric {@link String} to a long. */
  public static long convertToLong(Object val) {
    requireNonNull(val, "val is null");
    if (val instanceof Number) {
      return ((Number) val).longValue();
    } else if (val instanceof String) {
      return Long.parseLong(val.toString());
    }
    throw new TypeException(
        String.format("Cannot cast %s to long", val.getClass().getSimpleName()));
  }

  /** Converts any {@link Number} or numeric {@link String} to a double. */
  public static double convertToDouble(Object val) {
    requireNonNull(val, "val is null");
    if (val instanceof Number) {
      return ((Number) val).doubleValue();
    } else if (val instanceof String) {
      return Double.parseDouble(val.toString());
    }
    throw new TypeException(
        String.format("Cannot cast %s to double", val.getClass().getSimpleName()));
  }

  /** Converts any non-null object to its string form. */
  public static String convertToString(Object val) {
    requireNonNull(val, "val is null");
    return val.toString();
  }

  /** Converts a byte[] or String to bytes (platform default charset for String). */
  public static byte[] convertToBytes(Object val) {
    requireNonNull(val, "val is null");
    if (val instanceof byte[]) {
      return (byte[]) val;
    } else if (val instanceof String) {
      return ((String) val).getBytes();
    }
    throw new TypeException(
        String.format("Cannot cast %s to bytes", val.getClass().getSimpleName()));
  }

  /**
   * Converts a byte[] or String to exactly prefixLength bytes.
   *
   * <p>NOTE(review): Arrays.copyOf zero-pads when prefixLength exceeds the
   * input length — presumably intentional for fixed-length prefix keys;
   * confirm against callers.
   */
  static byte[] convertToBytes(Object val, int prefixLength) {
    requireNonNull(val, "val is null");
    if (val instanceof byte[]) {
      return Arrays.copyOf((byte[]) val, prefixLength);
    } else if (val instanceof String) {
      return Arrays.copyOf(((String) val).getBytes(), prefixLength);
    }
    throw new TypeException(
        String.format("Cannot cast %s to bytes", val.getClass().getSimpleName()));
  }

  /**
   * Converts a byte[] or String to the UTF-8 bytes of its first prefixLength
   * characters.
   *
   * <p>The prefix length is clamped to the actual character count: the
   * previous code called substring(0, prefixLength) unconditionally, which
   * threw StringIndexOutOfBoundsException whenever the value was shorter than
   * the index prefix length.
   *
   * <p>NOTE(review): the truncation is by char count, so multi-byte
   * characters can make the result longer than prefixLength bytes — confirm
   * that is the intended semantics for prefix indexes.
   */
  static byte[] convertUtf8ToBytes(Object val, int prefixLength) {
    requireNonNull(val, "val is null");
    if (val instanceof byte[]) {
      String str = new String((byte[]) val);
      return str.substring(0, Math.min(str.length(), prefixLength))
          .getBytes(StandardCharsets.UTF_8);
    } else if (val instanceof String) {
      String str = (String) val;
      return str.substring(0, Math.min(str.length(), prefixLength))
          .getBytes(StandardCharsets.UTF_8);
    }
    throw new TypeException(
        String.format("Cannot cast %s to bytes", val.getClass().getSimpleName()));
  }

  private static final DateTimeZone localTimeZone = DateTimeZone.getDefault();
  private static final DateTimeFormatter localDateTimeFormatter =
      DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss").withZone(localTimeZone);
  private static final DateTimeFormatter localDateFormatter =
      DateTimeFormat.forPattern("yyyy-MM-dd").withZone(localTimeZone);
  public static final DateTimeFormatter UTC_TIME_FORMATTER =
      DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss").withZone(DateTimeZone.UTC);

  /** Returns the JVM's default time zone, cached at class-load time. */
  public static DateTimeZone getLocalTimezone() {
    return localTimeZone;
  }

  /** Parses a datetime string with the given formatter. */
  public static DateTime strToDateTime(String value, DateTimeFormatter formatter) {
    return DateTime.parse(value, formatter);
  }

  /**
   * Convert an object to Datetime. If constant is a string, it parses as local
   * timezone. If it is a long, it is parsed as UTC epoch.
   *
   * @param val value to be converted to DateTime
   * @return joda.time.DateTime indicating local Datetime
   */
  public static DateTime convertToDateTime(Object val) {
    requireNonNull(val, "val is null");
    if (val instanceof DateTime) {
      return (DateTime) val;
    } else if (val instanceof String) {
      // interpret string as in local timezone
      try {
        return strToDateTime((String) val, localDateTimeFormatter);
      } catch (Exception e) {
        throw new TypeException(
            String.format("Error parsing string %s to datetime", (String) val), e);
      }
    } else if (val instanceof Long) {
      return new DateTime((long) val);
    } else if (val instanceof Timestamp) {
      return new DateTime(((Timestamp) val).getTime());
    } else if (val instanceof Date) {
      return new DateTime(((Date) val).getTime());
    } else {
      throw new TypeException("Can not cast Object to LocalDateTime ");
    }
  }

  /**
   * Convert an object to Date. If constant is a string, it parses as local
   * timezone. If it is a long, it is parsed as UTC epoch.
   *
   * @param val value to be converted to DateTime
   * @return java.sql.Date indicating Date
   */
  public static Date convertToDate(Object val) {
    requireNonNull(val, "val is null");
    if (val instanceof Date) {
      return (Date) val;
    } else if (val instanceof String) {
      try {
        return new Date(DateTime.parse((String) val, localDateFormatter).toDate().getTime());
      } catch (Exception e) {
        throw new TypeException(String.format("Error parsing string %s to date", (String) val), e);
      }
    } else if (val instanceof Long) {
      return new Date((long) val);
    } else if (val instanceof Timestamp) {
      return new Date(((Timestamp) val).getTime());
    } else if (val instanceof DateTime) {
      return new Date(((DateTime) val).getMillis());
    } else {
      throw new TypeException("Can not cast Object to LocalDate");
    }
  }

  /** Converts numbers and numeric strings to BigDecimal. */
  public static BigDecimal convertToBigDecimal(Object val) {
    requireNonNull(val, "val is null");
    if (val instanceof BigDecimal) {
      return (BigDecimal) val;
    } else if (val instanceof Double || val instanceof Float) {
      // Widen via doubleValue(): the previous direct cast to Double threw
      // ClassCastException for Float inputs.
      return new BigDecimal(((Number) val).doubleValue());
    } else if (val instanceof BigInteger) {
      return new BigDecimal((BigInteger) val);
    } else if (val instanceof Number) {
      return new BigDecimal(((Number) val).longValue());
    } else if (val instanceof String) {
      return new BigDecimal((String) val);
    } else {
      throw new TypeException("can not cast non Number type to Double");
    }
  }

  /**
   * Formats a duration in nanoseconds as [-]hh:mm:ss[.ffffff], keeping
   * {@code decimal} fractional digits.
   */
  public static String convertDurationToStr(long nanos, int decimal) {
    int sign = 1, hours, minutes, seconds, frac;
    if (nanos < 0) {
      nanos = -nanos;
      sign = -1;
    }
    hours = (int) (nanos / HOUR);
    nanos -= hours * HOUR;
    minutes = (int) (nanos / MINUTE);
    nanos -= minutes * MINUTE;
    seconds = (int) (nanos / SECOND);
    nanos -= seconds * SECOND;
    frac = (int) (nanos / MICROSECOND);
    StringBuilder sb = new StringBuilder();
    if (sign < 0) {
      sb.append('-');
    }
    sb.append(String.format("%02d:%02d:%02d", hours, minutes, seconds));
    if (decimal > 0) {
      sb.append('.');
      sb.append(String.format("%06d", frac), 0, decimal);
    }
    return sb.toString();
  }

  /**
   * Parses a [-]hh:mm:ss[.fff] string into nanoseconds.
   *
   * @throws IllegalArgumentException if the value is not in either form.
   */
  public static long convertStrToDuration(String value) {
    // value should be in form of 12:59:59.000 or 12:59:59
    // length expect to be 3.
    try {
      String[] splitBySemiColon = value.split(":");
      if (splitBySemiColon.length != 3)
        throw new IllegalArgumentException(
            String.format("%s is not a valid time type in mysql", value));
      int sign, hour, minute, second, frac;
      sign = 1;
      hour = Integer.parseInt(splitBySemiColon[0]);
      if (hour < 0) {
        sign = -1;
        // Negate the hour; the previous `hour -= hour` zeroed it, silently
        // dropping the hour component of negative durations.
        hour = -hour;
      }
      minute = Integer.parseInt(splitBySemiColon[1]);
      if (splitBySemiColon[2].contains(".")) {
        String[] splitByDot = splitBySemiColon[2].split("\\.");
        second = Integer.parseInt(splitByDot[0]);
        frac = Integer.parseInt(splitByDot[1]);
      } else {
        second = Integer.parseInt(splitBySemiColon[2]);
        frac = 0;
      }
      return ((long) hour * HOUR
              + (long) minute * MINUTE
              + (long) second * SECOND
              + (long) frac * MICROSECOND)
          * sign;
    } catch (Exception e) {
      throw new IllegalArgumentException(
          String.format(
              "%s is not a valid format. Either hh:mm:ss.mmm or hh:mm:ss is accepted.", value));
    }
  }
}
| 1 | 9,548 | String valStr = (String)val; valStr.substring(0, Math.min(valStr.length(), prefixLength)) Make it clean. | pingcap-tispark | java |
@@ -2067,9 +2067,13 @@ CheckedError Parser::ParseEnum(const bool is_union, EnumDef **dest) {
const auto strict_ascending = (false == opts.proto_mode);
EnumValBuilder evb(*this, *enum_def, strict_ascending);
EXPECT('{');
- // A lot of code generatos expect that an enum is not-empty.
- if ((is_union || Is('}')) && !opts.proto_mode) {
- evb.CreateEnumerator("NONE");
+ if(is_union && !opts.proto_mode) {
+ // Every union has the NONE field, mapped to a special `void` type.
+ evb.CreateEnumerator("NONE", BASE_TYPE_NONE);
+ ECHECK(evb.AcceptEnumerator());
+ } else if (Is('}') && !opts.proto_mode) {
+ // Most code generators expect that an enum is not-empty.
+ evb.CreateEnumerator("NONE", 0);
ECHECK(evb.AcceptEnumerator());
}
std::set<std::pair<BaseType, StructDef *>> union_types; | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <list>
#include <string>
#include <utility>
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
namespace flatbuffers {
// Reflects the version at the compiling time of binary(lib/dll/so).
// Returns "major.minor.revision" assembled from the version macros via
// compile-time string literal concatenation.
const char *FLATBUFFERS_VERSION() {
  // clang-format off
  return
    FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "."
    FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "."
    FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION);
  // clang-format on
}
const double kPi = 3.14159265358979323846;
// clang-format off
const char *const kTypeNames[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, ...) \
IDLTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
nullptr
};
const char kTypeSizes[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
sizeof(CTYPE),
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
// clang-format on
// The enums in the reflection schema should match the ones we use internally.
// Compare the last element to check if these go out of sync.
static_assert(BASE_TYPE_UNION == static_cast<BaseType>(reflection::Union),
"enums don't match");
// Any parsing calls have to be wrapped in this macro, which automates
// handling of recursive error checking a bit. It will check the received
// CheckedError object, and return straight away on error.
#define ECHECK(call) \
{ \
auto ce = (call); \
if (ce.Check()) return ce; \
}
// These two functions are called hundreds of times below, so define a short
// form:
#define NEXT() ECHECK(Next())
#define EXPECT(tok) ECHECK(Expect(tok))
// Returns true if str is a well-formed UTF-8 byte sequence.
static bool ValidateUTF8(const std::string &str) {
  // Decode one codepoint at a time; FromUTF8 advances the cursor and
  // returns a negative value on a malformed sequence.
  const char *cursor = str.c_str();
  const char *const end = cursor + str.length();
  while (cursor < end) {
    if (FromUTF8(&cursor) < 0) return false;
  }
  return true;
}
// Convert an underscore_based_identifier in to camelCase.
// Also uppercases the first character if first is true.
std::string MakeCamel(const std::string &in, bool first) {
  std::string result;
  size_t i = 0;
  while (i < in.length()) {
    const char c = in[i];
    if (i == 0 && first) {
      result += static_cast<char>(toupper(c));
    } else if (c == '_' && i + 1 < in.length()) {
      // Drop the underscore and uppercase the character that follows it.
      ++i;
      result += static_cast<char>(toupper(in[i]));
    } else {
      result += c;
    }
    ++i;
  }
  return result;
}
// Convert an underscore_based_identifier in to screaming snake case:
// every letter uppercased, underscores preserved.
std::string MakeScreamingCamel(const std::string &in) {
  std::string result;
  result.reserve(in.length());
  for (const char c : in) {
    result += (c == '_') ? c : static_cast<char>(toupper(c));
  }
  return result;
}
// DeserializeDoc copies documentation strings from a serialized schema vector
// into doc; a null vector means "no documentation" and leaves doc untouched.
void DeserializeDoc(std::vector<std::string> &doc,
                    const Vector<Offset<String>> *documentation) {
  if (documentation == nullptr) return;
  for (uoffset_t index = 0; index < documentation->size(); index++)
    doc.push_back(documentation->Get(index)->str());
}
// Message appends a diagnostic to error_, prefixed with the file name and
// the current line/column in the platform's native compiler style.
void Parser::Message(const std::string &msg) {
  if (!error_.empty()) error_ += "\n";  // log all warnings and errors
  error_ += file_being_parsed_.length() ? AbsolutePath(file_being_parsed_) : "";
  // clang-format off
  #ifdef _WIN32  // MSVC alike
    error_ +=
        "(" + NumToString(line_) + ", " + NumToString(CursorPosition()) + ")";
  #else  // gcc alike
    if (file_being_parsed_.length()) error_ += ":";
    error_ += NumToString(line_) + ": " + NumToString(CursorPosition());
  #endif
  // clang-format on
  error_ += ": " + msg;
}
// Warning records a non-fatal diagnostic at the current parse position.
void Parser::Warning(const std::string &msg) { Message("warning: " + msg); }
// Error records a fatal diagnostic and returns a failed CheckedError.
CheckedError Parser::Error(const std::string &msg) {
  Message("error: " + msg);
  return CheckedError(true);
}
// NoError returns a CheckedError representing success.
inline CheckedError NoError() { return CheckedError(false); }
// RecurseError reports that the maximum parser recursion depth was exceeded.
CheckedError Parser::RecurseError() {
  return Error("maximum parsing recursion of " +
               NumToString(FLATBUFFERS_MAX_PARSING_DEPTH) + " reached");
}
// Recurse runs f with the recursion-depth guard incremented, failing once
// FLATBUFFERS_MAX_PARSING_DEPTH nested parser calls are reached.
template<typename F> CheckedError Parser::Recurse(F f) {
  if (recurse_protection_counter >= (FLATBUFFERS_MAX_PARSING_DEPTH))
    return RecurseError();
  recurse_protection_counter++;
  auto ce = f();
  recurse_protection_counter--;
  return ce;
}
// TypeToIntervalString renders the representable range of T as "[min; max]",
// used in out-of-range error messages.
template<typename T> std::string TypeToIntervalString() {
  std::string s = "[";
  s += NumToString((flatbuffers::numeric_limits<T>::lowest)());
  s += "; ";
  s += NumToString((flatbuffers::numeric_limits<T>::max)());
  s += "]";
  return s;
}
// atot: template version of atoi/atof: convert a string to an instance of T.
template<typename T>
inline CheckedError atot(const char *s, Parser &parser, T *val) {
  auto done = StringToNumber(s, val);
  if (done) return NoError();
  // NOTE(review): distinguishes a malformed constant (*val left at 0) from an
  // out-of-range one — relies on StringToNumber's failure semantics; confirm.
  if (0 == *val)
    return parser.Error("invalid number: \"" + std::string(s) + "\"");
  else
    return parser.Error("invalid number: \"" + std::string(s) + "\"" +
                        ", constant does not fit " + TypeToIntervalString<T>());
}
// Specialization for offsets: parsed as a plain int with no range reporting.
template<>
inline CheckedError atot<Offset<void>>(const char *s, Parser &parser,
                                       Offset<void> *val) {
  (void)parser;
  *val = Offset<void>(atoi(s));
  return NoError();
}
// GetFullyQualifiedName joins up to max_components of this namespace with
// name, separated by dots (e.g. "a.b.Name").
std::string Namespace::GetFullyQualifiedName(const std::string &name,
                                             size_t max_components) const {
  // Without a namespace (or zero components requested) the name is returned
  // unqualified.
  if (components.empty() || !max_components) return name;
  const size_t limit = std::min(components.size(), max_components);
  std::string result;
  for (size_t i = 0; i < limit; i++) {
    if (i) result += '.';
    result += components[i];
  }
  if (name.length()) {
    result += '.';
    result += name;
  }
  return result;
}
// Declare tokens we'll use. Single character tokens are represented by their
// ascii character code (e.g. '{'), others above 256.
// clang-format off
#define FLATBUFFERS_GEN_TOKENS(TD) \
TD(Eof, 256, "end of file") \
TD(StringConstant, 257, "string constant") \
TD(IntegerConstant, 258, "integer constant") \
TD(FloatConstant, 259, "float constant") \
TD(Identifier, 260, "identifier")
#ifdef __GNUC__
__extension__ // Stop GCC complaining about trailing comma with -Wpendantic.
#endif
enum {
#define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) kToken ## NAME = VALUE,
FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN)
#undef FLATBUFFERS_TOKEN
};
// TokenToString maps a token id to a human-readable name for diagnostics.
static std::string TokenToString(int t) {
  // Table covers the named tokens (ids >= 256) followed by the type keywords;
  // indices line up with the token enum because both are macro-generated.
  static const char * const tokens[] = {
    #define FLATBUFFERS_TOKEN(NAME, VALUE, STRING) STRING,
      FLATBUFFERS_GEN_TOKENS(FLATBUFFERS_TOKEN)
    #undef FLATBUFFERS_TOKEN
    #define FLATBUFFERS_TD(ENUM, IDLTYPE, ...) \
      IDLTYPE,
      FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
    #undef FLATBUFFERS_TD
  };
  if (t < 256) {  // A single ascii char token.
    std::string s;
    s.append(1, static_cast<char>(t));
    return s;
  } else {  // Other tokens.
    return tokens[t - 256];
  }
}
// clang-format on
// Like TokenToString, but for identifiers shows the actual identifier text.
std::string Parser::TokenToStringId(int t) const {
  return t == kTokenIdentifier ? attribute_ : TokenToString(t);
}
// Parses exactly nibbles worth of hex digits into a number, or error.
CheckedError Parser::ParseHexNum(int nibbles, uint64_t *val) {
  FLATBUFFERS_ASSERT(nibbles > 0);
  // Validate all digits before consuming anything so the error message
  // reflects the whole escape sequence.
  for (int i = 0; i < nibbles; i++) {
    if (!is_xdigit(cursor_[i])) {
      return Error("escape code must be followed by " + NumToString(nibbles) +
                   " hex digits");
    }
  }
  *val = StringToUInt(std::string(cursor_, cursor_ + nibbles).c_str(), 16);
  cursor_ += nibbles;
  return NoError();
}
// SkipByteOrderMark consumes a UTF-8 BOM (EF BB BF) at the cursor if present;
// a partial BOM is an error, and no BOM at all is fine.
CheckedError Parser::SkipByteOrderMark() {
  static const unsigned char bom[3] = { 0xef, 0xbb, 0xbf };
  if (static_cast<unsigned char>(*cursor_) != bom[0]) return NoError();
  cursor_++;
  for (int i = 1; i < 3; i++) {
    if (static_cast<unsigned char>(*cursor_) != bom[i])
      return Error("invalid utf-8 byte order mark");
    cursor_++;
  }
  return NoError();
}
// Identifiers may begin with a letter or an underscore.
static inline bool IsIdentifierStart(char c) {
  return (c == '_') || is_alpha(c);
}
// Next is the lexer: it reads the next token into token_ (with its text in
// attribute_ where applicable), skipping whitespace and comments and
// collecting /// documentation comments into doc_comment_.
CheckedError Parser::Next() {
  doc_comment_.clear();
  bool seen_newline = cursor_ == source_;
  attribute_.clear();
  attr_is_trivial_ascii_string_ = true;
  for (;;) {
    char c = *cursor_++;
    token_ = c;
    switch (c) {
      case '\0':
        cursor_--;
        token_ = kTokenEof;
        return NoError();
      case ' ':
      case '\r':
      case '\t': break;
      case '\n':
        MarkNewLine();
        seen_newline = true;
        break;
      case '{':
      case '}':
      case '(':
      case ')':
      case '[':
      case ']':
      case ',':
      case ':':
      case ';':
      case '=': return NoError();
      case '\"':
      case '\'': {
        // String constant; both quote styles are accepted and must match.
        int unicode_high_surrogate = -1;
        while (*cursor_ != c) {
          if (*cursor_ < ' ' && static_cast<signed char>(*cursor_) >= 0)
            return Error("illegal character in string constant");
          if (*cursor_ == '\\') {
            attr_is_trivial_ascii_string_ = false;  // has escape sequence
            cursor_++;
            if (unicode_high_surrogate != -1 && *cursor_ != 'u') {
              return Error(
                  "illegal Unicode sequence (unpaired high surrogate)");
            }
            switch (*cursor_) {
              case 'n':
                attribute_ += '\n';
                cursor_++;
                break;
              case 't':
                attribute_ += '\t';
                cursor_++;
                break;
              case 'r':
                attribute_ += '\r';
                cursor_++;
                break;
              case 'b':
                attribute_ += '\b';
                cursor_++;
                break;
              case 'f':
                attribute_ += '\f';
                cursor_++;
                break;
              case '\"':
                attribute_ += '\"';
                cursor_++;
                break;
              case '\'':
                attribute_ += '\'';
                cursor_++;
                break;
              case '\\':
                attribute_ += '\\';
                cursor_++;
                break;
              case '/':
                attribute_ += '/';
                cursor_++;
                break;
              case 'x': {  // Not in the JSON standard
                cursor_++;
                uint64_t val;
                ECHECK(ParseHexNum(2, &val));
                attribute_ += static_cast<char>(val);
                break;
              }
              case 'u': {
                // \uXXXX escape; surrogate pairs are combined into one
                // codepoint before UTF-8 encoding.
                cursor_++;
                uint64_t val;
                ECHECK(ParseHexNum(4, &val));
                if (val >= 0xD800 && val <= 0xDBFF) {
                  if (unicode_high_surrogate != -1) {
                    return Error(
                        "illegal Unicode sequence (multiple high surrogates)");
                  } else {
                    unicode_high_surrogate = static_cast<int>(val);
                  }
                } else if (val >= 0xDC00 && val <= 0xDFFF) {
                  if (unicode_high_surrogate == -1) {
                    return Error(
                        "illegal Unicode sequence (unpaired low surrogate)");
                  } else {
                    int code_point = 0x10000 +
                                     ((unicode_high_surrogate & 0x03FF) << 10) +
                                     (val & 0x03FF);
                    ToUTF8(code_point, &attribute_);
                    unicode_high_surrogate = -1;
                  }
                } else {
                  if (unicode_high_surrogate != -1) {
                    return Error(
                        "illegal Unicode sequence (unpaired high surrogate)");
                  }
                  ToUTF8(static_cast<int>(val), &attribute_);
                }
                break;
              }
              default: return Error("unknown escape code in string constant");
            }
          } else {  // printable chars + UTF-8 bytes
            if (unicode_high_surrogate != -1) {
              return Error(
                  "illegal Unicode sequence (unpaired high surrogate)");
            }
            // reset if non-printable
            attr_is_trivial_ascii_string_ &=
                check_ascii_range(*cursor_, ' ', '~');
            attribute_ += *cursor_++;
          }
        }
        if (unicode_high_surrogate != -1) {
          return Error("illegal Unicode sequence (unpaired high surrogate)");
        }
        cursor_++;
        if (!attr_is_trivial_ascii_string_ && !opts.allow_non_utf8 &&
            !ValidateUTF8(attribute_)) {
          return Error("illegal UTF-8 sequence");
        }
        token_ = kTokenStringConstant;
        return NoError();
      }
      case '/':
        if (*cursor_ == '/') {
          const char *start = ++cursor_;
          while (*cursor_ && *cursor_ != '\n' && *cursor_ != '\r') cursor_++;
          if (*start == '/') {  // documentation comment
            if (!seen_newline)
              return Error(
                  "a documentation comment should be on a line on its own");
            doc_comment_.push_back(std::string(start + 1, cursor_));
          }
          break;
        } else if (*cursor_ == '*') {
          cursor_++;
          // TODO: make nested.
          while (*cursor_ != '*' || cursor_[1] != '/') {
            if (*cursor_ == '\n') MarkNewLine();
            if (!*cursor_) return Error("end of file in comment");
            cursor_++;
          }
          cursor_ += 2;
          break;
        }
        FLATBUFFERS_FALLTHROUGH();  // else fall thru
      default:
        const auto has_sign = (c == '+') || (c == '-');
        // '-'/'+' and following identifier - can be a predefined constant like:
        // NAN, INF, PI, etc.
        if (IsIdentifierStart(c) || (has_sign && IsIdentifierStart(*cursor_))) {
          // Collect all chars of an identifier:
          const char *start = cursor_ - 1;
          while (IsIdentifierStart(*cursor_) || is_digit(*cursor_)) cursor_++;
          attribute_.append(start, cursor_);
          token_ = has_sign ? kTokenStringConstant : kTokenIdentifier;
          return NoError();
        }
        auto dot_lvl =
            (c == '.') ? 0 : 1;  // dot_lvl==0 <=> exactly one '.' seen
        if (!dot_lvl && !is_digit(*cursor_)) return NoError();  // enum?
        // Parser accepts hexadecimal-floating-literal (see C++ 5.13.4).
        if (is_digit(c) || has_sign || !dot_lvl) {
          const auto start = cursor_ - 1;
          auto start_digits = !is_digit(c) ? cursor_ : cursor_ - 1;
          if (!is_digit(c) && is_digit(*cursor_)) {
            start_digits = cursor_;  // see digit in cursor_ position
            c = *cursor_++;
          }
          // hex-float can't begind with '.'
          auto use_hex = dot_lvl && (c == '0') && is_alpha_char(*cursor_, 'X');
          if (use_hex) start_digits = ++cursor_;  // '0x' is the prefix, skip it
          // Read an integer number or mantisa of float-point number.
          do {
            if (use_hex) {
              while (is_xdigit(*cursor_)) cursor_++;
            } else {
              while (is_digit(*cursor_)) cursor_++;
            }
          } while ((*cursor_ == '.') && (++cursor_) && (--dot_lvl >= 0));
          // Exponent of float-point number.
          if ((dot_lvl >= 0) && (cursor_ > start_digits)) {
            // The exponent suffix of hexadecimal float number is mandatory.
            if (use_hex && !dot_lvl) start_digits = cursor_;
            if ((use_hex && is_alpha_char(*cursor_, 'P')) ||
                is_alpha_char(*cursor_, 'E')) {
              dot_lvl = 0;  // Emulate dot to signal about float-point number.
              cursor_++;
              if (*cursor_ == '+' || *cursor_ == '-') cursor_++;
              start_digits = cursor_;  // the exponent-part has to have digits
              // Exponent is decimal integer number
              while (is_digit(*cursor_)) cursor_++;
              if (*cursor_ == '.') {
                cursor_++;  // If see a dot treat it as part of invalid number.
                dot_lvl = -1;  // Fall thru to Error().
              }
            }
          }
          // Finalize.
          if ((dot_lvl >= 0) && (cursor_ > start_digits)) {
            attribute_.append(start, cursor_);
            token_ = dot_lvl ? kTokenIntegerConstant : kTokenFloatConstant;
            return NoError();
          } else {
            return Error("invalid number: " + std::string(start, cursor_));
          }
        }
        std::string ch;
        ch = c;
        if (false == check_ascii_range(c, ' ', '~'))
          ch = "code: " + NumToString(c);
        return Error("illegal character: " + ch);
    }
  }
}
// Check if a given token is next (without consuming it).
bool Parser::Is(int t) const { return t == token_; }
// IsIdent checks if the next token is the given identifier (unconsumed).
bool Parser::IsIdent(const char *id) const {
  return token_ == kTokenIdentifier && attribute_ == id;
}
// Expect a given token to be next, consume it, or error if not present.
CheckedError Parser::Expect(int t) {
  if (t != token_) {
    return Error("expecting: " + TokenToString(t) +
                 " instead got: " + TokenToStringId(token_));
  }
  NEXT();  // advance to the following token
  return NoError();
}
// ParseNamespacing consumes a dotted trail (".b.c") after an already-parsed
// identifier, appending to *id and leaving the final component in *last
// (which may be null if the caller doesn't need it).
CheckedError Parser::ParseNamespacing(std::string *id, std::string *last) {
  while (Is('.')) {
    NEXT();
    *id += ".";
    *id += attribute_;
    if (last) *last = attribute_;
    EXPECT(kTokenIdentifier);
  }
  return NoError();
}
// LookupEnum resolves id to an enum definition, or nullptr if none matches.
EnumDef *Parser::LookupEnum(const std::string &id) {
  // Search thru parent namespaces: try the deepest qualification of the
  // current namespace first, then successively shorter prefixes, ending
  // with the bare id.
  for (int components = static_cast<int>(current_namespace_->components.size());
       components >= 0; components--) {
    auto ed = enums_.Lookup(
        current_namespace_->GetFullyQualifiedName(id, components));
    if (ed) return ed;
  }
  return nullptr;
}
// LookupStruct finds a struct by fully-qualified name. Bumps its refcount as
// a side effect (despite being const) — presumably so referenced-but-undefined
// types can be detected later; confirm against callers.
StructDef *Parser::LookupStruct(const std::string &id) const {
  auto sd = structs_.Lookup(id);
  if (sd) sd->refcount++;
  return sd;
}
// ParseTypeIdent resolves a (possibly namespace-qualified) identifier into a
// type: a known enum (or union), otherwise a struct, which may be created as
// a forward declaration.
CheckedError Parser::ParseTypeIdent(Type &type) {
  std::string id = attribute_;
  EXPECT(kTokenIdentifier);
  ECHECK(ParseNamespacing(&id, nullptr));
  auto enum_def = LookupEnum(id);
  if (enum_def) {
    type = enum_def->underlying_type;
    if (enum_def->is_union) type.base_type = BASE_TYPE_UNION;
  } else {
    type.base_type = BASE_TYPE_STRUCT;
    type.struct_def = LookupCreateStruct(id);
  }
  return NoError();
}
// Parse any IDL type: a scalar keyword, string, a user-defined type ident,
// or a vector / fixed-length array of any of those.
CheckedError Parser::ParseType(Type &type) {
  if (token_ == kTokenIdentifier) {
    if (IsIdent("bool")) {
      type.base_type = BASE_TYPE_BOOL;
      NEXT();
    } else if (IsIdent("byte") || IsIdent("int8")) {
      type.base_type = BASE_TYPE_CHAR;
      NEXT();
    } else if (IsIdent("ubyte") || IsIdent("uint8")) {
      type.base_type = BASE_TYPE_UCHAR;
      NEXT();
    } else if (IsIdent("short") || IsIdent("int16")) {
      type.base_type = BASE_TYPE_SHORT;
      NEXT();
    } else if (IsIdent("ushort") || IsIdent("uint16")) {
      type.base_type = BASE_TYPE_USHORT;
      NEXT();
    } else if (IsIdent("int") || IsIdent("int32")) {
      type.base_type = BASE_TYPE_INT;
      NEXT();
    } else if (IsIdent("uint") || IsIdent("uint32")) {
      type.base_type = BASE_TYPE_UINT;
      NEXT();
    } else if (IsIdent("long") || IsIdent("int64")) {
      type.base_type = BASE_TYPE_LONG;
      NEXT();
    } else if (IsIdent("ulong") || IsIdent("uint64")) {
      type.base_type = BASE_TYPE_ULONG;
      NEXT();
    } else if (IsIdent("float") || IsIdent("float32")) {
      type.base_type = BASE_TYPE_FLOAT;
      NEXT();
    } else if (IsIdent("double") || IsIdent("float64")) {
      type.base_type = BASE_TYPE_DOUBLE;
      NEXT();
    } else if (IsIdent("string")) {
      type.base_type = BASE_TYPE_STRING;
      NEXT();
    } else {
      // Not a built-in keyword: must be a user-defined enum/struct/union.
      ECHECK(ParseTypeIdent(type));
    }
  } else if (token_ == '[') {
    NEXT();
    Type subtype;
    ECHECK(Recurse([&]() { return ParseType(subtype); }));
    if (IsSeries(subtype)) {
      // We could support this, but it will complicate things, and it's
      // easier to work around with a struct around the inner vector.
      return Error("nested vector types not supported (wrap in table first)");
    }
    if (token_ == ':') {
      // "[T:N]" is a fixed-length array of N elements.
      NEXT();
      if (token_ != kTokenIntegerConstant) {
        return Error("length of fixed-length array must be an integer value");
      }
      uint16_t fixed_length = 0;
      bool check = StringToNumber(attribute_.c_str(), &fixed_length);
      if (!check || fixed_length < 1) {
        return Error(
            "length of fixed-length array must be positive and fit to "
            "uint16_t type");
      }
      type = Type(BASE_TYPE_ARRAY, subtype.struct_def, subtype.enum_def,
                  fixed_length);
      NEXT();
    } else {
      type = Type(BASE_TYPE_VECTOR, subtype.struct_def, subtype.enum_def);
    }
    type.element = subtype.base_type;
    EXPECT(']');
  } else {
    return Error("illegal type syntax");
  }
  return NoError();
}
// Create a new FieldDef named `name` of type `type`, append it to
// struct_def, and return it via `dest`. For fixed structs the byte offset
// is computed statically (with padding/alignment); for tables the offset is
// the vtable slot derived from the field's index.
CheckedError Parser::AddField(StructDef &struct_def, const std::string &name,
                              const Type &type, FieldDef **dest) {
  auto field = new FieldDef();
  field->name = name;
  field->file = struct_def.file;
  field->value.type = type;
  // Vtable slot for tables; recomputed below for fixed structs.
  field->value.offset =
      FieldIndexToOffset(static_cast<voffset_t>(struct_def.fields.vec.size()));
  if (struct_def.fixed) {  // statically compute the field offset
    const auto field_size = InlineSize(type);
    const auto field_align = InlineAlignment(type);
    // structs_ need to have a predictable format, so we need to align to
    // the largest scalar
    struct_def.minalign = std::max(struct_def.minalign, field_align);
    struct_def.PadLastField(field_align);
    field->value.offset = static_cast<voffset_t>(struct_def.bytesize);
    struct_def.bytesize += field_size;
  }
  if (struct_def.fields.Add(name, field))
    return Error("field already exists: " + name);
  *dest = field;
  return NoError();
}
CheckedError Parser::ParseField(StructDef &struct_def) {
std::string name = attribute_;
if (LookupCreateStruct(name, false, false))
return Error("field name can not be the same as table/struct name");
std::vector<std::string> dc = doc_comment_;
EXPECT(kTokenIdentifier);
EXPECT(':');
Type type;
ECHECK(ParseType(type));
if (struct_def.fixed && !IsScalar(type.base_type) && !IsStruct(type) &&
!IsArray(type))
return Error("structs_ may contain only scalar or struct fields");
if (!struct_def.fixed && IsArray(type))
return Error("fixed-length array in table must be wrapped in struct");
if (IsArray(type) && !SupportsAdvancedArrayFeatures()) {
return Error(
"Arrays are not yet supported in all "
"the specified programming languages.");
}
FieldDef *typefield = nullptr;
if (type.base_type == BASE_TYPE_UNION) {
// For union fields, add a second auto-generated field to hold the type,
// with a special suffix.
ECHECK(AddField(struct_def, name + UnionTypeFieldSuffix(),
type.enum_def->underlying_type, &typefield));
} else if (type.base_type == BASE_TYPE_VECTOR &&
type.element == BASE_TYPE_UNION) {
// Only cpp, js and ts supports the union vector feature so far.
if (!SupportsAdvancedUnionFeatures()) {
return Error(
"Vectors of unions are not yet supported in all "
"the specified programming languages.");
}
// For vector of union fields, add a second auto-generated vector field to
// hold the types, with a special suffix.
Type union_vector(BASE_TYPE_VECTOR, nullptr, type.enum_def);
union_vector.element = BASE_TYPE_UTYPE;
ECHECK(AddField(struct_def, name + UnionTypeFieldSuffix(), union_vector,
&typefield));
}
FieldDef *field;
ECHECK(AddField(struct_def, name, type, &field));
if (token_ == '=') {
NEXT();
ECHECK(ParseSingleValue(&field->name, field->value, true));
if (!IsScalar(type.base_type) ||
(struct_def.fixed && field->value.constant != "0"))
return Error(
"default values currently only supported for scalars in tables");
}
// Append .0 if the value has not it (skip hex and scientific floats).
// This suffix needed for generated C++ code.
if (IsFloat(type.base_type)) {
auto &text = field->value.constant;
FLATBUFFERS_ASSERT(false == text.empty());
auto s = text.c_str();
while (*s == ' ') s++;
if (*s == '-' || *s == '+') s++;
// 1) A float constants (nan, inf, pi, etc) is a kind of identifier.
// 2) A float number needn't ".0" at the end if it has exponent.
if ((false == IsIdentifierStart(*s)) &&
(std::string::npos == field->value.constant.find_first_of(".eEpP"))) {
field->value.constant += ".0";
}
}
if (type.enum_def) {
// The type.base_type can only be scalar, union, array or vector.
// Table, struct or string can't have enum_def.
// Default value of union and vector in NONE, NULL translated to "0".
FLATBUFFERS_ASSERT(IsInteger(type.base_type) ||
(type.base_type == BASE_TYPE_UNION) ||
(type.base_type == BASE_TYPE_VECTOR) ||
(type.base_type == BASE_TYPE_ARRAY));
if (type.base_type == BASE_TYPE_VECTOR) {
// Vector can't use initialization list.
FLATBUFFERS_ASSERT(field->value.constant == "0");
} else {
// All unions should have the NONE ("0") enum value.
auto in_enum = type.enum_def->attributes.Lookup("bit_flags") ||
type.enum_def->FindByValue(field->value.constant);
if (false == in_enum)
return Error("default value of " + field->value.constant +
" for field " + name + " is not part of enum " +
type.enum_def->name);
}
}
field->doc_comment = dc;
ECHECK(ParseMetaData(&field->attributes));
field->deprecated = field->attributes.Lookup("deprecated") != nullptr;
auto hash_name = field->attributes.Lookup("hash");
if (hash_name) {
switch ((type.base_type == BASE_TYPE_VECTOR) ? type.element
: type.base_type) {
case BASE_TYPE_SHORT:
case BASE_TYPE_USHORT: {
if (FindHashFunction16(hash_name->constant.c_str()) == nullptr)
return Error("Unknown hashing algorithm for 16 bit types: " +
hash_name->constant);
break;
}
case BASE_TYPE_INT:
case BASE_TYPE_UINT: {
if (FindHashFunction32(hash_name->constant.c_str()) == nullptr)
return Error("Unknown hashing algorithm for 32 bit types: " +
hash_name->constant);
break;
}
case BASE_TYPE_LONG:
case BASE_TYPE_ULONG: {
if (FindHashFunction64(hash_name->constant.c_str()) == nullptr)
return Error("Unknown hashing algorithm for 64 bit types: " +
hash_name->constant);
break;
}
default:
return Error(
"only short, ushort, int, uint, long and ulong data types support "
"hashing.");
}
}
auto cpp_type = field->attributes.Lookup("cpp_type");
if (cpp_type) {
if (!hash_name)
return Error("cpp_type can only be used with a hashed field");
/// forcing cpp_ptr_type to 'naked' if unset
auto cpp_ptr_type = field->attributes.Lookup("cpp_ptr_type");
if (!cpp_ptr_type) {
auto val = new Value();
val->type = cpp_type->type;
val->constant = "naked";
field->attributes.Add("cpp_ptr_type", val);
}
}
if (field->deprecated && struct_def.fixed)
return Error("can't deprecate fields in a struct");
field->required = field->attributes.Lookup("required") != nullptr;
if (field->required && (struct_def.fixed || IsScalar(type.base_type)))
return Error("only non-scalar fields in tables may be 'required'");
field->key = field->attributes.Lookup("key") != nullptr;
if (field->key) {
if (struct_def.has_key) return Error("only one field may be set as 'key'");
struct_def.has_key = true;
if (!IsScalar(type.base_type)) {
field->required = true;
if (type.base_type != BASE_TYPE_STRING)
return Error("'key' field must be string or scalar type");
}
}
field->shared = field->attributes.Lookup("shared") != nullptr;
if (field->shared && field->value.type.base_type != BASE_TYPE_STRING)
return Error("shared can only be defined on strings");
auto field_native_custom_alloc =
field->attributes.Lookup("native_custom_alloc");
if (field_native_custom_alloc)
return Error(
"native_custom_alloc can only be used with a table or struct "
"definition");
field->native_inline = field->attributes.Lookup("native_inline") != nullptr;
if (field->native_inline && !IsStruct(field->value.type))
return Error("native_inline can only be defined on structs");
auto nested = field->attributes.Lookup("nested_flatbuffer");
if (nested) {
if (nested->type.base_type != BASE_TYPE_STRING)
return Error(
"nested_flatbuffer attribute must be a string (the root type)");
if (type.base_type != BASE_TYPE_VECTOR || type.element != BASE_TYPE_UCHAR)
return Error(
"nested_flatbuffer attribute may only apply to a vector of ubyte");
// This will cause an error if the root type of the nested flatbuffer
// wasn't defined elsewhere.
field->nested_flatbuffer = LookupCreateStruct(nested->constant);
}
if (field->attributes.Lookup("flexbuffer")) {
field->flexbuffer = true;
uses_flexbuffers_ = true;
if (type.base_type != BASE_TYPE_VECTOR || type.element != BASE_TYPE_UCHAR)
return Error("flexbuffer attribute may only apply to a vector of ubyte");
}
if (typefield) {
if (!IsScalar(typefield->value.type.base_type)) {
// this is a union vector field
typefield->required = field->required;
}
// If this field is a union, and it has a manually assigned id,
// the automatically added type field should have an id as well (of N - 1).
auto attr = field->attributes.Lookup("id");
if (attr) {
auto id = atoi(attr->constant.c_str());
auto val = new Value();
val->type = attr->type;
val->constant = NumToString(id - 1);
typefield->attributes.Add("id", val);
}
}
EXPECT(';');
return NoError();
}
// Serialize the current string token into the buffer and record its offset
// (as a decimal string) in val.constant.
CheckedError Parser::ParseString(Value &val) {
  const auto str = attribute_;
  EXPECT(kTokenStringConstant);
  val.constant = NumToString(builder_.CreateString(str).o);
  return NoError();
}
// Consume a ',' separator; in protobuf-ASCII-alike mode separators are
// optional, so nothing is required.
CheckedError Parser::ParseComma() {
  if (opts.protobuf_ascii_alike) return NoError();
  EXPECT(',');
  return NoError();
}
// Parse a single value of the type in val.type. `field` is the field being
// parsed (may be null, e.g. for scalar vector elements);
// `parent_fieldn`/`parent_struct_def` describe the enclosing table, needed
// to resolve a union's companion "_type" field; `count`/`inside_vector`
// locate the element when it belongs to a vector of unions.
CheckedError Parser::ParseAnyValue(Value &val, FieldDef *field,
                                   size_t parent_fieldn,
                                   const StructDef *parent_struct_def,
                                   uoffset_t count, bool inside_vector) {
  switch (val.type.base_type) {
    case BASE_TYPE_UNION: {
      FLATBUFFERS_ASSERT(field);
      std::string constant;
      Vector<uint8_t> *vector_of_union_types = nullptr;
      // Find corresponding type field we may have already parsed.
      for (auto elem = field_stack_.rbegin() + count;
           elem != field_stack_.rbegin() + parent_fieldn + count; ++elem) {
        auto &type = elem->second->value.type;
        if (type.enum_def == val.type.enum_def) {
          if (inside_vector) {
            if (type.base_type == BASE_TYPE_VECTOR &&
                type.element == BASE_TYPE_UTYPE) {
              // Vector of union type field.
              uoffset_t offset;
              ECHECK(atot(elem->first.constant.c_str(), *this, &offset));
              vector_of_union_types = reinterpret_cast<Vector<uint8_t> *>(
                  builder_.GetCurrentBufferPointer() + builder_.GetSize() -
                  offset);
              break;
            }
          } else {
            if (type.base_type == BASE_TYPE_UTYPE) {
              // Union type field.
              constant = elem->first.constant;
              break;
            }
          }
        }
      }
      if (constant.empty() && !inside_vector) {
        // We haven't seen the type field yet. Sadly a lot of JSON writers
        // output these in alphabetical order, meaning it comes after this
        // value. So we scan past the value to find it, then come back here.
        // We currently don't do this for vectors of unions because the
        // scanning/serialization logic would get very complicated.
        auto type_name = field->name + UnionTypeFieldSuffix();
        FLATBUFFERS_ASSERT(parent_struct_def);
        auto type_field = parent_struct_def->fields.Lookup(type_name);
        FLATBUFFERS_ASSERT(type_field); // Guaranteed by ParseField().
        // Remember where we are in the source file, so we can come back here.
        auto backup = *static_cast<ParserState *>(this);
        ECHECK(SkipAnyJsonValue()); // The table.
        ECHECK(ParseComma());
        auto next_name = attribute_;
        if (Is(kTokenStringConstant)) {
          NEXT();
        } else {
          EXPECT(kTokenIdentifier);
        }
        if (next_name == type_name) {
          EXPECT(':');
          Value type_val = type_field->value;
          ECHECK(ParseAnyValue(type_val, type_field, 0, nullptr, 0));
          constant = type_val.constant;
          // Got the information we needed, now rewind:
          *static_cast<ParserState *>(this) = backup;
        }
      }
      if (constant.empty() && !vector_of_union_types) {
        return Error("missing type field for this union value: " + field->name);
      }
      uint8_t enum_idx;
      if (vector_of_union_types) {
        enum_idx = vector_of_union_types->Get(count);
      } else {
        ECHECK(atot(constant.c_str(), *this, &enum_idx));
      }
      // Map the numeric type tag back to the union member's actual type.
      auto enum_val = val.type.enum_def->ReverseLookup(enum_idx, true);
      if (!enum_val) return Error("illegal type id for: " + field->name);
      if (enum_val->union_type.base_type == BASE_TYPE_STRUCT) {
        ECHECK(ParseTable(*enum_val->union_type.struct_def, &val.constant,
                          nullptr));
        if (enum_val->union_type.struct_def->fixed) {
          // All BASE_TYPE_UNION values are offsets, so turn this into one.
          SerializeStruct(*enum_val->union_type.struct_def, val);
          builder_.ClearOffsets();
          val.constant = NumToString(builder_.GetSize());
        }
      } else if (enum_val->union_type.base_type == BASE_TYPE_STRING) {
        ECHECK(ParseString(val));
      } else {
        FLATBUFFERS_ASSERT(false);
      }
      break;
    }
    case BASE_TYPE_STRUCT:
      ECHECK(ParseTable(*val.type.struct_def, &val.constant, nullptr));
      break;
    case BASE_TYPE_STRING: {
      ECHECK(ParseString(val));
      break;
    }
    case BASE_TYPE_VECTOR: {
      uoffset_t off;
      ECHECK(ParseVector(val.type.VectorType(), &off, field, parent_fieldn));
      val.constant = NumToString(off);
      break;
    }
    case BASE_TYPE_ARRAY: {
      ECHECK(ParseArray(val));
      break;
    }
    case BASE_TYPE_INT:
    case BASE_TYPE_UINT:
    case BASE_TYPE_LONG:
    case BASE_TYPE_ULONG: {
      // Integer fields with a "hash" attribute accept an identifier/string
      // and store its hash instead of a literal number.
      if (field && field->attributes.Lookup("hash") &&
          (token_ == kTokenIdentifier || token_ == kTokenStringConstant)) {
        ECHECK(ParseHash(val, field));
      } else {
        ECHECK(ParseSingleValue(field ? &field->name : nullptr, val, false));
      }
      break;
    }
    default:
      ECHECK(ParseSingleValue(field ? &field->name : nullptr, val, false));
      break;
  }
  return NoError();
}
// Convenience overload: serialize into the parser's main builder.
void Parser::SerializeStruct(const StructDef &struct_def, const Value &val) {
  SerializeStruct(builder_, struct_def, val);
}
// Append the raw bytes of a parsed struct value (stashed in val.constant by
// ParseTable) to `builder`, aligned, and record where it was written so the
// enclosing table slot (val.offset) can point at it.
void Parser::SerializeStruct(FlatBufferBuilder &builder,
                             const StructDef &struct_def, const Value &val) {
  FLATBUFFERS_ASSERT(val.constant.length() == struct_def.bytesize);
  builder.Align(struct_def.minalign);
  builder.PushBytes(reinterpret_cast<const uint8_t *>(val.constant.c_str()),
                    struct_def.bytesize);
  builder.AddStructOffset(val.offset, builder.GetSize());
}
// Iterate over the comma-separated fields of a table value, calling `body`
// once per field with its name. Tables may be written either as a JSON
// object ("{ name: value, ... }") or -- when nested and a struct_def is
// given -- as an unnamed-field vector ("[ v1, v2, ... ]") whose values
// match the declared field order.
template<typename F>
CheckedError Parser::ParseTableDelimiters(size_t &fieldn,
                                          const StructDef *struct_def, F body) {
  // We allow tables both as JSON object{ .. } with field names
  // or vector[..] with all fields in order
  char terminator = '}';
  bool is_nested_vector = struct_def && Is('[');
  if (is_nested_vector) {
    NEXT();
    terminator = ']';
  } else {
    EXPECT('{');
  }
  for (;;) {
    // In strict JSON, the terminator may only follow the opener (empty
    // table) or a field -- never a trailing comma.
    if ((!opts.strict_json || !fieldn) && Is(terminator)) break;
    std::string name;
    if (is_nested_vector) {
      // Unnamed fields: take names from the declaration order.
      if (fieldn >= struct_def->fields.vec.size()) {
        return Error("too many unnamed fields in nested array");
      }
      name = struct_def->fields.vec[fieldn]->name;
    } else {
      name = attribute_;
      if (Is(kTokenStringConstant)) {
        NEXT();
      } else {
        EXPECT(opts.strict_json ? kTokenStringConstant : kTokenIdentifier);
      }
      if (!opts.protobuf_ascii_alike || !(Is('{') || Is('['))) EXPECT(':');
    }
    ECHECK(body(name, fieldn, struct_def));
    if (Is(terminator)) break;
    ECHECK(ParseComma());
  }
  NEXT();
  // The unnamed-vector form must supply every declared field.
  if (is_nested_vector && fieldn != struct_def->fields.vec.size()) {
    return Error("wrong number of unnamed fields in table vector");
  }
  return NoError();
}
CheckedError Parser::ParseTable(const StructDef &struct_def, std::string *value,
uoffset_t *ovalue) {
size_t fieldn_outer = 0;
auto err = ParseTableDelimiters(
fieldn_outer, &struct_def,
[&](const std::string &name, size_t &fieldn,
const StructDef *struct_def_inner) -> CheckedError {
if (name == "$schema") {
ECHECK(Expect(kTokenStringConstant));
return NoError();
}
auto field = struct_def_inner->fields.Lookup(name);
if (!field) {
if (!opts.skip_unexpected_fields_in_json) {
return Error("unknown field: " + name);
} else {
ECHECK(SkipAnyJsonValue());
}
} else {
if (IsIdent("null") && !IsScalar(field->value.type.base_type)) {
ECHECK(Next()); // Ignore this field.
} else {
Value val = field->value;
if (field->flexbuffer) {
flexbuffers::Builder builder(1024,
flexbuffers::BUILDER_FLAG_SHARE_ALL);
ECHECK(ParseFlexBufferValue(&builder));
builder.Finish();
// Force alignment for nested flexbuffer
builder_.ForceVectorAlignment(builder.GetSize(), sizeof(uint8_t),
sizeof(largest_scalar_t));
auto off = builder_.CreateVector(builder.GetBuffer());
val.constant = NumToString(off.o);
} else if (field->nested_flatbuffer) {
ECHECK(
ParseNestedFlatbuffer(val, field, fieldn, struct_def_inner));
} else {
ECHECK(Recurse([&]() {
return ParseAnyValue(val, field, fieldn, struct_def_inner, 0);
}));
}
// Hardcoded insertion-sort with error-check.
// If fields are specified in order, then this loop exits
// immediately.
auto elem = field_stack_.rbegin();
for (; elem != field_stack_.rbegin() + fieldn; ++elem) {
auto existing_field = elem->second;
if (existing_field == field)
return Error("field set more than once: " + field->name);
if (existing_field->value.offset < field->value.offset) break;
}
// Note: elem points to before the insertion point, thus .base()
// points to the correct spot.
field_stack_.insert(elem.base(), std::make_pair(val, field));
fieldn++;
}
}
return NoError();
});
ECHECK(err);
// Check if all required fields are parsed.
for (auto field_it = struct_def.fields.vec.begin();
field_it != struct_def.fields.vec.end(); ++field_it) {
auto required_field = *field_it;
if (!required_field->required) { continue; }
bool found = false;
for (auto pf_it = field_stack_.end() - fieldn_outer;
pf_it != field_stack_.end(); ++pf_it) {
auto parsed_field = pf_it->second;
if (parsed_field == required_field) {
found = true;
break;
}
}
if (!found) {
return Error("required field is missing: " + required_field->name +
" in " + struct_def.name);
}
}
if (struct_def.fixed && fieldn_outer != struct_def.fields.vec.size())
return Error("struct: wrong number of initializers: " + struct_def.name);
auto start = struct_def.fixed ? builder_.StartStruct(struct_def.minalign)
: builder_.StartTable();
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1; size;
size /= 2) {
// Go through elements in reverse, since we're building the data backwards.
for (auto it = field_stack_.rbegin();
it != field_stack_.rbegin() + fieldn_outer; ++it) {
auto &field_value = it->first;
auto field = it->second;
if (!struct_def.sortbysize ||
size == SizeOf(field_value.type.base_type)) {
switch (field_value.type.base_type) {
// clang-format off
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
case BASE_TYPE_ ## ENUM: \
builder_.Pad(field->padding); \
if (struct_def.fixed) { \
CTYPE val; \
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
builder_.PushElement(val); \
} else { \
CTYPE val, valdef; \
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
ECHECK(atot(field->value.constant.c_str(), *this, &valdef)); \
builder_.AddElement(field_value.offset, val, valdef); \
} \
break;
FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
case BASE_TYPE_ ## ENUM: \
builder_.Pad(field->padding); \
if (IsStruct(field->value.type)) { \
SerializeStruct(*field->value.type.struct_def, field_value); \
} else { \
CTYPE val; \
ECHECK(atot(field_value.constant.c_str(), *this, &val)); \
builder_.AddOffset(field_value.offset, val); \
} \
break;
FLATBUFFERS_GEN_TYPES_POINTER(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
case BASE_TYPE_ARRAY:
builder_.Pad(field->padding);
builder_.PushBytes(
reinterpret_cast<const uint8_t*>(field_value.constant.c_str()),
InlineSize(field_value.type));
break;
// clang-format on
}
}
}
}
for (size_t i = 0; i < fieldn_outer; i++) field_stack_.pop_back();
if (struct_def.fixed) {
builder_.ClearOffsets();
builder_.EndStruct();
FLATBUFFERS_ASSERT(value);
// Temporarily store this struct in the value string, since it is to
// be serialized in-place elsewhere.
value->assign(
reinterpret_cast<const char *>(builder_.GetCurrentBufferPointer()),
struct_def.bytesize);
builder_.PopBytes(struct_def.bytesize);
FLATBUFFERS_ASSERT(!ovalue);
} else {
auto val = builder_.EndTable(start);
if (ovalue) *ovalue = val;
if (value) *value = NumToString(val);
}
return NoError();
}
// Parse "[ elem, elem, ... ]", invoking `body` once per element and
// counting them. In strict JSON a ']' may only follow the opener (empty
// vector) or an element -- i.e. no trailing comma.
template<typename F>
CheckedError Parser::ParseVectorDelimiters(uoffset_t &count, F body) {
  EXPECT('[');
  while (true) {
    if (Is(']') && (!opts.strict_json || count == 0)) break;
    ECHECK(body(count));
    count++;
    if (Is(']')) break;
    ECHECK(ParseComma());
  }
  NEXT();
  return NoError();
}
// Compare two serialized values of type `ftype`, returning true when the
// value at `a` orders before the value at `b`. Scalars are read in place;
// strings are reached through their uoffset_t indirection first. Any other
// type compares as "not less" (false). Used as the key ordering when
// sorting vectors by their 'key' field.
static bool CompareType(const uint8_t *a, const uint8_t *b, BaseType ftype) {
  switch (ftype) {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
  case BASE_TYPE_##ENUM: return ReadScalar<CTYPE>(a) < ReadScalar<CTYPE>(b);
    FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
    case BASE_TYPE_STRING:
      // Indirect offset pointer to string pointer.
      a += ReadScalar<uoffset_t>(a);
      b += ReadScalar<uoffset_t>(b);
      return *reinterpret_cast<const String *>(a) <
             *reinterpret_cast<const String *>(b);
    default: return false;
  }
}
// See below (ParseVector) for why we need our own sort :(
// In-place quicksort over elements that are each `width` T's wide.
// `comparator(a, b)` returns true when the element at `a` orders before the
// element at `b`; `swapper(a, b)` exchanges two whole elements (it may be
// called with a == b). Note: recursive -- see the TODO at the call site.
template<typename T, typename F, typename S>
void SimpleQsort(T *begin, T *end, size_t width, F comparator, S swapper) {
  // Zero or one element: already sorted.
  if (end - begin <= static_cast<ptrdiff_t>(width)) return;
  auto l = begin + width;
  auto r = end;
  while (l < r) {
    if (comparator(begin, l)) {
      // Element orders after the pivot (*begin): move it to the right
      // partition and shrink that partition's boundary.
      r -= width;
      swapper(l, r);
    } else {
      // Advance by a whole element, not a single T. The previous `l++`
      // stepped one T at a time, so for multi-T elements (structs sorted
      // as raw bytes with width == bytesize) it compared and swapped at
      // misaligned element boundaries, corrupting the sort.
      l += width;
    }
  }
  // Put the pivot between the two partitions, then sort each side.
  l -= width;
  swapper(begin, l);
  SimpleQsort(begin, l, width, comparator, swapper);
  SimpleQsort(r, end, width, comparator, swapper);
}
// Parse a JSON array into a serialized vector of `type` elements, returning
// the vector's offset via `ovalue`. If the element type is a struct with a
// 'key' field, the serialized vector is sorted by that key in place.
CheckedError Parser::ParseVector(const Type &type, uoffset_t *ovalue,
                                 FieldDef *field, size_t fieldn) {
  uoffset_t count = 0;
  // Parse every element onto field_stack_ first; the builder writes
  // back-to-front, so serialization happens afterwards in reverse.
  auto err = ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
    Value val;
    val.type = type;
    ECHECK(Recurse([&]() {
      return ParseAnyValue(val, field, fieldn, nullptr, count, true);
    }));
    field_stack_.push_back(std::make_pair(val, nullptr));
    return NoError();
  });
  ECHECK(err);
  builder_.StartVector(count * InlineSize(type) / InlineAlignment(type),
                       InlineAlignment(type));
  for (uoffset_t i = 0; i < count; i++) {
    // start at the back, since we're building the data backwards.
    auto &val = field_stack_.back().first;
    switch (val.type.base_type) {
      // clang-format off
      #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE,...) \
        case BASE_TYPE_ ## ENUM: \
          if (IsStruct(val.type)) SerializeStruct(*val.type.struct_def, val); \
          else { \
             CTYPE elem; \
             ECHECK(atot(val.constant.c_str(), *this, &elem)); \
             builder_.PushElement(elem); \
          } \
          break;
        FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
      #undef FLATBUFFERS_TD
      // clang-format on
    }
    field_stack_.pop_back();
  }
  builder_.ClearOffsets();
  *ovalue = builder_.EndVector(count);
  if (type.base_type == BASE_TYPE_STRUCT && type.struct_def->has_key) {
    // We should sort this vector. Find the key first.
    const FieldDef *key = nullptr;
    for (auto it = type.struct_def->fields.vec.begin();
         it != type.struct_def->fields.vec.end(); ++it) {
      if ((*it)->key) {
        key = (*it);
        break;
      }
    }
    FLATBUFFERS_ASSERT(key);  // Guaranteed by has_key.
    // Now sort it.
    // We can't use std::sort because for structs the size is not known at
    // compile time, and for tables our iterators dereference offsets, so can't
    // be used to swap elements.
    // And we can't use C qsort either, since that would force use to use
    // globals, making parsing thread-unsafe.
    // So for now, we use SimpleQsort above.
    // TODO: replace with something better, preferably not recursive.
    // NOTE: these were previously `static`, which meant they were initialized
    // exactly once: a later vector keyed on a *different* field silently
    // reused the first vector's key offset/type, and the statics also made
    // parsing thread-unsafe (defeating the point of avoiding qsort's
    // globals, per the comment above). Plain const locals captured by the
    // lambdas fix both problems.
    const voffset_t offset = key->value.offset;
    const BaseType ftype = key->value.type.base_type;
    if (type.struct_def->fixed) {
      // Structs are sorted as raw byte blocks of size bytesize.
      auto v =
          reinterpret_cast<VectorOfAny *>(builder_.GetCurrentBufferPointer());
      SimpleQsort<uint8_t>(
          v->Data(), v->Data() + v->size() * type.struct_def->bytesize,
          type.struct_def->bytesize,
          [&](const uint8_t *a, const uint8_t *b) -> bool {
            return CompareType(a + offset, b + offset, ftype);
          },
          [&](uint8_t *a, uint8_t *b) {
            // FIXME: faster?
            for (size_t i = 0; i < type.struct_def->bytesize; i++) {
              std::swap(a[i], b[i]);
            }
          });
    } else {
      auto v = reinterpret_cast<Vector<Offset<Table>> *>(
          builder_.GetCurrentBufferPointer());
      // Here also can't use std::sort. We do have an iterator type for it,
      // but it is non-standard as it will dereference the offsets, and thus
      // can't be used to swap elements.
      SimpleQsort<Offset<Table>>(
          v->data(), v->data() + v->size(), 1,
          [&](const Offset<Table> *_a, const Offset<Table> *_b) -> bool {
            // Indirect offset pointer to table pointer.
            auto a = reinterpret_cast<const uint8_t *>(_a) +
                     ReadScalar<uoffset_t>(_a);
            auto b = reinterpret_cast<const uint8_t *>(_b) +
                     ReadScalar<uoffset_t>(_b);
            // Fetch field address from table.
            a = reinterpret_cast<const Table *>(a)->GetAddressOf(offset);
            b = reinterpret_cast<const Table *>(b)->GetAddressOf(offset);
            return CompareType(a, b, ftype);
          },
          [&](Offset<Table> *a, Offset<Table> *b) {
            // These are serialized offsets, so are relative where they are
            // stored in memory, so compute the distance between these pointers:
            ptrdiff_t diff = (b - a) * sizeof(Offset<Table>);
            FLATBUFFERS_ASSERT(diff >= 0);  // Guaranteed by SimpleQsort.
            auto udiff = static_cast<uoffset_t>(diff);
            a->o = EndianScalar(ReadScalar<uoffset_t>(a) - udiff);
            b->o = EndianScalar(ReadScalar<uoffset_t>(b) + udiff);
            std::swap(*a, *b);
          });
    }
  }
  return NoError();
}
// Parse a fixed-length array value ("[ v1, ..., vN ]") into array.constant
// as raw serialized bytes. Elements are parsed first, then serialized in
// reverse into a scratch builder (which writes back-to-front), so the
// resulting bytes end up in declaration order.
CheckedError Parser::ParseArray(Value &array) {
  std::vector<Value> stack;
  FlatBufferBuilder builder;
  const auto &type = array.type.VectorType();
  auto length = array.type.fixed_length;
  uoffset_t count = 0;
  auto err = ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
    vector_emplace_back(&stack, Value());
    auto &val = stack.back();
    val.type = type;
    if (IsStruct(type)) {
      ECHECK(ParseTable(*val.type.struct_def, &val.constant, nullptr));
    } else {
      ECHECK(ParseSingleValue(nullptr, val, false));
    }
    return NoError();
  });
  ECHECK(err);
  // Arrays have a fixed declared length; the value must supply exactly that.
  if (length != count) return Error("Fixed-length array size is incorrect.");
  for (auto it = stack.rbegin(); it != stack.rend(); ++it) {
    auto &val = *it;
    // clang-format off
    switch (val.type.base_type) {
      #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
        case BASE_TYPE_ ## ENUM: \
          if (IsStruct(val.type)) { \
            SerializeStruct(builder, *val.type.struct_def, val); \
          } else { \
            CTYPE elem; \
            ECHECK(atot(val.constant.c_str(), *this, &elem)); \
            builder.PushElement(elem); \
          } \
        break;
        FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
      #undef FLATBUFFERS_TD
      default: FLATBUFFERS_ASSERT(0);
    }
    // clang-format on
  }
  // Copy the scratch builder's bytes out; the enclosing struct will embed
  // them in place.
  array.constant.assign(
      reinterpret_cast<const char *>(builder.GetCurrentBufferPointer()),
      InlineSize(array.type));
  return NoError();
}
// Parse the value of a nested_flatbuffer field: either a legacy [ubyte]
// vector literal, or a JSON value that is parsed by a fresh Parser (sharing
// this parser's enums and options) into its own buffer, which is then
// embedded here as an aligned byte vector.
CheckedError Parser::ParseNestedFlatbuffer(Value &val, FieldDef *field,
                                           size_t fieldn,
                                           const StructDef *parent_struct_def) {
  if (token_ == '[') {  // backwards compat for 'legacy' ubyte buffers
    ECHECK(ParseAnyValue(val, field, fieldn, parent_struct_def, 0));
  } else {
    // Capture the raw source text of the nested value (cursor_ is one past
    // the current token, hence the -1 adjustments).
    auto cursor_at_value_begin = cursor_;
    ECHECK(SkipAnyJsonValue());
    std::string substring(cursor_at_value_begin - 1, cursor_ - 1);
    // Create and initialize new parser
    Parser nested_parser;
    FLATBUFFERS_ASSERT(field->nested_flatbuffer);
    nested_parser.root_struct_def_ = field->nested_flatbuffer;
    nested_parser.enums_ = enums_;  // shallow copy; cleared again below
    nested_parser.opts = opts;
    nested_parser.uses_flexbuffers_ = uses_flexbuffers_;
    // Parse JSON substring into new flatbuffer builder using nested_parser
    bool ok = nested_parser.Parse(substring.c_str(), nullptr, nullptr);
    // Clean nested_parser to avoid deleting the elements in
    // the SymbolTables on destruction
    nested_parser.enums_.dict.clear();
    nested_parser.enums_.vec.clear();
    if (!ok) { ECHECK(Error(nested_parser.error_)); }
    // Force alignment for nested flatbuffer
    builder_.ForceVectorAlignment(
        nested_parser.builder_.GetSize(), sizeof(uint8_t),
        nested_parser.builder_.GetBufferMinAlignment());
    auto off = builder_.CreateVector(nested_parser.builder_.GetBufferPointer(),
                                     nested_parser.builder_.GetSize());
    val.constant = NumToString(off.o);
  }
  return NoError();
}
// Parse an optional "( name [: value], ... )" attribute list attached to a
// definition. Attribute names must be pre-declared (known_attributes_);
// an attribute without a value gets an empty Value.
CheckedError Parser::ParseMetaData(SymbolTable<Value> *attributes) {
  if (!Is('(')) return NoError();
  NEXT();
  for (;;) {
    auto name = attribute_;
    if (!(Is(kTokenIdentifier) || Is(kTokenStringConstant)))
      return Error("attribute name must be either identifier or string: " +
                   name);
    if (known_attributes_.find(name) == known_attributes_.end())
      return Error("user define attributes must be declared before use: " +
                   name);
    NEXT();
    auto attr_value = new Value();  // owned by the symbol table
    attributes->Add(name, attr_value);
    if (Is(':')) {
      NEXT();
      ECHECK(ParseSingleValue(&name, *attr_value, true));
    }
    if (Is(')')) {
      NEXT();
      break;
    }
    EXPECT(',');
  }
  return NoError();
}
// Attempt to consume the current token as a constant of base type `req`.
// If `dtoken` matches the current token, its text becomes e.constant and
// *destmatch is set (the caller asserts at most one TryTypedValue matches).
// `check` is true when the caller already determined the token kind is
// acceptable for e's declared type; when false, e's type must still be
// NONE (and is inferred as `req`), otherwise it's a type-mismatch error.
CheckedError Parser::TryTypedValue(const std::string *name, int dtoken,
                                   bool check, Value &e, BaseType req,
                                   bool *destmatch) {
  bool match = dtoken == token_;
  if (match) {
    FLATBUFFERS_ASSERT(*destmatch == false);
    *destmatch = true;
    e.constant = attribute_;
    // Check token match
    if (!check) {
      if (e.type.base_type == BASE_TYPE_NONE) {
        e.type.base_type = req;
      } else {
        return Error(
            std::string("type mismatch: expecting: ") +
            kTypeNames[e.type.base_type] + ", found: " + kTypeNames[req] +
            ", name: " + (name ? *name : "") + ", value: " + e.constant);
      }
    }
    // The exponent suffix of hexadecimal float-point number is mandatory.
    // A hex-integer constant is forbidden as an initializer of float number.
    if ((kTokenFloatConstant != dtoken) && IsFloat(e.type.base_type)) {
      const auto &s = e.constant;
      // Look for a "0x"/"0X" prefix with no "p"/"P" exponent after it.
      const auto k = s.find_first_of("0123456789.");
      if ((std::string::npos != k) && (s.length() > (k + 1)) &&
          (s[k] == '0' && is_alpha_char(s[k + 1], 'X')) &&
          (std::string::npos == s.find_first_of("pP", k + 2))) {
        return Error(
            "invalid number, the exponent suffix of hexadecimal "
            "floating-point literals is mandatory: \"" +
            s + "\"");
      }
    }
    NEXT();
  }
  return NoError();
}
// Parse the current token's text (attribute_) as one or more space-separated
// enum value names, OR-ing their integer values together (multiple names
// make sense for bit_flags-style enums). When the field's type does not
// itself name an enum, each value must be qualified as "Enum.Value".
// The combined value is written to *result as a decimal string, signed or
// unsigned per the underlying base type.
CheckedError Parser::ParseEnumFromString(const Type &type,
                                         std::string *result) {
  const auto base_type =
      type.enum_def ? type.enum_def->underlying_type.base_type : type.base_type;
  if (!IsInteger(base_type)) return Error("not a valid value for this field");
  uint64_t u64 = 0;
  // Walk attribute_ word by word (split on single spaces).
  for (size_t pos = 0; pos != std::string::npos;) {
    const auto delim = attribute_.find_first_of(' ', pos);
    const auto last = (std::string::npos == delim);
    auto word = attribute_.substr(pos, !last ? delim - pos : std::string::npos);
    pos = !last ? delim + 1 : std::string::npos;
    const EnumVal *ev = nullptr;
    if (type.enum_def) {
      ev = type.enum_def->Lookup(word);
    } else {
      // No enum context: the word must carry its own "Enum." qualifier.
      auto dot = word.find_first_of('.');
      if (std::string::npos == dot)
        return Error("enum values need to be qualified by an enum type");
      auto enum_def_str = word.substr(0, dot);
      const auto enum_def = LookupEnum(enum_def_str);
      if (!enum_def) return Error("unknown enum: " + enum_def_str);
      auto enum_val_str = word.substr(dot + 1);
      ev = enum_def->Lookup(enum_val_str);
    }
    if (!ev) return Error("unknown enum value: " + word);
    u64 |= ev->GetAsUInt64();
  }
  *result = IsUnsigned(base_type) ? NumToString(u64)
                                  : NumToString(static_cast<int64_t>(u64));
  return NoError();
}
// Consume the current identifier/string token and store its hash in
// e.constant, using the hash function named by the field's "hash" attribute
// at the width of the field's integer type. ParseField() already verified
// the named function exists for this width, so the Find* lookups here
// cannot fail.
CheckedError Parser::ParseHash(Value &e, FieldDef *field) {
  FLATBUFFERS_ASSERT(field);
  Value *hash_name = field->attributes.Lookup("hash");
  switch (e.type.base_type) {
    case BASE_TYPE_SHORT: {
      auto hash = FindHashFunction16(hash_name->constant.c_str());
      int16_t hashed_value = static_cast<int16_t>(hash(attribute_.c_str()));
      e.constant = NumToString(hashed_value);
      break;
    }
    case BASE_TYPE_USHORT: {
      auto hash = FindHashFunction16(hash_name->constant.c_str());
      uint16_t hashed_value = hash(attribute_.c_str());
      e.constant = NumToString(hashed_value);
      break;
    }
    case BASE_TYPE_INT: {
      auto hash = FindHashFunction32(hash_name->constant.c_str());
      int32_t hashed_value = static_cast<int32_t>(hash(attribute_.c_str()));
      e.constant = NumToString(hashed_value);
      break;
    }
    case BASE_TYPE_UINT: {
      auto hash = FindHashFunction32(hash_name->constant.c_str());
      uint32_t hashed_value = hash(attribute_.c_str());
      e.constant = NumToString(hashed_value);
      break;
    }
    case BASE_TYPE_LONG: {
      auto hash = FindHashFunction64(hash_name->constant.c_str());
      int64_t hashed_value = static_cast<int64_t>(hash(attribute_.c_str()));
      e.constant = NumToString(hashed_value);
      break;
    }
    case BASE_TYPE_ULONG: {
      auto hash = FindHashFunction64(hash_name->constant.c_str());
      uint64_t hashed_value = hash(attribute_.c_str());
      e.constant = NumToString(hashed_value);
      break;
    }
    default: FLATBUFFERS_ASSERT(0);
  }
  NEXT();
  return NoError();
}
// Generic fallback error for an unexpected token at the current position.
CheckedError Parser::TokenError() {
  const auto token_desc = TokenToStringId(token_);
  return Error("cannot parse value starting with: " + token_desc);
}
// Re-pack helper (ParseSingleValue) to normalize defaults of scalars.
template<typename T> inline void SingleValueRepack(Value &e, T val) {
  // Integers are round-tripped through NumToString, which canonicalizes
  // the textual form (e.g. strips leading zeros).
  if (!IsInteger(e.type.base_type)) return;
  e.constant = NumToString(val);
}
#if defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0)
// Normalize default NaNs to unsigned quiet-NaN(0), spelled "nan".
// (val != val) is the standard self-inequality test for NaN.
static inline void SingleValueRepack(Value &e, float val) {
  if (val != val) e.constant = "nan";
}
static inline void SingleValueRepack(Value &e, double val) {
  if (val != val) e.constant = "nan";
}
#endif
// Parse one scalar or string constant into `e`, whose type is already set.
// `name` (nullable) is only used in error messages.  Handles, in order:
// conversion functions like "deg(x)"; string/identifier-spelled scalars
// (booleans, enum names, quoted numbers); then plain numeric tokens.
// When `check_now` is true, the parsed scalar is immediately re-parsed
// and normalized via SingleValueRepack (used when parsing .fbs schemas;
// JSON parsing defers this to avoid double work — see ParseTable).
CheckedError Parser::ParseSingleValue(const std::string *name, Value &e,
                                      bool check_now) {
  // First see if this could be a conversion function:
  if (token_ == kTokenIdentifier && *cursor_ == '(') {
    // todo: Extract processing of conversion functions to ParseFunction.
    const auto functionname = attribute_;
    if (!IsFloat(e.type.base_type)) {
      return Error(functionname + ": type of argument mismatch, expecting: " +
                   kTypeNames[BASE_TYPE_DOUBLE] +
                   ", found: " + kTypeNames[e.type.base_type] +
                   ", name: " + (name ? *name : "") + ", value: " + e.constant);
    }
    NEXT();
    EXPECT('(');
    // The argument may itself be a function call; recurse with depth check.
    ECHECK(Recurse([&]() { return ParseSingleValue(name, e, false); }));
    EXPECT(')');
    // calculate with double precision
    double x, y = 0.0;
    ECHECK(atot(e.constant.c_str(), *this, &x));
    auto func_match = false;
    // clang-format off
    #define FLATBUFFERS_FN_DOUBLE(name, op) \
      if (!func_match && functionname == name) { y = op; func_match = true; }
    FLATBUFFERS_FN_DOUBLE("deg", x / kPi * 180);
    FLATBUFFERS_FN_DOUBLE("rad", x * kPi / 180);
    FLATBUFFERS_FN_DOUBLE("sin", sin(x));
    FLATBUFFERS_FN_DOUBLE("cos", cos(x));
    FLATBUFFERS_FN_DOUBLE("tan", tan(x));
    FLATBUFFERS_FN_DOUBLE("asin", asin(x));
    FLATBUFFERS_FN_DOUBLE("acos", acos(x));
    FLATBUFFERS_FN_DOUBLE("atan", atan(x));
    // TODO(wvo): add more useful conversion functions here.
    #undef FLATBUFFERS_FN_DOUBLE
    // clang-format on
    if (true != func_match) {
      return Error(std::string("Unknown conversion function: ") + functionname +
                   ", field name: " + (name ? *name : "") +
                   ", value: " + e.constant);
    }
    e.constant = NumToString(y);
    return NoError();
  }
  auto match = false;
  const auto in_type = e.type.base_type;
  // Each *_ECHECK attempts one token/type combination and sets `match`
  // on success; later attempts are skipped once matched.
  // clang-format off
  #define IF_ECHECK_(force, dtoken, check, req)    \
    if (!match && ((check) || IsConstTrue(force))) \
    ECHECK(TryTypedValue(name, dtoken, check, e, req, &match))
  #define TRY_ECHECK(dtoken, check, req) IF_ECHECK_(false, dtoken, check, req)
  #define FORCE_ECHECK(dtoken, check, req) IF_ECHECK_(true, dtoken, check, req)
  // clang-format on
  if (token_ == kTokenStringConstant || token_ == kTokenIdentifier) {
    const auto kTokenStringOrIdent = token_;
    // The string type is a most probable type, check it first.
    TRY_ECHECK(kTokenStringConstant, in_type == BASE_TYPE_STRING,
               BASE_TYPE_STRING);
    // avoid escaped and non-ascii in the string
    if (!match && (token_ == kTokenStringConstant) && IsScalar(in_type) &&
        !attr_is_trivial_ascii_string_) {
      return Error(
          std::string("type mismatch or invalid value, an initializer of "
                      "non-string field must be trivial ASCII string: type: ") +
          kTypeNames[in_type] + ", name: " + (name ? *name : "") +
          ", value: " + attribute_);
    }
    // A boolean as true/false. Boolean as Integer check below.
    if (!match && IsBool(in_type)) {
      auto is_true = attribute_ == "true";
      if (is_true || attribute_ == "false") {
        attribute_ = is_true ? "1" : "0";
        // accepts both kTokenStringConstant and kTokenIdentifier
        TRY_ECHECK(kTokenStringOrIdent, IsBool(in_type), BASE_TYPE_BOOL);
      }
    }
    // Check if this could be a string/identifier enum value.
    // Enum can have only true integer base type.
    if (!match && IsInteger(in_type) && !IsBool(in_type) &&
        IsIdentifierStart(*attribute_.c_str())) {
      ECHECK(ParseEnumFromString(e.type, &e.constant));
      NEXT();
      match = true;
    }
    // Parse a float/integer number from the string.
    if (!match) check_now = true;  // Re-pack if parsed from string literal.
    if (!match && (token_ == kTokenStringConstant) && IsScalar(in_type)) {
      // remove trailing whitespaces from attribute_
      auto last = attribute_.find_last_not_of(' ');
      if (std::string::npos != last)  // has non-whitespace
        attribute_.resize(last + 1);
    }
    // Float numbers or nan, inf, pi, etc.
    TRY_ECHECK(kTokenStringOrIdent, IsFloat(in_type), BASE_TYPE_FLOAT);
    // An integer constant in string.
    TRY_ECHECK(kTokenStringOrIdent, IsInteger(in_type), BASE_TYPE_INT);
    // Unknown tokens will be interpreted as string type.
    // An attribute value may be a scalar or string constant.
    FORCE_ECHECK(kTokenStringConstant, in_type == BASE_TYPE_STRING,
                 BASE_TYPE_STRING);
  } else {
    // Try a float number.
    TRY_ECHECK(kTokenFloatConstant, IsFloat(in_type), BASE_TYPE_FLOAT);
    // Integer token can init any scalar (integer of float).
    FORCE_ECHECK(kTokenIntegerConstant, IsScalar(in_type), BASE_TYPE_INT);
  }
  #undef FORCE_ECHECK
  #undef TRY_ECHECK
  #undef IF_ECHECK_
  if (!match) {
    std::string msg;
    msg += "Cannot assign token starting with '" + TokenToStringId(token_) +
           "' to value of <" + std::string(kTypeNames[in_type]) + "> type.";
    return Error(msg);
  }
  const auto match_type = e.type.base_type;  // may differ from in_type
  // The check_now flag must be true when parse a fbs-schema.
  // This flag forces to check default scalar values or metadata of field.
  // For JSON parser the flag should be false.
  // If it is set for JSON each value will be checked twice (see ParseTable).
  if (check_now && IsScalar(match_type)) {
    // Round-trip the constant through the matched C type to normalize it.
    // clang-format off
    switch (match_type) {
    #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...) \
            case BASE_TYPE_ ## ENUM: {\
                CTYPE val; \
                ECHECK(atot(e.constant.c_str(), *this, &val)); \
                SingleValueRepack(e, val); \
              break; }
    FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD)
    #undef FLATBUFFERS_TD
    default: break;
    }
    // clang-format on
  }
  return NoError();
}
// Look up a struct by `name` relative to the current namespace, optionally
// creating it.  `definition` means this is the struct's actual definition
// site (vs. a mere use); `create_if_new` allows creating a "predeclared"
// placeholder so forward/circular references can resolve later.  Returns
// null only when the struct doesn't exist and `create_if_new` is false.
StructDef *Parser::LookupCreateStruct(const std::string &name,
                                      bool create_if_new, bool definition) {
  std::string qualified_name = current_namespace_->GetFullyQualifiedName(name);
  // See if it exists pre-declared by an unqualified use.
  auto struct_def = LookupStruct(name);
  if (struct_def && struct_def->predecl) {
    if (definition) {
      // Make sure it has the current namespace, and is registered under its
      // qualified name.
      struct_def->defined_namespace = current_namespace_;
      structs_.Move(name, qualified_name);
    }
    return struct_def;
  }
  // See if it exists pre-declared by an qualified use.
  struct_def = LookupStruct(qualified_name);
  if (struct_def && struct_def->predecl) {
    if (definition) {
      // Make sure it has the current namespace.
      struct_def->defined_namespace = current_namespace_;
    }
    return struct_def;
  }
  if (!definition) {
    // Search thru parent namespaces.
    for (size_t components = current_namespace_->components.size();
         components && !struct_def; components--) {
      struct_def = LookupStruct(
          current_namespace_->GetFullyQualifiedName(name, components - 1));
    }
  }
  if (!struct_def && create_if_new) {
    struct_def = new StructDef();
    if (definition) {
      structs_.Add(qualified_name, struct_def);
      struct_def->name = name;
      struct_def->defined_namespace = current_namespace_;
    } else {
      // Not a definition.
      // Rather than failing, we create a "pre declared" StructDef, due to
      // circular references, and check for errors at the end of parsing.
      // It is defined in the current namespace, as the best guess what the
      // final namespace will be.
      structs_.Add(name, struct_def);
      struct_def->name = name;
      struct_def->defined_namespace = current_namespace_;
      // Remember where the forward reference occurred, for error reporting.
      struct_def->original_location.reset(
          new std::string(file_being_parsed_ + ":" + NumToString(line_)));
    }
  }
  return struct_def;
}
// First enumerator (minimum once sorted), or nullptr for an empty enum.
const EnumVal *EnumDef::MinValue() const {
  if (vals.vec.empty()) return nullptr;
  return vals.vec.front();
}
// Last enumerator (maximum once sorted), or nullptr for an empty enum.
const EnumVal *EnumDef::MaxValue() const {
  if (vals.vec.empty()) return nullptr;
  return vals.vec.back();
}
// Absolute distance |e1 - e2| between two enum values of scalar type T.
template<typename T> static uint64_t EnumDistanceImpl(T e1, T e2) {
  // Order the pair so that e1 >= e2 (std::swap works for plain scalars).
  if (e2 > e1) std::swap(e1, e2);
  // Subtract in the unsigned domain: signed overflow would be UB, whereas
  // unsigned wrap-around is well-defined (modulo 2^n) by the C++ standard.
  return static_cast<uint64_t>(e1) - static_cast<uint64_t>(e2);
}
// Distance between two enumerators, interpreted in the enum's own domain
// (unsigned for uint64-based enums, signed otherwise).
uint64_t EnumDef::Distance(const EnumVal *v1, const EnumVal *v2) const {
  if (IsUInt64()) {
    return EnumDistanceImpl(v1->GetAsUInt64(), v2->GetAsUInt64());
  }
  return EnumDistanceImpl(v1->GetAsInt64(), v2->GetAsInt64());
}
// For a bit_flags enum, return the OR of every enumerator's value as a
// decimal string (signed or unsigned per the underlying type).
std::string EnumDef::AllFlags() const {
  FLATBUFFERS_ASSERT(attributes.Lookup("bit_flags"));
  uint64_t mask = 0;
  for (auto it = Vals().begin(); it != Vals().end(); ++it) {
    mask |= (*it)->GetAsUInt64();
  }
  if (IsUInt64()) return NumToString(mask);
  return NumToString(static_cast<int64_t>(mask));
}
// Find the enumerator whose value equals `enum_idx`, or nullptr.
// For unions, `skip_union_default` excludes the implicit first (NONE) entry.
EnumVal *EnumDef::ReverseLookup(int64_t enum_idx,
                                bool skip_union_default) const {
  const auto start = (is_union && skip_union_default) ? 1 : 0;
  for (auto it = Vals().begin() + start; it != Vals().end(); ++it) {
    EnumVal *candidate = *it;
    if (candidate->GetAsInt64() == enum_idx) return candidate;
  }
  return nullptr;
}
// Find the enumerator matching a numeric `constant` string, or nullptr.
// The string is parsed in the enum's own domain (uint64 or int64).
EnumVal *EnumDef::FindByValue(const std::string &constant) const {
  int64_t value;
  bool parsed;
  if (IsUInt64()) {
    uint64_t u64;  // avoid reinterpret_cast of pointers
    parsed = StringToNumber(constant.c_str(), &u64);
    value = static_cast<int64_t>(u64);
  } else {
    parsed = StringToNumber(constant.c_str(), &value);
  }
  // The constant should always be a valid number at this point.
  FLATBUFFERS_ASSERT(parsed);
  if (!parsed) return nullptr;
  return ReverseLookup(value, false);
}
void EnumDef::SortByValue() {
auto &v = vals.vec;
if (IsUInt64())
std::sort(v.begin(), v.end(), [](const EnumVal *e1, const EnumVal *e2) {
return e1->GetAsUInt64() < e2->GetAsUInt64();
});
else
std::sort(v.begin(), v.end(), [](const EnumVal *e1, const EnumVal *e2) {
return e1->GetAsInt64() < e2->GetAsInt64();
});
}
// Collapse consecutive enumerators sharing the same value, keeping the
// first of each run (the vector is expected to be value-sorted first).
// Works like std::unique, but also frees the dropped EnumVals and
// re-points their dict entries at the surviving duplicate.
void EnumDef::RemoveDuplicates() {
  // This method depends on the SymbolTable implementation!
  // 1) vals.vec - owner (raw pointer)
  // 2) vals.dict - access map
  auto first = vals.vec.begin();
  auto last = vals.vec.end();
  if (first == last) return;
  auto result = first;  // last element of the de-duplicated prefix
  while (++first != last) {
    if ((*result)->value != (*first)->value) {
      *(++result) = *first;
    } else {
      auto ev = *first;
      // Any dict entries pointing at the doomed duplicate must be redirected
      // before it is freed.
      for (auto it = vals.dict.begin(); it != vals.dict.end(); ++it) {
        if (it->second == ev) it->second = *result;  // reassign
      }
      delete ev;  // delete enum value
      *first = nullptr;
    }
  }
  vals.vec.erase(++result, last);
}
// Overwrite an enumerator's value; storage is always int64_t regardless of
// the enum's underlying type.
template<typename T> void EnumDef::ChangeEnumValue(EnumVal *ev, T new_value) {
  const auto stored = static_cast<int64_t>(new_value);
  ev->value = stored;
}
namespace EnumHelper {
// Maps a BaseType to the C++ type used for range-checking enum values:
// int64_t for every integral base except BASE_TYPE_ULONG, which needs the
// full unsigned 64-bit range.
template<BaseType E> struct EnumValType { typedef int64_t type; };
template<> struct EnumValType<BASE_TYPE_ULONG> { typedef uint64_t type; };
}  // namespace EnumHelper
// Incrementally builds the enumerators of an EnumDef while validating each
// value (range checks, optional strictly-ascending order).  Owns at most
// one pending EnumVal ("temp") between Create* and Accept*; the destructor
// frees it if parsing errors out before acceptance.
struct EnumValBuilder {
  // Create an enumerator whose value continues from the previous one
  // (or 0 for the first).  Ownership stays with the builder until accepted.
  EnumVal *CreateEnumerator(const std::string &ev_name) {
    FLATBUFFERS_ASSERT(!temp);
    auto first = enum_def.vals.vec.empty();
    user_value = first;
    temp = new EnumVal(ev_name, first ? 0 : enum_def.vals.vec.back()->value);
    return temp;
  }
  // Create an enumerator with an explicitly supplied value.
  EnumVal *CreateEnumerator(const std::string &ev_name, int64_t val) {
    FLATBUFFERS_ASSERT(!temp);
    user_value = true;
    temp = new EnumVal(ev_name, val);
    return temp;
  }
  // Validate the pending enumerator and transfer it into the enum under
  // `name`.  Implicit (non-user) values are incremented here.
  FLATBUFFERS_CHECKED_ERROR AcceptEnumerator(const std::string &name) {
    FLATBUFFERS_ASSERT(temp);
    ECHECK(ValidateValue(&temp->value, false == user_value));
    FLATBUFFERS_ASSERT((temp->union_type.enum_def == nullptr) ||
                       (temp->union_type.enum_def == &enum_def));
    // SymbolTable::Add returns true on name collision.
    auto not_unique = enum_def.vals.Add(name, temp);
    temp = nullptr;
    if (not_unique) return parser.Error("enum value already exists: " + name);
    return NoError();
  }
  FLATBUFFERS_CHECKED_ERROR AcceptEnumerator() {
    return AcceptEnumerator(temp->name);
  }
  // Parse and assign an explicit "= value" for the pending enumerator,
  // enforcing range fit and (when strict) ascending order.
  FLATBUFFERS_CHECKED_ERROR AssignEnumeratorValue(const std::string &value) {
    user_value = true;
    auto fit = false;
    auto ascending = false;
    if (enum_def.IsUInt64()) {
      uint64_t u64;
      fit = StringToNumber(value.c_str(), &u64);
      ascending = u64 > temp->GetAsUInt64();
      temp->value = static_cast<int64_t>(u64);  // well-defined since C++20.
    } else {
      int64_t i64;
      fit = StringToNumber(value.c_str(), &i64);
      ascending = i64 > temp->GetAsInt64();
      temp->value = i64;
    }
    if (!fit) return parser.Error("enum value does not fit, \"" + value + "\"");
    if (!ascending && strict_ascending && !enum_def.vals.vec.empty())
      return parser.Error("enum values must be specified in ascending order");
    return NoError();
  }
  // Range-check *ev against CTYPE; `m` is 1 when the value still needs the
  // implicit +1 increment (auto-assigned enumerators), else 0.
  template<BaseType E, typename CTYPE>
  inline FLATBUFFERS_CHECKED_ERROR ValidateImpl(int64_t *ev, int m) {
    typedef typename EnumHelper::EnumValType<E>::type T;  // int64_t or uint64_t
    static_assert(sizeof(T) == sizeof(int64_t), "invalid EnumValType");
    const auto v = static_cast<T>(*ev);
    auto up = static_cast<T>((flatbuffers::numeric_limits<CTYPE>::max)());
    auto dn = static_cast<T>((flatbuffers::numeric_limits<CTYPE>::lowest)());
    if (v < dn || v > (up - m)) {
      return parser.Error("enum value does not fit, \"" + NumToString(v) +
                          (m ? " + 1\"" : "\"") + " out of " +
                          TypeToIntervalString<CTYPE>());
    }
    *ev = static_cast<int64_t>(v + m);  // well-defined since C++20.
    return NoError();
  }
  // Dispatch ValidateImpl on the enum's underlying scalar type.
  FLATBUFFERS_CHECKED_ERROR ValidateValue(int64_t *ev, bool next) {
    // clang-format off
    switch (enum_def.underlying_type.base_type) {
    #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, ...)             \
      case BASE_TYPE_##ENUM: {                                    \
        if (!IsInteger(BASE_TYPE_##ENUM)) break;                  \
        return ValidateImpl<BASE_TYPE_##ENUM, CTYPE>(ev, next ? 1 : 0); \
      }
      FLATBUFFERS_GEN_TYPES_SCALAR(FLATBUFFERS_TD)
    #undef FLATBUFFERS_TD
    default: break;
    }
    // clang-format on
    return parser.Error("fatal: invalid enum underlying type");
  }
  EnumValBuilder(Parser &_parser, EnumDef &_enum_def, bool strict_order = true)
      : parser(_parser),
        enum_def(_enum_def),
        temp(nullptr),
        strict_ascending(strict_order),
        user_value(false) {}
  ~EnumValBuilder() { delete temp; }
  Parser &parser;
  EnumDef &enum_def;
  EnumVal *temp;               // pending, not-yet-accepted enumerator (owned)
  const bool strict_ascending; // enforce ascending declaration order
  bool user_value;             // was temp's value explicitly user-supplied?
};
// Parse an enum or union declaration: name, underlying type (mandatory for
// plain enums outside proto mode), metadata, enumerator list, and the
// bit_flags post-processing.  Registers the result in enums_ and types_.
CheckedError Parser::ParseEnum(const bool is_union, EnumDef **dest) {
  std::vector<std::string> enum_comment = doc_comment_;
  NEXT();
  std::string enum_name = attribute_;
  EXPECT(kTokenIdentifier);
  EnumDef *enum_def;
  ECHECK(StartEnum(enum_name, is_union, &enum_def));
  enum_def->doc_comment = enum_comment;
  if (!is_union && !opts.proto_mode) {
    // Give specialized error message, since this type spec used to
    // be optional in the first FlatBuffers release.
    if (!Is(':')) {
      return Error(
          "must specify the underlying integer type for this"
          " enum (e.g. \': short\', which was the default).");
    } else {
      NEXT();
    }
    // Specify the integer type underlying this enum.
    ECHECK(ParseType(enum_def->underlying_type));
    if (!IsInteger(enum_def->underlying_type.base_type) ||
        IsBool(enum_def->underlying_type.base_type))
      return Error("underlying enum type must be integral");
    // Make this type refer back to the enum it was derived from.
    enum_def->underlying_type.enum_def = enum_def;
  }
  ECHECK(ParseMetaData(&enum_def->attributes));
  const auto underlying_type = enum_def->underlying_type.base_type;
  if (enum_def->attributes.Lookup("bit_flags") &&
      !IsUnsigned(underlying_type)) {
    // todo: Convert to the Error in the future?
    Warning("underlying type of bit_flags enum must be unsigned");
  }
  // Protobuf allows them to be specified in any order, so sort afterwards.
  const auto strict_ascending = (false == opts.proto_mode);
  EnumValBuilder evb(*this, *enum_def, strict_ascending);
  EXPECT('{');
  // A lot of code generators expect that an enum is not-empty.
  if ((is_union || Is('}')) && !opts.proto_mode) {
    // Insert the implicit NONE entry for unions / empty enums.
    evb.CreateEnumerator("NONE");
    ECHECK(evb.AcceptEnumerator());
  }
  // Used to detect unions where the same type appears more than once.
  std::set<std::pair<BaseType, StructDef *>> union_types;
  while (!Is('}')) {
    if (opts.proto_mode && attribute_ == "option") {
      ECHECK(ParseProtoOption());
    } else {
      auto &ev = *evb.CreateEnumerator(attribute_);
      auto full_name = ev.name;
      ev.doc_comment = doc_comment_;
      EXPECT(kTokenIdentifier);
      if (is_union) {
        ECHECK(ParseNamespacing(&full_name, &ev.name));
        if (opts.union_value_namespacing) {
          // Since we can't namespace the actual enum identifiers, turn
          // namespace parts into part of the identifier.
          ev.name = full_name;
          std::replace(ev.name.begin(), ev.name.end(), '.', '_');
        }
        if (Is(':')) {
          // Explicit member type, e.g. "Member: SomeTable".
          NEXT();
          ECHECK(ParseType(ev.union_type));
          if (ev.union_type.base_type != BASE_TYPE_STRUCT &&
              ev.union_type.base_type != BASE_TYPE_STRING)
            return Error("union value type may only be table/struct/string");
        } else {
          ev.union_type = Type(BASE_TYPE_STRUCT, LookupCreateStruct(full_name));
        }
        if (!enum_def->uses_multiple_type_instances) {
          auto ins = union_types.insert(std::make_pair(
              ev.union_type.base_type, ev.union_type.struct_def));
          enum_def->uses_multiple_type_instances = (false == ins.second);
        }
      }
      if (Is('=')) {
        NEXT();
        ECHECK(evb.AssignEnumeratorValue(attribute_));
        EXPECT(kTokenIntegerConstant);
      } else if (false == strict_ascending) {
        // The opts.proto_mode flag is active.
        return Error("Protobuf mode doesn't allow implicit enum values.");
      }
      ECHECK(evb.AcceptEnumerator());
      if (opts.proto_mode && Is('[')) {
        NEXT();
        // ignore attributes on enums.
        while (token_ != ']') NEXT();
        NEXT();
      }
    }
    if (!Is(opts.proto_mode ? ';' : ',')) break;
    NEXT();
  }
  EXPECT('}');
  // At this point, the enum can be empty if input is invalid proto-file.
  if (!enum_def->size())
    return Error("incomplete enum declaration, values not found");
  if (enum_def->attributes.Lookup("bit_flags")) {
    // Re-map each enumerator's value N to the flag 1 << N.
    const auto base_width = static_cast<uint64_t>(8 * SizeOf(underlying_type));
    for (auto it = enum_def->Vals().begin(); it != enum_def->Vals().end();
         ++it) {
      auto ev = *it;
      const auto u = ev->GetAsUInt64();
      // Stop manipulations with the sign.
      if (!IsUnsigned(underlying_type) && u == (base_width - 1))
        return Error("underlying type of bit_flags enum must be unsigned");
      if (u >= base_width)
        return Error("bit flag out of range of underlying integral type");
      enum_def->ChangeEnumValue(ev, 1ULL << u);
    }
  }
  if (false == strict_ascending)
    enum_def->SortByValue();  // Must be sorted to use MinValue/MaxValue.
  if (dest) *dest = enum_def;
  types_.Add(current_namespace_->GetFullyQualifiedName(enum_def->name),
             new Type(BASE_TYPE_UNION, nullptr, enum_def));
  return NoError();
}
// Begin a struct/table definition named `name`, reusing a predeclared
// (forward-referenced) StructDef if one exists; errors on redefinition.
CheckedError Parser::StartStruct(const std::string &name, StructDef **dest) {
  auto &struct_def = *LookupCreateStruct(name, true, true);
  if (!struct_def.predecl) return Error("datatype already exists: " + name);
  struct_def.predecl = false;
  struct_def.name = name;
  struct_def.file = file_being_parsed_;
  // Move this struct to the back of the vector just in case it was predeclared,
  // to preserve declaration order.
  // std::remove shifts the remaining entries down and returns the now-free
  // last slot, into which the struct is re-inserted.
  *std::remove(structs_.vec.begin(), structs_.vec.end(), &struct_def) =
      &struct_def;
  *dest = &struct_def;
  return NoError();
}
// Reject field names that would collide with accessor names the code
// generators synthesize for another field (e.g. "foo" of vector type
// generates "foo_length", clashing with an explicit "foo_length" field).
CheckedError Parser::CheckClash(std::vector<FieldDef *> &fields,
                                StructDef *struct_def, const char *suffix,
                                BaseType basetype) {
  const auto suffix_len = strlen(suffix);
  for (auto it = fields.begin(); it != fields.end(); ++it) {
    const auto &fname = (*it)->name;
    const bool has_suffix =
        fname.length() > suffix_len &&
        fname.compare(fname.length() - suffix_len, suffix_len, suffix) == 0;
    if (!has_suffix || (*it)->value.type.base_type == BASE_TYPE_UTYPE) {
      continue;
    }
    // Strip the suffix and see if a field of the clashing base type exists.
    auto field =
        struct_def->fields.Lookup(fname.substr(0, fname.length() - suffix_len));
    if (field && field->value.type.base_type == basetype)
      return Error("Field " + fname +
                   " would clash with generated functions for field " +
                   field->name);
  }
  return NoError();
}
// True when every requested target language supports advanced union
// features; a zero language mask counts as unsupported.
bool Parser::SupportsAdvancedUnionFeatures() const {
  const auto supported =
      IDLOptions::kCpp | IDLOptions::kJs | IDLOptions::kTs | IDLOptions::kPhp |
      IDLOptions::kJava | IDLOptions::kCSharp | IDLOptions::kKotlin |
      IDLOptions::kBinary;
  if (opts.lang_to_generate == 0) return false;
  // No bit may be set outside the supported mask.
  return (opts.lang_to_generate & ~supported) == 0;
}
// True when every requested target language supports fixed-size arrays.
bool Parser::SupportsAdvancedArrayFeatures() const {
  const auto supported =
      IDLOptions::kCpp | IDLOptions::kPython | IDLOptions::kJava |
      IDLOptions::kCSharp | IDLOptions::kJsonSchema | IDLOptions::kJson |
      IDLOptions::kBinary;
  // No bit may be set outside the supported mask.
  return (opts.lang_to_generate & ~supported) == 0;
}
// Deduplicate a freshly built Namespace: if an equivalent one is already
// registered, delete the candidate and return the existing instance;
// otherwise register and return the candidate (ownership moves to parser).
Namespace *Parser::UniqueNamespace(Namespace *ns) {
  for (auto it = namespaces_.begin(); it != namespaces_.end(); ++it) {
    Namespace *existing = *it;
    if (existing->components == ns->components) {
      delete ns;
      return existing;
    }
  }
  namespaces_.push_back(ns);
  return ns;
}
// Split a dotted fully-qualified name: all leading components become the
// (new) current namespace, and the final component is returned.
// NOTE: this has the side effect of switching current_namespace_.
std::string Parser::UnqualifiedName(const std::string &full_qualified_name) {
  Namespace *ns = new Namespace();
  std::size_t current, previous = 0;
  current = full_qualified_name.find('.');
  while (current != std::string::npos) {
    ns->components.push_back(
        full_qualified_name.substr(previous, current - previous));
    previous = current + 1;
    current = full_qualified_name.find('.', previous);
  }
  // UniqueNamespace takes ownership of ns (and may delete it).
  current_namespace_ = UniqueNamespace(ns);
  // Here current == npos, so substr takes everything after the last '.'.
  return full_qualified_name.substr(previous, current - previous);
}
// Ordering predicate: sort fields by their explicit "id" attribute value.
// Callers guarantee every field carries an "id" attribute.
static bool compareFieldDefs(const FieldDef *a, const FieldDef *b) {
  const int lhs = atoi(a->attributes.Lookup("id")->constant.c_str());
  const int rhs = atoi(b->attributes.Lookup("id")->constant.c_str());
  return lhs < rhs;
}
// Parse a "table" or "struct" declaration: name, metadata, fields,
// force_align handling, optional manual field ids, and accessor-name
// clash checks.  Registers the finished type in types_.
CheckedError Parser::ParseDecl() {
  std::vector<std::string> dc = doc_comment_;
  bool fixed = IsIdent("struct");
  if (!fixed && !IsIdent("table")) return Error("declaration expected");
  NEXT();
  std::string name = attribute_;
  EXPECT(kTokenIdentifier);
  StructDef *struct_def;
  ECHECK(StartStruct(name, &struct_def));
  struct_def->doc_comment = dc;
  struct_def->fixed = fixed;
  ECHECK(ParseMetaData(&struct_def->attributes));
  // Tables are size-sorted by default unless "original_order" is requested;
  // structs always keep declaration order.
  struct_def->sortbysize =
      struct_def->attributes.Lookup("original_order") == nullptr && !fixed;
  EXPECT('{');
  while (token_ != '}') ECHECK(ParseField(*struct_def));
  auto force_align = struct_def->attributes.Lookup("force_align");
  if (fixed) {
    if (force_align) {
      auto align = static_cast<size_t>(atoi(force_align->constant.c_str()));
      if (force_align->type.base_type != BASE_TYPE_INT ||
          align < struct_def->minalign || align > FLATBUFFERS_MAX_ALIGNMENT ||
          align & (align - 1))
        // Fixed: the two adjacent literals previously concatenated into
        // "...from thestruct's..." (missing space after "the").
        return Error(
            "force_align must be a power of two integer ranging from the "
            "struct\'s natural alignment to " +
            NumToString(FLATBUFFERS_MAX_ALIGNMENT));
      struct_def->minalign = align;
    }
    if (!struct_def->bytesize) return Error("size 0 structs not allowed");
  }
  struct_def->PadLastField(struct_def->minalign);
  // Check if this is a table that has manual id assignments
  auto &fields = struct_def->fields.vec;
  if (!fixed && fields.size()) {
    size_t num_id_fields = 0;
    for (auto it = fields.begin(); it != fields.end(); ++it) {
      if ((*it)->attributes.Lookup("id")) num_id_fields++;
    }
    // If any fields have ids..
    if (num_id_fields) {
      // Then all fields must have them.
      if (num_id_fields != fields.size())
        return Error(
            "either all fields or no fields must have an 'id' attribute");
      // Simply sort by id, then the fields are the same as if no ids had
      // been specified.
      std::sort(fields.begin(), fields.end(), compareFieldDefs);
      // Verify we have a contiguous set, and reassign vtable offsets.
      for (int i = 0; i < static_cast<int>(fields.size()); i++) {
        if (i != atoi(fields[i]->attributes.Lookup("id")->constant.c_str()))
          return Error("field id\'s must be consecutive from 0, id " +
                       NumToString(i) + " missing or set twice");
        fields[i]->value.offset = FieldIndexToOffset(static_cast<voffset_t>(i));
      }
    }
  }
  // Reject field names that collide with generated accessor names.
  ECHECK(
      CheckClash(fields, struct_def, UnionTypeFieldSuffix(), BASE_TYPE_UNION));
  ECHECK(CheckClash(fields, struct_def, "Type", BASE_TYPE_UNION));
  ECHECK(CheckClash(fields, struct_def, "_length", BASE_TYPE_VECTOR));
  ECHECK(CheckClash(fields, struct_def, "Length", BASE_TYPE_VECTOR));
  ECHECK(CheckClash(fields, struct_def, "_byte_vector", BASE_TYPE_STRING));
  ECHECK(CheckClash(fields, struct_def, "ByteVector", BASE_TYPE_STRING));
  EXPECT('}');
  types_.Add(current_namespace_->GetFullyQualifiedName(struct_def->name),
             new Type(BASE_TYPE_STRUCT, struct_def, nullptr));
  return NoError();
}
// Parse an "rpc_service" declaration: service name, metadata, and a list
// of "Method(RequestTable): ResponseTable;" calls.  Both request and
// response must be tables (not structs).
CheckedError Parser::ParseService() {
  std::vector<std::string> service_comment = doc_comment_;
  NEXT();
  auto service_name = attribute_;
  EXPECT(kTokenIdentifier);
  // Ownership of the ServiceDef transfers to services_ on successful Add.
  auto &service_def = *new ServiceDef();
  service_def.name = service_name;
  service_def.file = file_being_parsed_;
  service_def.doc_comment = service_comment;
  service_def.defined_namespace = current_namespace_;
  if (services_.Add(current_namespace_->GetFullyQualifiedName(service_name),
                    &service_def))
    return Error("service already exists: " + service_name);
  ECHECK(ParseMetaData(&service_def.attributes));
  EXPECT('{');
  do {
    std::vector<std::string> doc_comment = doc_comment_;
    auto rpc_name = attribute_;
    EXPECT(kTokenIdentifier);
    EXPECT('(');
    Type reqtype, resptype;
    ECHECK(ParseTypeIdent(reqtype));
    EXPECT(')');
    EXPECT(':');
    ECHECK(ParseTypeIdent(resptype));
    // `fixed` marks a struct (inline value type); only tables are allowed.
    if (reqtype.base_type != BASE_TYPE_STRUCT || reqtype.struct_def->fixed ||
        resptype.base_type != BASE_TYPE_STRUCT || resptype.struct_def->fixed)
      return Error("rpc request and response types must be tables");
    auto &rpc = *new RPCCall();
    rpc.name = rpc_name;
    rpc.request = reqtype.struct_def;
    rpc.response = resptype.struct_def;
    rpc.doc_comment = doc_comment;
    if (service_def.calls.Add(rpc_name, &rpc))
      return Error("rpc already exists: " + rpc_name);
    ECHECK(ParseMetaData(&rpc.attributes));
    EXPECT(';');
  } while (token_ != '}');
  NEXT();
  return NoError();
}
// Resolve `name` to a struct/table and make it the schema's root type.
// Tries the name as given first, then qualified with the current namespace.
bool Parser::SetRootType(const char *name) {
  root_struct_def_ = LookupStruct(name);
  if (root_struct_def_ == nullptr) {
    const auto qualified = current_namespace_->GetFullyQualifiedName(name);
    root_struct_def_ = LookupStruct(qualified);
  }
  return root_struct_def_ != nullptr;
}
void Parser::MarkGenerated() {
// This function marks all existing definitions as having already
// been generated, which signals no code for included files should be
// generated.
for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) {
(*it)->generated = true;
}
for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) {
if (!(*it)->predecl) { (*it)->generated = true; }
}
for (auto it = services_.vec.begin(); it != services_.vec.end(); ++it) {
(*it)->generated = true;
}
}
// Parse "namespace a.b.c;" (or the empty "namespace;") and make it the
// current namespace for subsequent declarations.
CheckedError Parser::ParseNamespace() {
  NEXT();
  auto ns = new Namespace();
  namespaces_.push_back(ns);  // Store it here to not leak upon error.
  if (token_ != ';') {
    for (;;) {
      ns->components.push_back(attribute_);
      EXPECT(kTokenIdentifier);
      // NEXT() expands to a statement, so the `else` pairs with this `if`.
      if (Is('.')) NEXT() else break;
    }
  }
  // Hand ownership over to UniqueNamespace, which deletes ns if a duplicate.
  namespaces_.pop_back();
  current_namespace_ = UniqueNamespace(ns);
  EXPECT(';');
  return NoError();
}
// Best effort parsing of .proto declarations, with the aim to turn them
// in the closest corresponding FlatBuffer equivalent.
// We parse everything as identifiers instead of keywords, since we don't
// want protobuf keywords to become invalid identifiers in FlatBuffers.
CheckedError Parser::ParseProtoDecl() {
  bool isextend = IsIdent("extend");
  if (IsIdent("package")) {
    // These are identical in syntax to FlatBuffer's namespace decl.
    ECHECK(ParseNamespace());
  } else if (IsIdent("message") || isextend) {
    std::vector<std::string> struct_comment = doc_comment_;
    NEXT();
    StructDef *struct_def = nullptr;
    Namespace *parent_namespace = nullptr;
    if (isextend) {
      if (Is('.')) NEXT();  // qualified names may start with a . ?
      auto id = attribute_;
      EXPECT(kTokenIdentifier);
      ECHECK(ParseNamespacing(&id, nullptr));
      struct_def = LookupCreateStruct(id, false);
      if (!struct_def)
        return Error("cannot extend unknown message type: " + id);
    } else {
      std::string name = attribute_;
      EXPECT(kTokenIdentifier);
      ECHECK(StartStruct(name, &struct_def));
      // Since message definitions can be nested, we create a new namespace.
      auto ns = new Namespace();
      // Copy of current namespace.
      *ns = *current_namespace_;
      // But with current message name.
      ns->components.push_back(name);
      ns->from_table++;
      parent_namespace = current_namespace_;
      current_namespace_ = UniqueNamespace(ns);
    }
    struct_def->doc_comment = struct_comment;
    ECHECK(ParseProtoFields(struct_def, isextend, false));
    // Restore the namespace we entered for nested message parsing.
    if (!isextend) { current_namespace_ = parent_namespace; }
    if (Is(';')) NEXT();
  } else if (IsIdent("enum")) {
    // These are almost the same, just with different terminator:
    EnumDef *enum_def;
    ECHECK(ParseEnum(false, &enum_def));
    if (Is(';')) NEXT();
    // Temp: remove any duplicates, as .fbs files can't handle them.
    enum_def->RemoveDuplicates();
  } else if (IsIdent("syntax")) {  // Skip these.
    NEXT();
    EXPECT('=');
    EXPECT(kTokenStringConstant);
    EXPECT(';');
  } else if (IsIdent("option")) {  // Skip these.
    ECHECK(ParseProtoOption());
    EXPECT(';');
  } else if (IsIdent("service")) {  // Skip these.
    NEXT();
    EXPECT(kTokenIdentifier);
    ECHECK(ParseProtoCurliesOrIdent());
  } else {
    return Error("don\'t know how to parse .proto declaration starting with " +
                 TokenToStringId(token_));
  }
  return NoError();
}
// Allocate and register a new EnumDef named `enum_name` in the current
// namespace; errors on redefinition.  Ownership passes to enums_ on
// success.  The union/enum distinction selects the default underlying type.
CheckedError Parser::StartEnum(const std::string &enum_name, bool is_union,
                               EnumDef **dest) {
  auto *enum_def = new EnumDef();
  enum_def->name = enum_name;
  enum_def->file = file_being_parsed_;
  enum_def->doc_comment = doc_comment_;
  enum_def->is_union = is_union;
  enum_def->defined_namespace = current_namespace_;
  const auto qualified = current_namespace_->GetFullyQualifiedName(enum_name);
  if (enums_.Add(qualified, enum_def))
    return Error("enum already exists: " + enum_name);
  // Unions carry their type tag as UTYPE; plain enums default to int
  // (callers may override this with an explicit underlying type).
  enum_def->underlying_type.base_type =
      is_union ? BASE_TYPE_UTYPE : BASE_TYPE_INT;
  enum_def->underlying_type.enum_def = enum_def;
  if (dest) *dest = enum_def;
  return NoError();
}
// Parse the body ('{' ... '}') of a .proto message/extend/oneof, adding the
// fields found to `struct_def`.  Handles nested declarations, skipped
// constructs (extensions/options/reserved), field qualifiers, the
// repeated->vector mapping, groups and oneofs (mapped to anonymous structs
// or unions), and bracketed options such as default/deprecated.
CheckedError Parser::ParseProtoFields(StructDef *struct_def, bool isextend,
                                      bool inside_oneof) {
  EXPECT('{');
  while (token_ != '}') {
    if (IsIdent("message") || IsIdent("extend") || IsIdent("enum")) {
      // Nested declarations.
      ECHECK(ParseProtoDecl());
    } else if (IsIdent("extensions")) {  // Skip these.
      NEXT();
      EXPECT(kTokenIntegerConstant);
      if (Is(kTokenIdentifier)) {
        NEXT();  // to
        NEXT();  // num
      }
      EXPECT(';');
    } else if (IsIdent("option")) {  // Skip these.
      ECHECK(ParseProtoOption());
      EXPECT(';');
    } else if (IsIdent("reserved")) {  // Skip these.
      NEXT();
      while (!Is(';')) { NEXT(); }  // A variety of formats, just skip.
      NEXT();
    } else {
      std::vector<std::string> field_comment = doc_comment_;
      // Parse the qualifier.
      bool required = false;
      bool repeated = false;
      bool oneof = false;
      if (!inside_oneof) {
        if (IsIdent("optional")) {
          // This is the default.
          NEXT();
        } else if (IsIdent("required")) {
          required = true;
          NEXT();
        } else if (IsIdent("repeated")) {
          repeated = true;
          NEXT();
        } else if (IsIdent("oneof")) {
          oneof = true;
          NEXT();
        } else {
          // can't error, proto3 allows decls without any of the above.
        }
      }
      StructDef *anonymous_struct = nullptr;
      EnumDef *oneof_union = nullptr;
      Type type;
      if (IsIdent("group") || oneof) {
        if (!oneof) NEXT();
        if (oneof && opts.proto_oneof_union) {
          // Map the oneof onto a FlatBuffers union type.
          auto name = MakeCamel(attribute_, true) + "Union";
          ECHECK(StartEnum(name, true, &oneof_union));
          type = Type(BASE_TYPE_UNION, nullptr, oneof_union);
        } else {
          // Otherwise flatten group/oneof members into an anonymous struct.
          auto name = "Anonymous" + NumToString(anonymous_counter++);
          ECHECK(StartStruct(name, &anonymous_struct));
          type = Type(BASE_TYPE_STRUCT, anonymous_struct);
        }
      } else {
        ECHECK(ParseTypeFromProtoType(&type));
      }
      // Repeated elements get mapped to a vector.
      if (repeated) {
        type.element = type.base_type;
        type.base_type = BASE_TYPE_VECTOR;
        if (type.element == BASE_TYPE_VECTOR) {
          // We have a vector or vectors, which FlatBuffers doesn't support.
          // For now make it a vector of string (since the source is likely
          // "repeated bytes").
          // TODO(wvo): A better solution would be to wrap this in a table.
          type.element = BASE_TYPE_STRING;
        }
      }
      std::string name = attribute_;
      EXPECT(kTokenIdentifier);
      if (!oneof) {
        // Parse the field id. Since we're just translating schemas, not
        // any kind of binary compatibility, we can safely ignore these, and
        // assign our own.
        EXPECT('=');
        EXPECT(kTokenIntegerConstant);
      }
      FieldDef *field = nullptr;
      if (isextend) {
        // We allow a field to be re-defined when extending.
        // TODO: are there situations where that is problematic?
        field = struct_def->fields.Lookup(name);
      }
      if (!field) ECHECK(AddField(*struct_def, name, type, &field));
      field->doc_comment = field_comment;
      if (!IsScalar(type.base_type)) field->required = required;
      // See if there's a default specified.
      if (Is('[')) {
        NEXT();
        for (;;) {
          auto key = attribute_;
          ECHECK(ParseProtoKey());
          EXPECT('=');
          auto val = attribute_;
          ECHECK(ParseProtoCurliesOrIdent());
          if (key == "default") {
            // Temp: skip non-numeric defaults (enums).
            auto numeric = strpbrk(val.c_str(), "0123456789-+.");
            if (IsScalar(type.base_type) && numeric == val.c_str())
              field->value.constant = val;
          } else if (key == "deprecated") {
            field->deprecated = val == "true";
          }
          if (!Is(',')) break;
          NEXT();
        }
        EXPECT(']');
      }
      if (anonymous_struct) {
        ECHECK(ParseProtoFields(anonymous_struct, false, oneof));
        if (Is(';')) NEXT();
      } else if (oneof_union) {
        // Parse into a temporary StructDef, then transfer fields into an
        // EnumDef describing the oneof as a union.
        StructDef oneof_struct;
        ECHECK(ParseProtoFields(&oneof_struct, false, oneof));
        if (Is(';')) NEXT();
        for (auto field_it = oneof_struct.fields.vec.begin();
             field_it != oneof_struct.fields.vec.end(); ++field_it) {
          const auto &oneof_field = **field_it;
          const auto &oneof_type = oneof_field.value.type;
          if (oneof_type.base_type != BASE_TYPE_STRUCT ||
              !oneof_type.struct_def || oneof_type.struct_def->fixed)
            return Error("oneof '" + name +
                         "' cannot be mapped to a union because member '" +
                         oneof_field.name + "' is not a table type.");
          EnumValBuilder evb(*this, *oneof_union);
          auto ev = evb.CreateEnumerator(oneof_type.struct_def->name);
          ev->union_type = oneof_type;
          ev->doc_comment = oneof_field.doc_comment;
          ECHECK(evb.AcceptEnumerator(oneof_field.name));
        }
      } else {
        EXPECT(';');
      }
    }
  }
  NEXT();
  return NoError();
}
// Parse a proto option/annotation key: either a plain identifier, or a
// "(a.b)" style custom attribute optionally followed by ".ident" suffixes.
// The key itself is skipped, not recorded.
CheckedError Parser::ParseProtoKey() {
  if (token_ == '(') {
    NEXT();
    // Skip "(a.b)" style custom attributes.
    while (token_ == '.' || token_ == kTokenIdentifier) NEXT();
    EXPECT(')');
    // Consume any trailing ".identifier" qualifiers after the parens.
    while (Is('.')) {
      NEXT();
      EXPECT(kTokenIdentifier);
    }
  } else {
    EXPECT(kTokenIdentifier);
  }
  return NoError();
}
// Skip a proto option value: either a balanced "{ ... }" aggregate
// (including nested braces) or any single token.
CheckedError Parser::ParseProtoCurliesOrIdent() {
  if (Is('{')) {
    NEXT();
    // Track brace nesting so nested aggregates are skipped entirely.
    for (int nesting = 1; nesting;) {
      if (token_ == '{')
        nesting++;
      else if (token_ == '}')
        nesting--;
      NEXT();
    }
  } else {
    NEXT();  // Any single token.
  }
  return NoError();
}
// Parse a proto "option key = value;" statement; both key and value are
// consumed and discarded (options are not translated to FlatBuffers).
CheckedError Parser::ParseProtoOption() {
  NEXT();
  ECHECK(ParseProtoKey());
  EXPECT('=');
  ECHECK(ParseProtoCurliesOrIdent());
  return NoError();
}
// Parse a protobuf type, and map it to the corresponding FlatBuffer one.
CheckedError Parser::ParseTypeFromProtoType(Type *type) {
  struct type_lookup {
    const char *proto_type;
    BaseType fb_type, element;
  };
  // Builtin proto scalar names and their FlatBuffers equivalents; nullptr
  // sentinel terminates the table. Note "bytes" becomes a vector of ubyte.
  static type_lookup lookup[] = {
    { "float", BASE_TYPE_FLOAT, BASE_TYPE_NONE },
    { "double", BASE_TYPE_DOUBLE, BASE_TYPE_NONE },
    { "int32", BASE_TYPE_INT, BASE_TYPE_NONE },
    { "int64", BASE_TYPE_LONG, BASE_TYPE_NONE },
    { "uint32", BASE_TYPE_UINT, BASE_TYPE_NONE },
    { "uint64", BASE_TYPE_ULONG, BASE_TYPE_NONE },
    { "sint32", BASE_TYPE_INT, BASE_TYPE_NONE },
    { "sint64", BASE_TYPE_LONG, BASE_TYPE_NONE },
    { "fixed32", BASE_TYPE_UINT, BASE_TYPE_NONE },
    { "fixed64", BASE_TYPE_ULONG, BASE_TYPE_NONE },
    { "sfixed32", BASE_TYPE_INT, BASE_TYPE_NONE },
    { "sfixed64", BASE_TYPE_LONG, BASE_TYPE_NONE },
    { "bool", BASE_TYPE_BOOL, BASE_TYPE_NONE },
    { "string", BASE_TYPE_STRING, BASE_TYPE_NONE },
    { "bytes", BASE_TYPE_VECTOR, BASE_TYPE_UCHAR },
    { nullptr, BASE_TYPE_NONE, BASE_TYPE_NONE }
  };
  for (auto tl = lookup; tl->proto_type; tl++) {
    if (attribute_ == tl->proto_type) {
      type->base_type = tl->fb_type;
      type->element = tl->element;
      NEXT();
      return NoError();
    }
  }
  // Not a builtin scalar: must be a user-defined message/enum type name.
  if (Is('.')) NEXT();  // qualified names may start with a . ?
  ECHECK(ParseTypeIdent(*type));
  return NoError();
}
// Skip over one JSON value (object, array, scalar, or true/false/null)
// without building anything.
CheckedError Parser::SkipAnyJsonValue() {
  switch (token_) {
    case '{': {
      size_t fieldn_outer = 0;
      return ParseTableDelimiters(
          fieldn_outer, nullptr,
          [&](const std::string &, size_t &fieldn,
              const StructDef *) -> CheckedError {
            // Recurse() wraps the nested call — presumably to bound
            // recursion depth (cf. recurse_protection_counter in Parse()).
            ECHECK(Recurse([&]() { return SkipAnyJsonValue(); }));
            fieldn++;
            return NoError();
          });
    }
    case '[': {
      uoffset_t count = 0;
      return ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
        return Recurse([&]() { return SkipAnyJsonValue(); });
      });
    }
    case kTokenStringConstant:
    case kTokenIntegerConstant:
    case kTokenFloatConstant: NEXT(); break;
    default:
      if (IsIdent("true") || IsIdent("false") || IsIdent("null")) {
        NEXT();
      } else
        return TokenError();
  }
  return NoError();
}
// Parse one JSON value directly into a FlexBuffer via `builder`:
// maps, vectors, strings, ints, floats, booleans and null.
CheckedError Parser::ParseFlexBufferValue(flexbuffers::Builder *builder) {
  switch (token_) {
    case '{': {
      auto start = builder->StartMap();
      size_t fieldn_outer = 0;
      auto err =
          ParseTableDelimiters(fieldn_outer, nullptr,
                               [&](const std::string &name, size_t &fieldn,
                                   const StructDef *) -> CheckedError {
                                 // Key first, then the (possibly nested) value.
                                 builder->Key(name);
                                 ECHECK(ParseFlexBufferValue(builder));
                                 fieldn++;
                                 return NoError();
                               });
      ECHECK(err);
      builder->EndMap(start);
      break;
    }
    case '[': {
      auto start = builder->StartVector();
      uoffset_t count = 0;
      ECHECK(ParseVectorDelimiters(count, [&](uoffset_t &) -> CheckedError {
        return ParseFlexBufferValue(builder);
      }));
      builder->EndVector(start, false, false);
      break;
    }
    case kTokenStringConstant:
      builder->String(attribute_);
      EXPECT(kTokenStringConstant);
      break;
    case kTokenIntegerConstant:
      builder->Int(StringToInt(attribute_.c_str()));
      EXPECT(kTokenIntegerConstant);
      break;
    case kTokenFloatConstant:
      builder->Double(strtod(attribute_.c_str(), nullptr));
      EXPECT(kTokenFloatConstant);
      break;
    default:
      if (IsIdent("true")) {
        builder->Bool(true);
        NEXT();
      } else if (IsIdent("false")) {
        builder->Bool(false);
        NEXT();
      } else if (IsIdent("null")) {
        builder->Null();
        NEXT();
      } else
        return TokenError();
  }
  return NoError();
}
// Parse `source` (JSON) straight into a FlexBuffer. Returns true on
// success, in which case `builder` has been finished.
bool Parser::ParseFlexBuffer(const char *source, const char *source_filename,
                             flexbuffers::Builder *builder) {
  // Bail out on the first failing stage; Check() is true on error.
  if (StartParseFile(source, source_filename).Check()) return false;
  if (ParseFlexBufferValue(builder).Check()) return false;
  builder->Finish();
  return true;
}
// Main entry point: parse `source` either as a FlexBuffer (when
// opts.use_flexbuffers is set) or as a schema/JSON file. Returns true on
// success; errors are reported through the parser's error state.
bool Parser::Parse(const char *source, const char **include_paths,
                   const char *source_filename) {
  FLATBUFFERS_ASSERT(0 == recurse_protection_counter);
  const bool ok =
      opts.use_flexbuffers
          ? ParseFlexBuffer(source, source_filename, &flex_builder_)
          : !ParseRoot(source, include_paths, source_filename).Check();
  // Recursion bookkeeping must be balanced after a full parse.
  FLATBUFFERS_ASSERT(0 == recurse_protection_counter);
  return ok;
}
// Reset lexer/parser state and position on the first token of `source`,
// past any byte-order mark. Errors if the input is empty.
CheckedError Parser::StartParseFile(const char *source,
                                    const char *source_filename) {
  file_being_parsed_ = source_filename ? source_filename : "";
  source_ = source;
  ResetState(source_);
  error_.clear();
  ECHECK(SkipByteOrderMark());
  NEXT();  // Load the first token.
  if (Is(kTokenEof)) return Error("input file is empty");
  return NoError();
}
// Parse a schema (and its includes), then run whole-schema checks:
// resolve forward declarations (including proto enums used before their
// declaration) and validate union members.
CheckedError Parser::ParseRoot(const char *source, const char **include_paths,
                               const char *source_filename) {
  ECHECK(DoParse(source, include_paths, source_filename, nullptr));
  // Check that all types were defined.
  for (auto it = structs_.vec.begin(); it != structs_.vec.end();) {
    auto &struct_def = **it;
    if (struct_def.predecl) {
      if (opts.proto_mode) {
        // Protos allow enums to be used before declaration, so check if that
        // is the case here.
        EnumDef *enum_def = nullptr;
        // Try progressively shorter namespace qualifications until a
        // matching enum is found (or none).
        for (size_t components =
                 struct_def.defined_namespace->components.size() + 1;
             components && !enum_def; components--) {
          auto qualified_name =
              struct_def.defined_namespace->GetFullyQualifiedName(
                  struct_def.name, components - 1);
          enum_def = LookupEnum(qualified_name);
        }
        if (enum_def) {
          // This is pretty slow, but a simple solution for now.
          auto initial_count = struct_def.refcount;
          // Rewrite every field that referenced the placeholder struct to
          // reference the enum instead.
          for (auto struct_it = structs_.vec.begin();
               struct_it != structs_.vec.end(); ++struct_it) {
            auto &sd = **struct_it;
            for (auto field_it = sd.fields.vec.begin();
                 field_it != sd.fields.vec.end(); ++field_it) {
              auto &field = **field_it;
              if (field.value.type.struct_def == &struct_def) {
                field.value.type.struct_def = nullptr;
                field.value.type.enum_def = enum_def;
                // Patch either the element type (for vectors) or the base
                // type to the enum's underlying scalar type.
                auto &bt = field.value.type.base_type == BASE_TYPE_VECTOR
                               ? field.value.type.element
                               : field.value.type.base_type;
                FLATBUFFERS_ASSERT(bt == BASE_TYPE_STRUCT);
                bt = enum_def->underlying_type.base_type;
                struct_def.refcount--;
                enum_def->refcount++;
              }
            }
          }
          if (struct_def.refcount)
            return Error("internal: " + NumToString(struct_def.refcount) + "/" +
                         NumToString(initial_count) +
                         " use(s) of pre-declaration enum not accounted for: " +
                         enum_def->name);
          // The placeholder struct is no longer referenced; remove it.
          structs_.dict.erase(structs_.dict.find(struct_def.name));
          it = structs_.vec.erase(it);
          delete &struct_def;
          continue;  // Skip error.
        }
      }
      auto err = "type referenced but not defined (check namespace): " +
                 struct_def.name;
      if (struct_def.original_location)
        err += ", originally at: " + *struct_def.original_location;
      return Error(err);
    }
    ++it;
  }
  // This check has to happen here and not earlier, because only now do we
  // know for sure what the type of these are.
  for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) {
    auto &enum_def = **it;
    if (enum_def.is_union) {
      for (auto val_it = enum_def.Vals().begin();
           val_it != enum_def.Vals().end(); ++val_it) {
        auto &val = **val_it;
        if (!SupportsAdvancedUnionFeatures() && val.union_type.struct_def &&
            val.union_type.struct_def->fixed)
          return Error(
              "only tables can be union elements in the generated language: " +
              val.name);
      }
    }
  }
  return NoError();
}
// Parse one file into the parser's state, recursing for includes.
// `include_filename` is the name this file was included as, or nullptr for
// the top-level file. Files already in included_files_ are skipped.
CheckedError Parser::DoParse(const char *source, const char **include_paths,
                             const char *source_filename,
                             const char *include_filename) {
  if (source_filename) {
    if (included_files_.find(source_filename) == included_files_.end()) {
      included_files_[source_filename] =
          include_filename ? include_filename : "";
      files_included_per_file_[source_filename] = std::set<std::string>();
    } else {
      // Already parsed: nothing to do.
      return NoError();
    }
  }
  if (!include_paths) {
    // Default to searching only the current directory.
    static const char *current_directory[] = { "", nullptr };
    include_paths = current_directory;
  }
  field_stack_.clear();
  builder_.Clear();
  // Start with a blank namespace just in case this file doesn't have one.
  current_namespace_ = empty_namespace_;
  ECHECK(StartParseFile(source, source_filename));
  // Includes must come before type declarations:
  for (;;) {
    // Parse pre-include proto statements if any:
    if (opts.proto_mode && (attribute_ == "option" || attribute_ == "syntax" ||
                            attribute_ == "package")) {
      ECHECK(ParseProtoDecl());
    } else if (IsIdent("native_include")) {
      NEXT();
      vector_emplace_back(&native_included_files_, attribute_);
      EXPECT(kTokenStringConstant);
      EXPECT(';');
    } else if (IsIdent("include") || (opts.proto_mode && IsIdent("import"))) {
      NEXT();
      if (opts.proto_mode && attribute_ == "public") NEXT();
      auto name = flatbuffers::PosixPath(attribute_.c_str());
      EXPECT(kTokenStringConstant);
      // Look for the file in include_paths.
      std::string filepath;
      for (auto paths = include_paths; paths && *paths; paths++) {
        filepath = flatbuffers::ConCatPathFileName(*paths, name);
        if (FileExists(filepath.c_str())) break;
      }
      if (filepath.empty())
        return Error("unable to locate include file: " + name);
      if (source_filename)
        files_included_per_file_[source_filename].insert(filepath);
      if (included_files_.find(filepath) == included_files_.end()) {
        // We found an include file that we have not parsed yet.
        // Load it and parse it.
        std::string contents;
        if (!LoadFile(filepath.c_str(), true, &contents))
          return Error("unable to load include file: " + name);
        ECHECK(DoParse(contents.c_str(), include_paths, filepath.c_str(),
                       name.c_str()));
        // We generally do not want to output code for any included files:
        if (!opts.generate_all) MarkGenerated();
        // Reset these just in case the included file had them, and the
        // parent doesn't.
        root_struct_def_ = nullptr;
        file_identifier_.clear();
        file_extension_.clear();
        // This is the easiest way to continue this file after an include:
        // instead of saving and restoring all the state, we simply start the
        // file anew. This will cause it to encounter the same include
        // statement again, but this time it will skip it, because it was
        // entered into included_files_.
        // This is recursive, but only go as deep as the number of include
        // statements.
        if (source_filename) { included_files_.erase(source_filename); }
        return DoParse(source, include_paths, source_filename,
                       include_filename);
      }
      EXPECT(';');
    } else {
      break;
    }
  }
  // Now parse all other kinds of declarations:
  while (token_ != kTokenEof) {
    if (opts.proto_mode) {
      ECHECK(ParseProtoDecl());
    } else if (IsIdent("namespace")) {
      ECHECK(ParseNamespace());
    } else if (token_ == '{') {
      // A bare '{' means JSON data for the root type, not a schema decl.
      if (!root_struct_def_)
        return Error("no root type set to parse json with");
      if (builder_.GetSize()) {
        return Error("cannot have more than one json object in a file");
      }
      uoffset_t toff;
      ECHECK(ParseTable(*root_struct_def_, nullptr, &toff));
      if (opts.size_prefixed) {
        builder_.FinishSizePrefixed(
            Offset<Table>(toff),
            file_identifier_.length() ? file_identifier_.c_str() : nullptr);
      } else {
        builder_.Finish(Offset<Table>(toff), file_identifier_.length()
                                                 ? file_identifier_.c_str()
                                                 : nullptr);
      }
      // Check that JSON file doesn't contain more objects or IDL directives.
      // Comments after JSON are allowed.
      EXPECT(kTokenEof);
    } else if (IsIdent("enum")) {
      ECHECK(ParseEnum(false, nullptr));
    } else if (IsIdent("union")) {
      ECHECK(ParseEnum(true, nullptr));
    } else if (IsIdent("root_type")) {
      NEXT();
      auto root_type = attribute_;
      EXPECT(kTokenIdentifier);
      ECHECK(ParseNamespacing(&root_type, nullptr));
      // A root type given via options takes precedence over the schema's.
      if (opts.root_type.empty()) {
        if (!SetRootType(root_type.c_str()))
          return Error("unknown root type: " + root_type);
        if (root_struct_def_->fixed) return Error("root type must be a table");
      }
      EXPECT(';');
    } else if (IsIdent("file_identifier")) {
      NEXT();
      file_identifier_ = attribute_;
      EXPECT(kTokenStringConstant);
      if (file_identifier_.length() != FlatBufferBuilder::kFileIdentifierLength)
        return Error("file_identifier must be exactly " +
                     NumToString(FlatBufferBuilder::kFileIdentifierLength) +
                     " characters");
      EXPECT(';');
    } else if (IsIdent("file_extension")) {
      NEXT();
      file_extension_ = attribute_;
      EXPECT(kTokenStringConstant);
      EXPECT(';');
    } else if (IsIdent("include")) {
      return Error("includes must come before declarations");
    } else if (IsIdent("attribute")) {
      NEXT();
      auto name = attribute_;
      if (Is(kTokenIdentifier)) {
        NEXT();
      } else {
        EXPECT(kTokenStringConstant);
      }
      EXPECT(';');
      // Register user-declared attribute; false = not a builtin
      // (see Definition::SerializeAttributes).
      known_attributes_[name] = false;
    } else if (IsIdent("rpc_service")) {
      ECHECK(ParseService());
    } else {
      ECHECK(ParseDecl());
    }
  }
  return NoError();
}
// Compute the transitive closure of files included by `file_name`
// (the result contains `file_name` itself). Breadth-first traversal over
// files_included_per_file_.
std::set<std::string> Parser::GetIncludedFilesRecursive(
    const std::string &file_name) const {
  std::set<std::string> visited;
  if (file_name.empty()) return visited;
  std::list<std::string> pending;
  pending.push_back(file_name);
  while (!pending.empty()) {
    std::string current = pending.front();
    pending.pop_front();
    visited.insert(current);
    // Workaround the lack of const accessor in C++98 maps.
    auto &direct_includes =
        (*const_cast<std::map<std::string, std::set<std::string>> *>(
            &files_included_per_file_))[current];
    for (auto it = direct_includes.begin(); it != direct_includes.end(); ++it) {
      // Only enqueue files not seen yet to guarantee termination on cycles.
      if (visited.find(*it) == visited.end()) pending.push_back(*it);
    }
  }
  return visited;
}
// Schema serialization functionality:
// Order definitions by their fully qualified names, for stable output.
template<typename T> bool compareName(const T *a, const T *b) {
  const auto lhs = a->defined_namespace->GetFullyQualifiedName(a->name);
  const auto rhs = b->defined_namespace->GetFullyQualifiedName(b->name);
  return lhs < rhs;
}
// Pre-sort these vectors, such that we can set the correct indices for them.
// Works on a copy so the caller's declaration order is left untouched.
template<typename T> void AssignIndices(const std::vector<T *> &defvec) {
  auto sorted = defvec;
  std::sort(sorted.begin(), sorted.end(), compareName<T>);
  int index = 0;
  for (auto it = sorted.begin(); it != sorted.end(); ++it) {
    (*it)->index = index++;
  }
}
// Serialize the parsed schema into builder_ as a reflection.Schema
// FlatBuffer (i.e. a binary schema).
void Parser::Serialize() {
  builder_.Clear();
  // Assign sorted indices first, since Type::Serialize references struct
  // and enum definitions by index.
  AssignIndices(structs_.vec);
  AssignIndices(enums_.vec);
  std::vector<Offset<reflection::Object>> object_offsets;
  for (auto it = structs_.vec.begin(); it != structs_.vec.end(); ++it) {
    auto offset = (*it)->Serialize(&builder_, *this);
    object_offsets.push_back(offset);
    // Remember where each def landed so others can point at it.
    (*it)->serialized_location = offset.o;
  }
  std::vector<Offset<reflection::Enum>> enum_offsets;
  for (auto it = enums_.vec.begin(); it != enums_.vec.end(); ++it) {
    auto offset = (*it)->Serialize(&builder_, *this);
    enum_offsets.push_back(offset);
    (*it)->serialized_location = offset.o;
  }
  std::vector<Offset<reflection::Service>> service_offsets;
  for (auto it = services_.vec.begin(); it != services_.vec.end(); ++it) {
    auto offset = (*it)->Serialize(&builder_, *this);
    service_offsets.push_back(offset);
    (*it)->serialized_location = offset.o;
  }
  auto objs__ = builder_.CreateVectorOfSortedTables(&object_offsets);
  auto enum__ = builder_.CreateVectorOfSortedTables(&enum_offsets);
  auto fiid__ = builder_.CreateString(file_identifier_);
  auto fext__ = builder_.CreateString(file_extension_);
  auto serv__ = builder_.CreateVectorOfSortedTables(&service_offsets);
  auto schema_offset = reflection::CreateSchema(
      builder_, objs__, enum__, fiid__, fext__,
      (root_struct_def_ ? root_struct_def_->serialized_location : 0), serv__);
  if (opts.size_prefixed) {
    builder_.FinishSizePrefixed(schema_offset, reflection::SchemaIdentifier());
  } else {
    builder_.Finish(schema_offset, reflection::SchemaIdentifier());
  }
}
// Look up (or lazily create) the Namespace for the namespace part of
// `qualified_name` (everything before the last '.'), keeping `namespaces`
// and `namespaces_index` in sync.
static Namespace *GetNamespace(
    const std::string &qualified_name, std::vector<Namespace *> &namespaces,
    std::map<std::string, Namespace *> &namespaces_index) {
  const size_t last_dot = qualified_name.find_last_of('.');
  const std::string namespace_name =
      (last_dot == std::string::npos) ? ""
                                      : qualified_name.substr(0, last_dot);
  Namespace *&ns = namespaces_index[namespace_name];
  if (ns) return ns;
  ns = new Namespace();
  namespaces.push_back(ns);
  // Split on '.'; the final segment (the definition's own name) is
  // deliberately not pushed as a component.
  size_t start = 0;
  for (size_t dot = qualified_name.find('.'); dot != std::string::npos;
       dot = qualified_name.find('.', start)) {
    ns->components.push_back(qualified_name.substr(start, dot - start));
    start = dot + 1;
  }
  return ns;
}
// Serialize this struct/table definition (fields, attributes, doc comments)
// as a reflection.Object.
Offset<reflection::Object> StructDef::Serialize(FlatBufferBuilder *builder,
                                                const Parser &parser) const {
  std::vector<Offset<reflection::Field>> field_offsets;
  for (auto it = fields.vec.begin(); it != fields.vec.end(); ++it) {
    // The field id is its position in declaration order.
    field_offsets.push_back((*it)->Serialize(
        builder, static_cast<uint16_t>(it - fields.vec.begin()), parser));
  }
  auto qualified_name = defined_namespace->GetFullyQualifiedName(name);
  auto name__ = builder->CreateString(qualified_name);
  auto flds__ = builder->CreateVectorOfSortedTables(&field_offsets);
  auto attr__ = SerializeAttributes(builder, parser);
  auto docs__ = parser.opts.binary_schema_comments
                    ? builder->CreateVectorOfStrings(doc_comment)
                    : 0;
  return reflection::CreateObject(*builder, name__, flds__, fixed,
                                  static_cast<int>(minalign),
                                  static_cast<int>(bytesize), attr__, docs__);
}
// Rebuild this struct/table definition from a reflection.Object, including
// recomputing struct padding (which is not serialized).
bool StructDef::Deserialize(Parser &parser, const reflection::Object *object) {
  if (!DeserializeAttributes(parser, object->attributes())) return false;
  DeserializeDoc(doc_comment, object->documentation());
  name = parser.UnqualifiedName(object->name()->str());
  predecl = false;
  sortbysize = attributes.Lookup("original_order") == nullptr && !fixed;
  const auto &of = *(object->fields());
  // `indexes` maps each field's id() back to its position in the serialized
  // field vector, so fields can be visited in id (declaration) order.
  auto indexes = std::vector<uoffset_t>(of.size());
  for (uoffset_t i = 0; i < of.size(); i++) indexes[of.Get(i)->id()] = i;
  size_t tmp_struct_size = 0;
  for (size_t i = 0; i < indexes.size(); i++) {
    auto field = of.Get(indexes[i]);
    auto field_def = new FieldDef();
    if (!field_def->Deserialize(parser, field) ||
        fields.Add(field_def->name, field_def)) {
      delete field_def;
      return false;
    }
    if (fixed) {
      // Recompute padding since that's currently not serialized.
      auto size = InlineSize(field_def->value.type);
      auto next_field =
          i + 1 < indexes.size() ? of.Get(indexes[i + 1]) : nullptr;
      tmp_struct_size += size;
      // Padding is the gap to the next field's offset, or to minalign for
      // the last field.
      field_def->padding =
          next_field ? (next_field->offset() - field_def->value.offset) - size
                     : PaddingBytes(tmp_struct_size, minalign);
      tmp_struct_size += field_def->padding;
    }
  }
  FLATBUFFERS_ASSERT(static_cast<int>(tmp_struct_size) == object->bytesize());
  return true;
}
// Serialize this field as a reflection.Field; `id` is the field's position
// in declaration order (assigned by StructDef::Serialize).
Offset<reflection::Field> FieldDef::Serialize(FlatBufferBuilder *builder,
                                              uint16_t id,
                                              const Parser &parser) const {
  auto name__ = builder->CreateString(name);
  auto type__ = value.type.Serialize(builder);
  auto attr__ = SerializeAttributes(builder, parser);
  auto docs__ = parser.opts.binary_schema_comments
                    ? builder->CreateVectorOfStrings(doc_comment)
                    : 0;
  return reflection::CreateField(
      *builder, name__, type__, id, value.offset,
      // Is uint64>max(int64) tested?
      IsInteger(value.type.base_type) ? StringToInt(value.constant.c_str()) : 0,
      // result may be platform-dependent if underlying is float (not double)
      IsFloat(value.type.base_type) ? strtod(value.constant.c_str(), nullptr)
                                    : 0.0,
      deprecated, required, key, attr__, docs__);
  // TODO: value.constant is almost always "0", we could save quite a bit of
  // space by sharing it. Same for common values of value.type.
}
// Rebuild a field definition from its reflection representation: type,
// offset, scalar default, flags, and the attribute-driven flexbuffer /
// nested_flatbuffer special cases. Returns false on any inconsistency.
bool FieldDef::Deserialize(Parser &parser, const reflection::Field *field) {
  name = field->name()->str();
  defined_namespace = parser.current_namespace_;
  if (!value.type.Deserialize(parser, field->type())) return false;
  value.offset = field->offset();
  if (IsInteger(value.type.base_type)) {
    value.constant = NumToString(field->default_integer());
  } else if (IsFloat(value.type.base_type)) {
    value.constant = FloatToString(field->default_real(), 16);
    // Trim trailing zeros, e.g. "0.5000000" -> "0.5". The erase must start
    // one PAST the last non-zero character; erasing from `last_zero` itself
    // would drop the last significant digit as well ("0.5" -> "0.").
    size_t last_zero = value.constant.find_last_not_of('0');
    if (last_zero != std::string::npos && last_zero != 0) {
      value.constant.erase(last_zero + 1, std::string::npos);
    }
  }
  deprecated = field->deprecated();
  required = field->required();
  key = field->key();
  if (!DeserializeAttributes(parser, field->attributes())) return false;
  // TODO: this should probably be handled by a separate attribute
  if (attributes.Lookup("flexbuffer")) {
    flexbuffer = true;
    parser.uses_flexbuffers_ = true;
    // Flexbuffer content must be stored as a vector of ubyte.
    if (value.type.base_type != BASE_TYPE_VECTOR ||
        value.type.element != BASE_TYPE_UCHAR)
      return false;
  }
  if (auto nested = attributes.Lookup("nested_flatbuffer")) {
    auto nested_qualified_name =
        parser.current_namespace_->GetFullyQualifiedName(nested->constant);
    nested_flatbuffer = parser.LookupStruct(nested_qualified_name);
    if (!nested_flatbuffer) return false;
  }
  DeserializeDoc(doc_comment, field->documentation());
  return true;
}
// Serialize this RPC call as a reflection.RPCCall; request/response tables
// are referenced by their already-serialized locations.
Offset<reflection::RPCCall> RPCCall::Serialize(FlatBufferBuilder *builder,
                                               const Parser &parser) const {
  auto name__ = builder->CreateString(name);
  auto attr__ = SerializeAttributes(builder, parser);
  auto docs__ = parser.opts.binary_schema_comments
                    ? builder->CreateVectorOfStrings(doc_comment)
                    : 0;
  return reflection::CreateRPCCall(
      *builder, name__, request->serialized_location,
      response->serialized_location, attr__, docs__);
}
// Rebuild an RPC call from reflection data; its request/response structs
// must already exist in the parser's symbol table.
bool RPCCall::Deserialize(Parser &parser, const reflection::RPCCall *call) {
  name = call->name()->str();
  if (!DeserializeAttributes(parser, call->attributes())) return false;
  DeserializeDoc(doc_comment, call->documentation());
  request = parser.structs_.Lookup(call->request()->name()->str());
  response = parser.structs_.Lookup(call->response()->name()->str());
  return request != nullptr && response != nullptr;
}
// Serialize this service definition (calls, attributes, docs) as a
// reflection.Service.
Offset<reflection::Service> ServiceDef::Serialize(FlatBufferBuilder *builder,
                                                  const Parser &parser) const {
  std::vector<Offset<reflection::RPCCall>> servicecall_offsets;
  for (auto it = calls.vec.begin(); it != calls.vec.end(); ++it) {
    servicecall_offsets.push_back((*it)->Serialize(builder, parser));
  }
  auto qualified_name = defined_namespace->GetFullyQualifiedName(name);
  auto name__ = builder->CreateString(qualified_name);
  auto call__ = builder->CreateVector(servicecall_offsets);
  auto attr__ = SerializeAttributes(builder, parser);
  auto docs__ = parser.opts.binary_schema_comments
                    ? builder->CreateVectorOfStrings(doc_comment)
                    : 0;
  return reflection::CreateService(*builder, name__, call__, attr__, docs__);
}
// Rebuild a service definition (and its RPC calls) from reflection data.
bool ServiceDef::Deserialize(Parser &parser,
                             const reflection::Service *service) {
  name = parser.UnqualifiedName(service->name()->str());
  if (service->calls()) {
    for (uoffset_t i = 0; i < service->calls()->size(); ++i) {
      auto call = new RPCCall();
      // Failure or duplicate name: free the call and abort.
      if (!call->Deserialize(parser, service->calls()->Get(i)) ||
          calls.Add(call->name, call)) {
        delete call;
        return false;
      }
    }
  }
  if (!DeserializeAttributes(parser, service->attributes())) return false;
  DeserializeDoc(doc_comment, service->documentation());
  return true;
}
// Serialize this enum/union definition as a reflection.Enum.
Offset<reflection::Enum> EnumDef::Serialize(FlatBufferBuilder *builder,
                                            const Parser &parser) const {
  std::vector<Offset<reflection::EnumVal>> enumval_offsets;
  for (auto it = vals.vec.begin(); it != vals.vec.end(); ++it) {
    enumval_offsets.push_back((*it)->Serialize(builder, parser));
  }
  auto qualified_name = defined_namespace->GetFullyQualifiedName(name);
  auto name__ = builder->CreateString(qualified_name);
  auto vals__ = builder->CreateVector(enumval_offsets);
  auto type__ = underlying_type.Serialize(builder);
  auto attr__ = SerializeAttributes(builder, parser);
  auto docs__ = parser.opts.binary_schema_comments
                    ? builder->CreateVectorOfStrings(doc_comment)
                    : 0;
  return reflection::CreateEnum(*builder, name__, vals__, is_union, type__,
                                attr__, docs__);
}
// Rebuild an enum/union definition (and its values) from reflection data.
bool EnumDef::Deserialize(Parser &parser, const reflection::Enum *_enum) {
  name = parser.UnqualifiedName(_enum->name()->str());
  for (uoffset_t i = 0; i < _enum->values()->size(); ++i) {
    auto val = new EnumVal();
    // Failure or duplicate name: free the value and abort.
    if (!val->Deserialize(parser, _enum->values()->Get(i)) ||
        vals.Add(val->name, val)) {
      delete val;
      return false;
    }
  }
  is_union = _enum->is_union();
  if (!underlying_type.Deserialize(parser, _enum->underlying_type())) {
    return false;
  }
  if (!DeserializeAttributes(parser, _enum->attributes())) return false;
  DeserializeDoc(doc_comment, _enum->documentation());
  return true;
}
// Serialize one enum value as a reflection.EnumVal; for union members the
// referenced table's serialized location is recorded as `object`.
Offset<reflection::EnumVal> EnumVal::Serialize(FlatBufferBuilder *builder,
                                               const Parser &parser) const {
  auto name__ = builder->CreateString(name);
  auto type__ = union_type.Serialize(builder);
  auto docs__ = parser.opts.binary_schema_comments
                    ? builder->CreateVectorOfStrings(doc_comment)
                    : 0;
  return reflection::CreateEnumVal(
      *builder, name__, value,
      union_type.struct_def ? union_type.struct_def->serialized_location : 0,
      type__, docs__);
}
// Rebuild one enum value (name, numeric value, union member type, docs)
// from reflection data.
bool EnumVal::Deserialize(const Parser &parser,
                          const reflection::EnumVal *val) {
  name = val->name()->str();
  value = val->value();
  if (!union_type.Deserialize(parser, val->union_type())) return false;
  DeserializeDoc(doc_comment, val->documentation());
  return true;
}
// Serialize this type as a reflection.Type. The index refers to the sorted
// struct or enum definition it names, or -1 for plain scalars.
Offset<reflection::Type> Type::Serialize(FlatBufferBuilder *builder) const {
  return reflection::CreateType(
      *builder, static_cast<reflection::BaseType>(base_type),
      static_cast<reflection::BaseType>(element),
      struct_def ? struct_def->index : (enum_def ? enum_def->index : -1),
      fixed_length);
}
// Rebuild a Type from reflection data, resolving its index into the
// parser's pre-built struct/enum vectors. A null `type` is treated as
// success (field has no type record).
bool Type::Deserialize(const Parser &parser, const reflection::Type *type) {
  if (type == nullptr) return true;
  base_type = static_cast<BaseType>(type->base_type());
  element = static_cast<BaseType>(type->element());
  fixed_length = type->fixed_length();
  if (type->index() >= 0) {
    // Object (or vector/array of objects) indexes into structs_; anything
    // else with an index refers to an enum.
    bool is_series = type->base_type() == reflection::Vector ||
                     type->base_type() == reflection::Array;
    if (type->base_type() == reflection::Obj ||
        (is_series && type->element() == reflection::Obj)) {
      if (static_cast<size_t>(type->index()) < parser.structs_.vec.size()) {
        struct_def = parser.structs_.vec[type->index()];
        struct_def->refcount++;
      } else {
        // Out-of-range index: corrupt/inconsistent schema buffer.
        return false;
      }
    } else {
      if (static_cast<size_t>(type->index()) < parser.enums_.vec.size()) {
        enum_def = parser.enums_.vec[type->index()];
      } else {
        return false;
      }
    }
  }
  return true;
}
// Serialize this definition's attributes as a sorted KeyValue vector, or 0
// when there are none to serialize.
flatbuffers::Offset<
    flatbuffers::Vector<flatbuffers::Offset<reflection::KeyValue>>>
Definition::SerializeAttributes(FlatBufferBuilder *builder,
                                const Parser &parser) const {
  std::vector<flatbuffers::Offset<reflection::KeyValue>> attrs;
  for (auto kv = attributes.dict.begin(); kv != attributes.dict.end(); ++kv) {
    auto it = parser.known_attributes_.find(kv->first);
    FLATBUFFERS_ASSERT(it != parser.known_attributes_.end());
    // known_attributes_[name] is false for user-declared attributes (see
    // DoParse); builtins are only emitted when explicitly requested.
    if (parser.opts.binary_schema_builtins || !it->second) {
      auto key = builder->CreateString(kv->first);
      auto val = builder->CreateString(kv->second->constant);
      attrs.push_back(reflection::CreateKeyValue(*builder, key, val));
    }
  }
  if (attrs.size()) {
    return builder->CreateVectorOfSortedTables(&attrs);
  } else {
    return 0;
  }
}
// Rebuild this definition's attribute map from a serialized KeyValue
// vector; a null vector is success. Fails on duplicate keys.
bool Definition::DeserializeAttributes(
    Parser &parser, const Vector<Offset<reflection::KeyValue>> *attrs) {
  if (attrs == nullptr) return true;
  for (uoffset_t i = 0; i < attrs->size(); ++i) {
    auto kv = attrs->Get(i);
    auto value = new Value();
    if (kv->value()) { value->constant = kv->value()->str(); }
    if (attributes.Add(kv->key()->str(), value)) {
      delete value;
      return false;
    }
    // Ensure the key is registered in known_attributes_ (default-inserted
    // as false, i.e. non-builtin) so later serialization's lookup succeeds.
    parser.known_attributes_[kv->key()->str()];
  }
  return true;
}
/************************************************************************/
/* DESERIALIZATION */
/************************************************************************/
// Verify and deserialize a binary schema buffer (plain or size-prefixed)
// into this parser's symbol tables. Returns false on any failure.
bool Parser::Deserialize(const uint8_t *buf, const size_t size) {
  flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t *>(buf), size);
  bool size_prefixed = false;
  if (!reflection::SchemaBufferHasIdentifier(buf)) {
    // Not a plain schema buffer; only accept it if the identifier is found
    // at the size-prefixed position.
    if (!flatbuffers::BufferHasIdentifier(buf, reflection::SchemaIdentifier(),
                                          true))
      return false;
    else
      size_prefixed = true;
  }
  auto verify_fn = size_prefixed ? &reflection::VerifySizePrefixedSchemaBuffer
                                 : &reflection::VerifySchemaBuffer;
  if (!verify_fn(verifier)) { return false; }
  auto schema = size_prefixed ? reflection::GetSizePrefixedSchema(buf)
                              : reflection::GetSchema(buf);
  return Deserialize(schema);
}
// Rebuild the parser's symbol tables from a reflection.Schema. Two passes:
// first create empty defs so index-based cross-references can resolve,
// then deserialize contents into them.
bool Parser::Deserialize(const reflection::Schema *schema) {
  file_identifier_ = schema->file_ident() ? schema->file_ident()->str() : "";
  file_extension_ = schema->file_ext() ? schema->file_ext()->str() : "";
  std::map<std::string, Namespace *> namespaces_index;
  // Create defs without deserializing so references from fields to structs and
  // enums can be resolved.
  for (auto it = schema->objects()->begin(); it != schema->objects()->end();
       ++it) {
    auto struct_def = new StructDef();
    struct_def->bytesize = it->bytesize();
    struct_def->fixed = it->is_struct();
    struct_def->minalign = it->minalign();
    if (structs_.Add(it->name()->str(), struct_def)) {
      delete struct_def;
      return false;
    }
    auto type = new Type(BASE_TYPE_STRUCT, struct_def, nullptr);
    if (types_.Add(it->name()->str(), type)) {
      delete type;
      return false;
    }
  }
  for (auto it = schema->enums()->begin(); it != schema->enums()->end(); ++it) {
    auto enum_def = new EnumDef();
    if (enums_.Add(it->name()->str(), enum_def)) {
      delete enum_def;
      return false;
    }
    auto type = new Type(BASE_TYPE_UNION, nullptr, enum_def);
    if (types_.Add(it->name()->str(), type)) {
      delete type;
      return false;
    }
  }
  // Now fields can refer to structs and enums by index.
  for (auto it = schema->objects()->begin(); it != schema->objects()->end();
       ++it) {
    std::string qualified_name = it->name()->str();
    auto struct_def = structs_.Lookup(qualified_name);
    struct_def->defined_namespace =
        GetNamespace(qualified_name, namespaces_, namespaces_index);
    if (!struct_def->Deserialize(*this, *it)) { return false; }
    // The schema records the root table by reference; restore it.
    if (schema->root_table() == *it) { root_struct_def_ = struct_def; }
  }
  for (auto it = schema->enums()->begin(); it != schema->enums()->end(); ++it) {
    std::string qualified_name = it->name()->str();
    auto enum_def = enums_.Lookup(qualified_name);
    enum_def->defined_namespace =
        GetNamespace(qualified_name, namespaces_, namespaces_index);
    if (!enum_def->Deserialize(*this, *it)) { return false; }
  }
  if (schema->services()) {
    for (auto it = schema->services()->begin(); it != schema->services()->end();
         ++it) {
      std::string qualified_name = it->name()->str();
      auto service_def = new ServiceDef();
      service_def->defined_namespace =
          GetNamespace(qualified_name, namespaces_, namespaces_index);
      if (!service_def->Deserialize(*this, *it) ||
          services_.Add(qualified_name, service_def)) {
        delete service_def;
        return false;
      }
    }
  }
  return true;
}
// Check that this (newer) schema is an evolution-compatible superset of
// `base`: matching offsets, defaults and types for shared fields, and
// unchanged enum values. Returns a description of the first violation, or
// "" if conformant.
std::string Parser::ConformTo(const Parser &base) {
  for (auto sit = structs_.vec.begin(); sit != structs_.vec.end(); ++sit) {
    auto &struct_def = **sit;
    auto qualified_name =
        struct_def.defined_namespace->GetFullyQualifiedName(struct_def.name);
    auto struct_def_base = base.LookupStruct(qualified_name);
    // Types new in this schema have nothing to conform to.
    if (!struct_def_base) continue;
    for (auto fit = struct_def.fields.vec.begin();
         fit != struct_def.fields.vec.end(); ++fit) {
      auto &field = **fit;
      auto field_base = struct_def_base->fields.Lookup(field.name);
      if (field_base) {
        if (field.value.offset != field_base->value.offset)
          return "offsets differ for field: " + field.name;
        if (field.value.constant != field_base->value.constant)
          return "defaults differ for field: " + field.name;
        if (!EqualByName(field.value.type, field_base->value.type))
          return "types differ for field: " + field.name;
      } else {
        // Doesn't have to exist, deleting fields is fine.
        // But we should check if there is a field that has the same offset
        // but is incompatible (in the case of field renaming).
        for (auto fbit = struct_def_base->fields.vec.begin();
             fbit != struct_def_base->fields.vec.end(); ++fbit) {
          field_base = *fbit;
          if (field.value.offset == field_base->value.offset) {
            if (!EqualByName(field.value.type, field_base->value.type))
              return "field renamed to different type: " + field.name;
            break;
          }
        }
      }
    }
  }
  for (auto eit = enums_.vec.begin(); eit != enums_.vec.end(); ++eit) {
    auto &enum_def = **eit;
    auto qualified_name =
        enum_def.defined_namespace->GetFullyQualifiedName(enum_def.name);
    auto enum_def_base = base.enums_.Lookup(qualified_name);
    if (!enum_def_base) continue;
    for (auto evit = enum_def.Vals().begin(); evit != enum_def.Vals().end();
         ++evit) {
      auto &enum_val = **evit;
      auto enum_val_base = enum_def_base->Lookup(enum_val.name);
      if (enum_val_base) {
        if (enum_val != *enum_val_base)
          return "values differ for enum: " + enum_val.name;
      }
    }
  }
  return "";
}
} // namespace flatbuffers
| 1 | 17,150 | Sorry, this still makes no sense.. the value of any union or enum is simply a name integer constant, it has nothing to do with the `BASE_TYPE_` enum. This value should be `0`. In particular: `Every union has the NONE field, which always has value 0`. | google-flatbuffers | java |
@@ -38,4 +38,6 @@ public interface Alerter {
void alertOnFailedExecutorHealthCheck(Executor executor,
List<ExecutableFlow> executions,
ExecutorManagerException e, List<String> alertEmails);
+
+ String getAzkabanURL();
} | 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.alert;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.Executor;
import azkaban.executor.ExecutorManagerException;
import azkaban.sla.SlaOption;
import java.util.List;
/**
 * Interface for sending out notifications on Azkaban execution events
 * (flow completion, SLA violations, executor health problems). Concrete
 * implementations (e.g. an email alerter) are invoked by the executor
 * manager when the corresponding event occurs.
 */
public interface Alerter {
  /** Alert that the given flow finished successfully. */
  void alertOnSuccess(ExecutableFlow exflow) throws Exception;
  /** Alert that the given flow failed; {@code extraReasons} carries additional context lines. */
  void alertOnError(ExecutableFlow exflow, String... extraReasons) throws Exception;
  /** Alert on the first error observed in the flow (fired before the flow finishes). */
  void alertOnFirstError(ExecutableFlow exflow) throws Exception;
  /** Alert that the given SLA option was violated, with a pre-formatted message. */
  void alertOnSla(SlaOption slaOption, String slaMessage) throws Exception;
  /** Alert that status updates from an executor failed for the listed executions. */
  void alertOnFailedUpdate(Executor executor, List<ExecutableFlow> executions,
      ExecutorManagerException e);
  /** Alert the given recipients that an executor health check failed. */
  void alertOnFailedExecutorHealthCheck(Executor executor,
      List<ExecutableFlow> executions,
      ExecutorManagerException e, List<String> alertEmails);
}
| 1 | 22,480 | Looks like getAzkabanURL() is added after concrete Alerter (Emailer)::getAzkabanURL(), so that Override annotation is needed. | azkaban-azkaban | java |
@@ -316,6 +316,8 @@ class BaseDetector(nn.Module, metaclass=ABCMeta):
i = int(i)
color_mask = color_masks[labels[i]]
mask = segms[i]
+ if mask.dtype != np.bool:
+ mask = np.array(mask, dtype=bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# if out_file specified, do not show image in window
if out_file is not None: | 1 | from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from mmcv.runner import auto_fp16
from mmcv.utils import print_log
from mmdet.utils import get_root_logger
class BaseDetector(nn.Module, metaclass=ABCMeta):
    """Base class for detectors."""

    def __init__(self):
        super(BaseDetector, self).__init__()
        self.fp16_enabled = False

    @property
    def with_neck(self):
        """bool: whether the detector has a neck"""
        return hasattr(self, 'neck') and self.neck is not None

    # TODO: these properties need to be carefully handled
    # for both single stage & two stage detectors
    @property
    def with_shared_head(self):
        """bool: whether the detector has a shared head in the RoI Head"""
        return hasattr(self, 'roi_head') and self.roi_head.with_shared_head

    @property
    def with_bbox(self):
        """bool: whether the detector has a bbox head"""
        return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
                or (hasattr(self, 'bbox_head') and self.bbox_head is not None))

    @property
    def with_mask(self):
        """bool: whether the detector has a mask head"""
        return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
                or (hasattr(self, 'mask_head') and self.mask_head is not None))

    @abstractmethod
    def extract_feat(self, imgs):
        """Extract features from images."""
        pass

    def extract_feats(self, imgs):
        """Extract features from multiple images.

        Args:
            imgs (list[torch.Tensor]): A list of images. The images are
                augmented from the same image but in different ways.

        Returns:
            list[torch.Tensor]: Features of different images
        """
        assert isinstance(imgs, list)
        return [self.extract_feat(img) for img in imgs]

    @abstractmethod
    def forward_train(self, imgs, img_metas, **kwargs):
        """
        Args:
            img (list[Tensor]): List of tensors of shape (1, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys, see
                :class:`mmdet.datasets.pipelines.Collect`.
            kwargs (keyword arguments): Specific to concrete implementation.
        """
        pass

    async def async_simple_test(self, img, img_metas, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def simple_test(self, img, img_metas, **kwargs):
        pass

    @abstractmethod
    def aug_test(self, imgs, img_metas, **kwargs):
        """Test function with test time augmentation."""
        pass

    def init_weights(self, pretrained=None):
        """Initialize the weights in detector.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if pretrained is not None:
            logger = get_root_logger()
            print_log(f'load model from: {pretrained}', logger=logger)

    async def aforward_test(self, *, img, img_metas, **kwargs):
        for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got {type(var)}')

        num_augs = len(img)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(img)}) '
                             f'!= num of image metas ({len(img_metas)})')
        # TODO: remove the restriction of samples_per_gpu == 1 when prepared
        samples_per_gpu = img[0].size(0)
        assert samples_per_gpu == 1

        if num_augs == 1:
            return await self.async_simple_test(img[0], img_metas[0], **kwargs)
        else:
            raise NotImplementedError

    def forward_test(self, imgs, img_metas, **kwargs):
        """
        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
        """
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got {type(var)}')

        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) '
                             f'!= num of image meta ({len(img_metas)})')

        if num_augs == 1:
            # proposals (List[List[Tensor]]): the outer list indicates
            # test-time augs (multiscale, flip, etc.) and the inner list
            # indicates images in a batch.
            # The Tensor should have a shape Px4, where P is the number of
            # proposals.
            if 'proposals' in kwargs:
                kwargs['proposals'] = kwargs['proposals'][0]
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
            assert imgs[0].size(0) == 1, 'aug test does not support ' \
                                         'inference with batch size ' \
                                         f'{imgs[0].size(0)}'
            # TODO: support test augmentation for predefined proposals
            assert 'proposals' not in kwargs
            return self.aug_test(imgs, img_metas, **kwargs)

    @auto_fp16(apply_to=('img', ))
    def forward(self, img, img_metas, return_loss=True, **kwargs):
        """Calls either :func:`forward_train` or :func:`forward_test` depending
        on whether ``return_loss`` is ``True``.

        Note this setting will change the expected inputs. When
        ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
        should be double nested (i.e. List[Tensor], List[List[dict]]), with
        the outer list indicating test time augmentations.
        """
        if return_loss:
            return self.forward_train(img, img_metas, **kwargs)
        else:
            return self.forward_test(img, img_metas, **kwargs)

    def _parse_losses(self, losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary information.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
                which may be a weighted sum of all losses, log_vars contains \
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensors')

        # Only entries whose key contains 'loss' participate in the total
        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)

        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            # reduce loss when distributed training
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()

        return loss, log_vars

    def train_step(self, data, optimizer):
        """The iteration step during training.

        This method defines an iteration step during training, except for the
        back propagation and optimizer updating, which are done in an optimizer
        hook. Note that in some complicated cases or models, the whole process
        including back propagation and optimizer updating is also defined in
        this method, such as GAN.

        Args:
            data (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
                runner is passed to ``train_step()``. This argument is unused
                and reserved.

        Returns:
            dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \
                ``num_samples``.

                - ``loss`` is a tensor for back propagation, which can be a \
                weighted sum of multiple losses.
                - ``log_vars`` contains all the variables to be sent to the
                logger.
                - ``num_samples`` indicates the batch size (when the model is \
                DDP, it means the batch size on each GPU), which is used for \
                averaging the logs.
        """
        losses = self(**data)
        loss, log_vars = self._parse_losses(losses)

        outputs = dict(
            loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))

        return outputs

    def val_step(self, data, optimizer):
        """The iteration step during validation.

        This method shares the same signature as :func:`train_step`, but used
        during val epochs. Note that the evaluation after training epochs is
        not implemented with this method, but an evaluation hook.
        """
        losses = self(**data)
        loss, log_vars = self._parse_losses(losses)

        outputs = dict(
            loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))

        return outputs

    def show_result(self,
                    img,
                    result,
                    score_thr=0.3,
                    bbox_color='green',
                    text_color='green',
                    thickness=1,
                    font_scale=0.5,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None):
        """Draw `result` over `img`.

        Args:
            img (str or Tensor): The image to be displayed.
            result (Tensor or tuple): The results to draw over `img`
                bbox_result or (bbox_result, segm_result).
            score_thr (float, optional): Minimum score of bboxes to be shown.
                Default: 0.3.
            bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
            text_color (str or tuple or :obj:`Color`): Color of texts.
            thickness (int): Thickness of lines.
            font_scale (float): Font scales of texts.
            win_name (str): The window name.
            wait_time (int): Value of waitKey param.
                Default: 0.
            show (bool): Whether to show the image.
                Default: False.
            out_file (str or None): The filename to write the image.
                Default: None.

        Returns:
            img (Tensor): Only if not `show` or `out_file`
        """
        img = mmcv.imread(img)
        img = img.copy()
        if isinstance(result, tuple):
            bbox_result, segm_result = result
            if isinstance(segm_result, tuple):
                segm_result = segm_result[0]  # ms rcnn
        else:
            bbox_result, segm_result = result, None
        bboxes = np.vstack(bbox_result)
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        labels = np.concatenate(labels)
        # draw segmentation masks
        if segm_result is not None and len(labels) > 0:  # non empty
            segms = mmcv.concat_list(segm_result)
            inds = np.where(bboxes[:, -1] > score_thr)[0]
            np.random.seed(42)
            color_masks = [
                np.random.randint(0, 256, (1, 3), dtype=np.uint8)
                for _ in range(max(labels) + 1)
            ]
            for i in inds:
                i = int(i)
                color_mask = color_masks[labels[i]]
                # Masks may be stored as uint8/int arrays (e.g. decoded RLE);
                # cast to bool so the indexing below is boolean masking rather
                # than integer fancy indexing.
                mask = segms[i].astype(bool)
                img[mask] = img[mask] * 0.5 + color_mask * 0.5
        # if out_file specified, do not show image in window
        if out_file is not None:
            show = False
        # draw bounding boxes
        mmcv.imshow_det_bboxes(
            img,
            bboxes,
            labels,
            class_names=self.CLASSES,
            score_thr=score_thr,
            bbox_color=bbox_color,
            text_color=text_color,
            thickness=thickness,
            font_scale=font_scale,
            win_name=win_name,
            show=show,
            wait_time=wait_time,
            out_file=out_file)

        if not (show or out_file):
            return img
| 1 | 21,536 | The above 3 lines can be written as: `mask = segms[i].astype(bool)` | open-mmlab-mmdetection | py |
@@ -27,6 +27,7 @@ import (
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/protogen/iotexapi"
+ "github.com/iotexproject/iotex-address/address"
)
// Flags | 1 | // Copyright (c) 2019 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package action
import (
"context"
"encoding/hex"
"fmt"
"math/big"
"syscall"
"github.com/golang/protobuf/proto"
"github.com/spf13/cobra"
"go.uber.org/zap"
"golang.org/x/crypto/ssh/terminal"
"google.golang.org/grpc/status"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/cli/ioctl/cmd/account"
"github.com/iotexproject/iotex-core/cli/ioctl/cmd/config"
"github.com/iotexproject/iotex-core/cli/ioctl/util"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/protogen/iotexapi"
)
// Flags shared by the action subcommands; populated by setActionFlags.
var (
	gasLimit uint64 // maximum gas the action may consume
	gasPrice string // gas price in 10^(-6) IOTX units
	nonce    uint64 // account nonce; 0 means auto-detect
	signer   string // alias of the account that signs the action
	bytecode []byte // contract bytecode for deploy/invoke/read
)

// ActionCmd represents the action command
var ActionCmd = &cobra.Command{
	Use:   "action",
	Short: "Manage actions of IoTeX blockchain",
}
// init registers all action-related subcommands and their flags under
// `ioctl action`.
func init() {
	ActionCmd.AddCommand(actionHashCmd)
	ActionCmd.AddCommand(actionTransferCmd)
	ActionCmd.AddCommand(actionDeployCmd)
	ActionCmd.AddCommand(actionInvokeCmd)
	ActionCmd.AddCommand(actionReadCmd)
	ActionCmd.AddCommand(actionClaimCmd)
	ActionCmd.AddCommand(actionDepositCmd)
	// Persistent flags let the user override endpoint/security for one call
	ActionCmd.PersistentFlags().StringVar(&config.ReadConfig.Endpoint, "endpoint",
		config.ReadConfig.Endpoint, "set endpoint for once")
	ActionCmd.PersistentFlags().BoolVar(&config.Insecure, "insecure", config.Insecure,
		"insecure connection for once")
	setActionFlags(actionTransferCmd, actionDeployCmd, actionInvokeCmd, actionReadCmd, actionClaimCmd,
		actionDepositCmd)
}
// setActionFlags attaches the common action flags (gas, signer, nonce) to
// each given command, plus bytecode-related flags for contract commands.
func setActionFlags(cmds ...*cobra.Command) {
	for _, cmd := range cmds {
		cmd.Flags().Uint64VarP(&gasLimit, "gas-limit", "l", 0, "set gas limit")
		cmd.Flags().StringVarP(&gasPrice, "gas-price", "p", "1",
			"set gas price (unit: 10^(-6)Iotx)")
		cmd.Flags().StringVarP(&signer, "signer", "s", "", "choose a signing account")
		cmd.Flags().Uint64VarP(&nonce, "nonce", "n", 0, "set nonce")
		cmd.MarkFlagRequired("signer")
		// Contract commands additionally need bytecode and an explicit gas limit
		if cmd == actionDeployCmd || cmd == actionInvokeCmd || cmd == actionReadCmd {
			cmd.Flags().BytesHexVarP(&bytecode, "bytecode", "b", nil, "set the byte code")
			cmd.MarkFlagRequired("gas-limit")
			cmd.MarkFlagRequired("bytecode")
		}
	}
}
// GetGasPrice queries the configured API endpoint for the suggested gas
// price and returns it as a big integer.
func GetGasPrice() (*big.Int, error) {
	grpcConn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure)
	if err != nil {
		return nil, err
	}
	defer grpcConn.Close()
	apiClient := iotexapi.NewAPIServiceClient(grpcConn)
	res, err := apiClient.SuggestGasPrice(context.Background(), &iotexapi.SuggestGasPriceRequest{})
	if err != nil {
		return nil, err
	}
	return new(big.Int).SetUint64(res.GasPrice), nil
}
// sendAction signs the given envelope with the --signer account, asks the
// user for interactive confirmation, and submits the result to the
// configured endpoint. When readOnly is true the action is executed via
// ReadContract (no state change; returns the contract output) instead of
// being broadcast.
func sendAction(elp action.Envelope, readOnly bool) (string, error) {
	// Read the account password without echoing it to the terminal
	fmt.Printf("Enter password #%s:\n", signer)
	bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
	if err != nil {
		log.L().Error("failed to get password", zap.Error(err))
		return "", err
	}
	prvKey, err := account.KsAccountToPrivateKey(signer, string(bytePassword))
	if err != nil {
		return "", err
	}
	// Ensure key material is wiped on every exit path
	defer prvKey.Zero()
	sealed, err := action.Sign(elp, prvKey)
	// Wipe the key as soon as signing is done (deferred Zero becomes a no-op)
	prvKey.Zero()
	if err != nil {
		log.L().Error("failed to sign action", zap.Error(err))
		return "", err
	}
	selp := sealed.Proto()
	actionInfo, err := printActionProto(selp)
	if err != nil {
		return "", err
	}
	// Show the fully signed action and require explicit user confirmation
	var confirm string
	fmt.Println("\n" + actionInfo + "\n" +
		"Please confirm your action.\n" +
		"Type 'YES' to continue, quit for anything else.")
	fmt.Scanf("%s", &confirm)
	if confirm != "YES" && confirm != "yes" {
		return "Quit", nil
	}
	fmt.Println()

	conn, err := util.ConnectToEndpoint(config.ReadConfig.SecureConnect && !config.Insecure)
	if err != nil {
		return "", err
	}
	defer conn.Close()
	cli := iotexapi.NewAPIServiceClient(conn)
	ctx := context.Background()

	if readOnly {
		// Read-only path: execute the contract call without broadcasting
		request := &iotexapi.ReadContractRequest{Action: selp}
		res, err := cli.ReadContract(ctx, request)
		if err != nil {
			// Prefer the gRPC status message when one is available
			if sta, ok := status.FromError(err); ok {
				return "", fmt.Errorf(sta.Message())
			}
			return "", err
		}
		return res.Data, nil
	}
	request := &iotexapi.SendActionRequest{Action: selp}
	if _, err = cli.SendAction(ctx, request); err != nil {
		if sta, ok := status.FromError(err); ok {
			return "", fmt.Errorf(sta.Message())
		}
		return "", err
	}
	// The action hash is the 256-bit hash of the serialized signed envelope
	shash := hash.Hash256b(byteutil.Must(proto.Marshal(selp)))
	return "Action has been sent to blockchain.\n" +
		"Wait for several seconds and query this action by hash:\n" +
		hex.EncodeToString(shash[:]), nil
}
| 1 | 17,623 | File is not `gofmt`-ed with `-s` (from `gofmt`) | iotexproject-iotex-core | go |
@@ -631,7 +631,10 @@ void nano::node::start ()
{
network.port = bootstrap.port;
}
+
+ logger.always_log (boost::str (boost::format ("Node started with peering port `%1%`.") % network.port));
}
+
if (!flags.disable_backup)
{
backup_wallet (); | 1 | #include <nano/lib/threading.hpp>
#include <nano/lib/tomlconfig.hpp>
#include <nano/lib/utility.hpp>
#include <nano/node/common.hpp>
#include <nano/node/daemonconfig.hpp>
#include <nano/node/node.hpp>
#include <nano/node/rocksdb/rocksdb.hpp>
#include <nano/node/telemetry.hpp>
#include <nano/node/websocket.hpp>
#include <nano/rpc/rpc.hpp>
#include <nano/secure/buffer.hpp>
#include <nano/test_common/system.hpp>
#include <boost/filesystem.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <algorithm>
#include <cstdlib>
#include <future>
#include <sstream>
// Out-of-line definitions for constexpr static data members (required when
// the members are ODR-used, prior to C++17 inline variables).
double constexpr nano::node::price_max;
double constexpr nano::node::free_cutoff;
std::size_t constexpr nano::block_arrival::arrival_size_min;
std::chrono::seconds constexpr nano::block_arrival::arrival_time_min;
namespace nano
{
// Bootstrap representative-weight tables compiled into the binary; consumed
// by get_bootstrap_weights() during node startup (live and beta networks).
extern unsigned char nano_bootstrap_weights_live[];
extern std::size_t nano_bootstrap_weights_live_size;
extern unsigned char nano_bootstrap_weights_beta[];
extern std::size_t nano_bootstrap_weights_beta_size;
}
// Resolve `address_a:port_a` and contact every resulting endpoint: send a
// keepalive over an existing channel, or initiate a new TCP connection when
// none exists. Resolution is asynchronous; capturing `shared_from_this`
// keeps the node alive until the callback has run.
void nano::node::keepalive (std::string const & address_a, uint16_t port_a)
{
	auto node_l (shared_from_this ());
	network.resolver.async_resolve (boost::asio::ip::udp::resolver::query (address_a, std::to_string (port_a)), [node_l, address_a, port_a] (boost::system::error_code const & ec, boost::asio::ip::udp::resolver::iterator i_a) {
		if (!ec)
		{
			// A hostname may resolve to several endpoints; contact all of them
			for (auto i (i_a), n (boost::asio::ip::udp::resolver::iterator{}); i != n; ++i)
			{
				auto endpoint (nano::transport::map_endpoint_to_v6 (i->endpoint ()));
				std::weak_ptr<nano::node> node_w (node_l);
				auto channel (node_l->network.find_channel (endpoint));
				if (!channel)
				{
					// No existing channel to this peer: start a fresh TCP connection
					node_l->network.tcp_channels.start_tcp (endpoint);
				}
				else
				{
					node_l->network.send_keepalive (channel);
				}
			}
		}
		else
		{
			node_l->logger.try_log (boost::str (boost::format ("Error resolving address: %1%:%2%: %3%") % address_a % port_a % ec.message ()));
		}
	});
}
// Build a container_info tree describing the rep crawler's tracked state
// (currently just the count and element size of the `active` container).
std::unique_ptr<nano::container_info_component> nano::collect_container_info (rep_crawler & rep_crawler, std::string const & name)
{
	auto const element_size = sizeof (decltype (rep_crawler.active)::value_type);
	std::size_t active_count;
	{
		// Hold the crawler's mutex only long enough to read the size
		nano::lock_guard<nano::mutex> lock (rep_crawler.active_mutex);
		active_count = rep_crawler.active.size ();
	}
	auto result = std::make_unique<container_info_composite> (name);
	result->add_component (std::make_unique<container_info_leaf> (container_info{ "active", active_count, element_size }));
	return result;
}
// Convenience constructor: wrap the peering port and logging settings into a
// node_config and delegate to the main constructor below.
nano::node::node (boost::asio::io_context & io_ctx_a, uint16_t peering_port_a, boost::filesystem::path const & application_path_a, nano::logging const & logging_a, nano::work_pool & work_a, nano::node_flags flags_a, unsigned seq) :
	node (io_ctx_a, application_path_a, nano::node_config (peering_port_a, logging_a), work_a, flags_a, seq)
{
}
nano::node::node (boost::asio::io_context & io_ctx_a, boost::filesystem::path const & application_path_a, nano::node_config const & config_a, nano::work_pool & work_a, nano::node_flags flags_a, unsigned seq) :
write_database_queue (!flags_a.force_use_write_database_queue && (config_a.rocksdb_config.enable)),
io_ctx (io_ctx_a),
node_initialized_latch (1),
config (config_a),
network_params{ config.network_params },
stats (config.stat_config),
workers (std::max (3u, config.io_threads / 4), nano::thread_role::name::worker),
flags (flags_a),
work (work_a),
distributed_work (*this),
logger (config_a.logging.min_time_between_log_output),
store_impl (nano::make_store (logger, application_path_a, network_params.ledger, flags.read_only, true, config_a.rocksdb_config, config_a.diagnostics_config.txn_tracking, config_a.block_processor_batch_max_time, config_a.lmdb_config, config_a.backup_before_upgrade)),
store (*store_impl),
wallets_store_impl (std::make_unique<nano::mdb_wallets_store> (application_path_a / "wallets.ldb", config_a.lmdb_config)),
wallets_store (*wallets_store_impl),
gap_cache (*this),
ledger (store, stats, network_params.ledger, flags_a.generate_cache),
checker (config.signature_checker_threads),
network (*this, config.peering_port),
telemetry (std::make_shared<nano::telemetry> (network, workers, observers.telemetry, stats, network_params, flags.disable_ongoing_telemetry_requests)),
bootstrap_initiator (*this),
// BEWARE: `bootstrap` takes `network.port` instead of `config.peering_port` because when the user doesn't specify
// a peering port and wants the OS to pick one, the picking happens when `network` gets initialized
// (if UDP is active, otherwise it happens when `bootstrap` gets initialized), so then for TCP traffic
// we want to tell `bootstrap` to use the already picked port instead of itself picking a different one.
// Thus, be very careful if you change the order: if `bootstrap` gets constructed before `network`,
// the latter would inherit the port from the former (if TCP is active, otherwise `network` picks first)
//
bootstrap (network.port, *this),
application_path (application_path_a),
port_mapping (*this),
rep_crawler (*this),
vote_processor (checker, active, observers, stats, config, flags, logger, online_reps, rep_crawler, ledger, network_params),
warmed_up (0),
block_processor (*this, write_database_queue),
online_reps (ledger, config),
history{ config.network_params.voting },
vote_uniquer (block_uniquer),
confirmation_height_processor (ledger, write_database_queue, config.conf_height_processor_batch_min_time, config.logging, logger, node_initialized_latch, flags.confirmation_height_processor_mode),
active (*this, confirmation_height_processor),
scheduler{ *this },
aggregator (config, stats, active.generator, active.final_generator, history, ledger, wallets, active),
wallets (wallets_store.init_error (), *this),
startup_time (std::chrono::steady_clock::now ()),
node_seq (seq)
{
if (!init_error ())
{
telemetry->start ();
active.vacancy_update = [this] () { scheduler.notify (); };
if (config.websocket_config.enabled)
{
auto endpoint_l (nano::tcp_endpoint (boost::asio::ip::make_address_v6 (config.websocket_config.address), config.websocket_config.port));
websocket_server = std::make_shared<nano::websocket::listener> (config.websocket_config.tls_config, logger, wallets, io_ctx, endpoint_l);
this->websocket_server->run ();
}
wallets.observer = [this] (bool active) {
observers.wallet.notify (active);
};
network.channel_observer = [this] (std::shared_ptr<nano::transport::channel> const & channel_a) {
debug_assert (channel_a != nullptr);
observers.endpoint.notify (channel_a);
};
network.disconnect_observer = [this] () {
observers.disconnect.notify ();
};
if (!config.callback_address.empty ())
{
observers.blocks.add ([this] (nano::election_status const & status_a, std::vector<nano::vote_with_weight_info> const & votes_a, nano::account const & account_a, nano::amount const & amount_a, bool is_state_send_a, bool is_state_epoch_a) {
auto block_a (status_a.winner);
if ((status_a.type == nano::election_status_type::active_confirmed_quorum || status_a.type == nano::election_status_type::active_confirmation_height) && this->block_arrival.recent (block_a->hash ()))
{
auto node_l (shared_from_this ());
background ([node_l, block_a, account_a, amount_a, is_state_send_a, is_state_epoch_a] () {
boost::property_tree::ptree event;
event.add ("account", account_a.to_account ());
event.add ("hash", block_a->hash ().to_string ());
std::string block_text;
block_a->serialize_json (block_text);
event.add ("block", block_text);
event.add ("amount", amount_a.to_string_dec ());
if (is_state_send_a)
{
event.add ("is_send", is_state_send_a);
event.add ("subtype", "send");
}
// Subtype field
else if (block_a->type () == nano::block_type::state)
{
if (block_a->link ().is_zero ())
{
event.add ("subtype", "change");
}
else if (is_state_epoch_a)
{
debug_assert (amount_a == 0 && node_l->ledger.is_epoch_link (block_a->link ()));
event.add ("subtype", "epoch");
}
else
{
event.add ("subtype", "receive");
}
}
std::stringstream ostream;
boost::property_tree::write_json (ostream, event);
ostream.flush ();
auto body (std::make_shared<std::string> (ostream.str ()));
auto address (node_l->config.callback_address);
auto port (node_l->config.callback_port);
auto target (std::make_shared<std::string> (node_l->config.callback_target));
auto resolver (std::make_shared<boost::asio::ip::tcp::resolver> (node_l->io_ctx));
resolver->async_resolve (boost::asio::ip::tcp::resolver::query (address, std::to_string (port)), [node_l, address, port, target, body, resolver] (boost::system::error_code const & ec, boost::asio::ip::tcp::resolver::iterator i_a) {
if (!ec)
{
node_l->do_rpc_callback (i_a, address, port, target, body, resolver);
}
else
{
if (node_l->config.logging.callback_logging ())
{
node_l->logger.always_log (boost::str (boost::format ("Error resolving callback: %1%:%2%: %3%") % address % port % ec.message ()));
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
}
});
});
}
});
}
if (websocket_server)
{
observers.blocks.add ([this] (nano::election_status const & status_a, std::vector<nano::vote_with_weight_info> const & votes_a, nano::account const & account_a, nano::amount const & amount_a, bool is_state_send_a, bool is_state_epoch_a) {
debug_assert (status_a.type != nano::election_status_type::ongoing);
if (this->websocket_server->any_subscriber (nano::websocket::topic::confirmation))
{
auto block_a (status_a.winner);
std::string subtype;
if (is_state_send_a)
{
subtype = "send";
}
else if (block_a->type () == nano::block_type::state)
{
if (block_a->link ().is_zero ())
{
subtype = "change";
}
else if (is_state_epoch_a)
{
debug_assert (amount_a == 0 && this->ledger.is_epoch_link (block_a->link ()));
subtype = "epoch";
}
else
{
subtype = "receive";
}
}
this->websocket_server->broadcast_confirmation (block_a, account_a, amount_a, subtype, status_a, votes_a);
}
});
observers.active_stopped.add ([this] (nano::block_hash const & hash_a) {
if (this->websocket_server->any_subscriber (nano::websocket::topic::stopped_election))
{
nano::websocket::message_builder builder;
this->websocket_server->broadcast (builder.stopped_election (hash_a));
}
});
observers.telemetry.add ([this] (nano::telemetry_data const & telemetry_data, nano::endpoint const & endpoint) {
if (this->websocket_server->any_subscriber (nano::websocket::topic::telemetry))
{
nano::websocket::message_builder builder;
this->websocket_server->broadcast (builder.telemetry_received (telemetry_data, endpoint));
}
});
}
// Add block confirmation type stats regardless of http-callback and websocket subscriptions
observers.blocks.add ([this] (nano::election_status const & status_a, std::vector<nano::vote_with_weight_info> const & votes_a, nano::account const & account_a, nano::amount const & amount_a, bool is_state_send_a, bool is_state_epoch_a) {
debug_assert (status_a.type != nano::election_status_type::ongoing);
switch (status_a.type)
{
case nano::election_status_type::active_confirmed_quorum:
this->stats.inc (nano::stat::type::confirmation_observer, nano::stat::detail::active_quorum, nano::stat::dir::out);
break;
case nano::election_status_type::active_confirmation_height:
this->stats.inc (nano::stat::type::confirmation_observer, nano::stat::detail::active_conf_height, nano::stat::dir::out);
break;
case nano::election_status_type::inactive_confirmation_height:
this->stats.inc (nano::stat::type::confirmation_observer, nano::stat::detail::inactive_conf_height, nano::stat::dir::out);
break;
default:
break;
}
});
observers.endpoint.add ([this] (std::shared_ptr<nano::transport::channel> const & channel_a) {
if (channel_a->get_type () == nano::transport::transport_type::udp)
{
this->network.send_keepalive (channel_a);
}
else
{
this->network.send_keepalive_self (channel_a);
}
});
observers.vote.add ([this] (std::shared_ptr<nano::vote> vote_a, std::shared_ptr<nano::transport::channel> const & channel_a, nano::vote_code code_a) {
debug_assert (code_a != nano::vote_code::invalid);
// The vote_code::vote is handled inside the election
if (code_a == nano::vote_code::indeterminate)
{
auto active_in_rep_crawler (!this->rep_crawler.response (channel_a, vote_a));
if (active_in_rep_crawler)
{
// Representative is defined as online if replying to live votes or rep_crawler queries
this->online_reps.observe (vote_a->account);
}
this->gap_cache.vote (vote_a);
}
});
if (websocket_server)
{
observers.vote.add ([this] (std::shared_ptr<nano::vote> vote_a, std::shared_ptr<nano::transport::channel> const & channel_a, nano::vote_code code_a) {
if (this->websocket_server->any_subscriber (nano::websocket::topic::vote))
{
nano::websocket::message_builder builder;
auto msg (builder.vote_received (vote_a, code_a));
this->websocket_server->broadcast (msg);
}
});
}
// Cancelling local work generation
observers.work_cancel.add ([this] (nano::root const & root_a) {
this->work.cancel (root_a);
this->distributed_work.cancel (root_a);
});
logger.always_log ("Node starting, version: ", NANO_VERSION_STRING);
logger.always_log ("Build information: ", BUILD_INFO);
logger.always_log ("Database backend: ", store.vendor_get ());
auto const network_label = network_params.network.get_current_network_as_string ();
logger.always_log ("Active network: ", network_label);
logger.always_log (boost::str (boost::format ("Work pool running %1% threads %2%") % work.threads.size () % (work.opencl ? "(1 for OpenCL)" : "")));
logger.always_log (boost::str (boost::format ("%1% work peers configured") % config.work_peers.size ()));
if (!work_generation_enabled ())
{
logger.always_log ("Work generation is disabled");
}
if (config.logging.node_lifetime_tracing ())
{
logger.always_log ("Constructing node");
}
logger.always_log (boost::str (boost::format ("Outbound Voting Bandwidth limited to %1% bytes per second, burst ratio %2%") % config.bandwidth_limit % config.bandwidth_limit_burst_ratio));
// First do a pass with a read to see if any writing needs doing, this saves needing to open a write lock (and potentially blocking)
auto is_initialized (false);
{
auto const transaction (store.tx_begin_read ());
is_initialized = (store.account.begin (transaction) != store.account.end ());
}
if (!is_initialized && !flags.read_only)
{
auto const transaction (store.tx_begin_write ({ tables::accounts, tables::blocks, tables::confirmation_height, tables::frontiers }));
// Store was empty meaning we just created it, add the genesis block
store.initialize (transaction, ledger.cache);
}
if (!ledger.block_or_pruned_exists (config.network_params.ledger.genesis->hash ()))
{
std::stringstream ss;
ss << "Genesis block not found. This commonly indicates a configuration issue, check that the --network or --data_path command line arguments are correct, "
"and also the ledger backend node config option. If using a read-only CLI command a ledger must already exist, start the node with --daemon first.";
if (network_params.network.is_beta_network ())
{
ss << " Beta network may have reset, try clearing database files";
}
auto const str = ss.str ();
logger.always_log (str);
std::cerr << str << std::endl;
std::exit (1);
}
if (config.enable_voting)
{
std::ostringstream stream;
stream << "Voting is enabled, more system resources will be used";
auto voting (wallets.reps ().voting);
if (voting > 0)
{
stream << ". " << voting << " representative(s) are configured";
if (voting > 1)
{
stream << ". Voting with more than one representative can limit performance";
}
}
logger.always_log (stream.str ());
}
node_id = nano::keypair ();
logger.always_log ("Node ID: ", node_id.pub.to_node_id ());
if ((network_params.network.is_live_network () || network_params.network.is_beta_network ()) && !flags.inactive_node)
{
auto const bootstrap_weights = get_bootstrap_weights ();
// Use bootstrap weights if initial bootstrap is not completed
const bool use_bootstrap_weight = ledger.cache.block_count < bootstrap_weights.first;
if (use_bootstrap_weight)
{
ledger.bootstrap_weights = bootstrap_weights.second;
for (auto const & rep : ledger.bootstrap_weights)
{
logger.always_log ("Using bootstrap rep weight: ", rep.first.to_account (), " -> ", nano::uint128_union (rep.second).format_balance (Mxrb_ratio, 0, true), " XRB");
}
}
ledger.bootstrap_weight_max_blocks = bootstrap_weights.first;
// Drop unchecked blocks if initial bootstrap is completed
if (!flags.disable_unchecked_drop && !use_bootstrap_weight && !flags.read_only)
{
auto const transaction (store.tx_begin_write ({ tables::unchecked }));
store.unchecked.clear (transaction);
logger.always_log ("Dropping unchecked blocks");
}
}
ledger.pruning = flags.enable_pruning || store.pruned.count (store.tx_begin_read ()) > 0;
if (ledger.pruning)
{
if (config.enable_voting && !flags.inactive_node)
{
std::string str = "Incompatibility detected between config node.enable_voting and existing pruned blocks";
logger.always_log (str);
std::cerr << str << std::endl;
std::exit (1);
}
else if (!flags.enable_pruning && !flags.inactive_node)
{
std::string str = "To start node with existing pruned blocks use launch flag --enable_pruning";
logger.always_log (str);
std::cerr << str << std::endl;
std::exit (1);
}
}
}
node_initialized_latch.count_down ();
}
nano::node::~node ()
{
	// Optionally trace node teardown when lifetime logging is enabled in config.
	bool const trace_lifetime (config.logging.node_lifetime_tracing ());
	if (trace_lifetime)
	{
		logger.always_log ("Destructing node");
	}
	// Ensure every subsystem is shut down before member destruction begins.
	stop ();
}
void nano::node::do_rpc_callback (boost::asio::ip::tcp::resolver::iterator i_a, std::string const & address, uint16_t port, std::shared_ptr<std::string> const & target, std::shared_ptr<std::string> const & body, std::shared_ptr<boost::asio::ip::tcp::resolver> const & resolver)
{
	// Walk the resolved endpoints one at a time: attempt an HTTP POST callback to the
	// current endpoint and, on connection failure, recurse with the next iterator entry.
	// target/body/resolver are shared_ptrs so they outlive the whole async chain.
	if (i_a != boost::asio::ip::tcp::resolver::iterator{})
	{
		auto node_l (shared_from_this ());
		auto sock (std::make_shared<boost::asio::ip::tcp::socket> (node_l->io_ctx));
		sock->async_connect (i_a->endpoint (), [node_l, target, body, sock, address, port, i_a, resolver] (boost::system::error_code const & ec) mutable {
			if (!ec)
			{
				// Build the JSON POST request
				auto req (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ());
				req->method (boost::beast::http::verb::post);
				req->target (*target);
				req->version (11);
				req->insert (boost::beast::http::field::host, address);
				req->insert (boost::beast::http::field::content_type, "application/json");
				req->body () = *body;
				req->prepare_payload ();
				boost::beast::http::async_write (*sock, *req, [node_l, sock, address, port, req, i_a, target, body, resolver] (boost::system::error_code const & ec, std::size_t bytes_transferred) mutable {
					if (!ec)
					{
						auto sb (std::make_shared<boost::beast::flat_buffer> ());
						auto resp (std::make_shared<boost::beast::http::response<boost::beast::http::string_body>> ());
						boost::beast::http::async_read (*sock, *sb, *resp, [node_l, sb, resp, sock, address, port, i_a, target, body, resolver] (boost::system::error_code const & ec, std::size_t bytes_transferred) mutable {
							if (!ec)
							{
								// Any 2xx response counts as success; other statuses are logged and counted as errors
								if (boost::beast::http::to_status_class (resp->result ()) == boost::beast::http::status_class::successful)
								{
									node_l->stats.inc (nano::stat::type::http_callback, nano::stat::detail::initiate, nano::stat::dir::out);
								}
								else
								{
									if (node_l->config.logging.callback_logging ())
									{
										node_l->logger.try_log (boost::str (boost::format ("Callback to %1%:%2% failed with status: %3%") % address % port % resp->result ()));
									}
									node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
								}
							}
							else
							{
								if (node_l->config.logging.callback_logging ())
								{
									// Fixed log message wording ("Unable complete" -> "Unable to complete")
									node_l->logger.try_log (boost::str (boost::format ("Unable to complete callback: %1%:%2%: %3%") % address % port % ec.message ()));
								}
								node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
							}
						});
					}
					else
					{
						if (node_l->config.logging.callback_logging ())
						{
							node_l->logger.try_log (boost::str (boost::format ("Unable to send callback: %1%:%2%: %3%") % address % port % ec.message ()));
						}
						node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
					}
				});
			}
			else
			{
				// Connection failed: log, count the error, then retry with the next resolved endpoint
				if (node_l->config.logging.callback_logging ())
				{
					node_l->logger.try_log (boost::str (boost::format ("Unable to connect to callback address: %1%:%2%: %3%") % address % port % ec.message ()));
				}
				node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
				++i_a;
				node_l->do_rpc_callback (i_a, address, port, target, body, resolver);
			}
		});
	}
}
bool nano::node::copy_with_compaction (boost::filesystem::path const & destination)
{
	// Delegate to the store backend, which copies the database while compacting it.
	auto const success = store.copy_db (destination);
	return success;
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (node & node, std::string const & name)
{
	// Aggregate container statistics from every node subsystem under one composite entry.
	auto info = std::make_unique<container_info_composite> (name);
	info->add_component (collect_container_info (node.work, "work"));
	info->add_component (collect_container_info (node.gap_cache, "gap_cache"));
	info->add_component (collect_container_info (node.ledger, "ledger"));
	info->add_component (collect_container_info (node.active, "active"));
	info->add_component (collect_container_info (node.bootstrap_initiator, "bootstrap_initiator"));
	info->add_component (collect_container_info (node.bootstrap, "bootstrap"));
	info->add_component (collect_container_info (node.network, "network"));
	// Telemetry is optional and may be null
	if (node.telemetry)
	{
		info->add_component (collect_container_info (*node.telemetry, "telemetry"));
	}
	info->add_component (collect_container_info (node.workers, "workers"));
	info->add_component (collect_container_info (node.observers, "observers"));
	info->add_component (collect_container_info (node.wallets, "wallets"));
	info->add_component (collect_container_info (node.vote_processor, "vote_processor"));
	info->add_component (collect_container_info (node.rep_crawler, "rep_crawler"));
	info->add_component (collect_container_info (node.block_processor, "block_processor"));
	info->add_component (collect_container_info (node.block_arrival, "block_arrival"));
	info->add_component (collect_container_info (node.online_reps, "online_reps"));
	info->add_component (collect_container_info (node.history, "history"));
	info->add_component (collect_container_info (node.block_uniquer, "block_uniquer"));
	info->add_component (collect_container_info (node.vote_uniquer, "vote_uniquer"));
	info->add_component (collect_container_info (node.confirmation_height_processor, "confirmation_height_processor"));
	info->add_component (collect_container_info (node.distributed_work, "distributed_work"));
	info->add_component (collect_container_info (node.aggregator, "request_aggregator"));
	return info;
}
void nano::node::process_active (std::shared_ptr<nano::block> const & incoming)
{
	// Record arrival first so the block is tracked for rebroadcast/election, then queue it.
	auto const hash_l = incoming->hash ();
	block_arrival.add (hash_l);
	block_processor.add (incoming, nano::seconds_since_epoch ());
}
nano::process_return nano::node::process (nano::block & block_a)
{
	// Process a single block against the ledger under a write transaction that
	// covers every table ledger processing may touch.
	auto const transaction (store.tx_begin_write ({ tables::accounts, tables::blocks, tables::frontiers, tables::pending }));
	return ledger.process (transaction, block_a);
}
nano::process_return nano::node::process_local (std::shared_ptr<nano::block> const & block_a)
{
	// Synchronously process a locally-originated block and return the ledger result.
	// Add block hash as recently arrived to trigger automatic rebroadcast and election
	block_arrival.add (block_a->hash ());
	// Set current time to trigger automatic rebroadcast and election
	nano::unchecked_info info (block_a, block_a->account (), nano::seconds_since_epoch (), nano::signature_verification::unknown);
	// Notify block processor to release write lock
	block_processor.wait_write ();
	// Process block
	block_post_events post_events ([&store = store] { return store.tx_begin_read (); });
	auto const transaction (store.tx_begin_write ({ tables::accounts, tables::blocks, tables::frontiers, tables::pending }));
	return block_processor.process_one (transaction, post_events, info, false, nano::block_origin::local);
}
void nano::node::process_local_async (std::shared_ptr<nano::block> const & block_a)
{
	// Mark the block as recently arrived so it is rebroadcast and considered for election.
	block_arrival.add (block_a->hash ());
	// Queue for asynchronous processing, stamped with the current time.
	nano::unchecked_info info_l (block_a, block_a->account (), nano::seconds_since_epoch (), nano::signature_verification::unknown);
	block_processor.add_local (info_l);
}
void nano::node::start ()
{
	// Start all node subsystems. Ordering matters: network comes up first, then
	// bootstrap and periodic maintenance tasks, then listeners and wallets.
	long_inactivity_cleanup ();
	network.start ();
	add_initial_peers ();
	if (!flags.disable_legacy_bootstrap && !flags.disable_ongoing_bootstrap)
	{
		ongoing_bootstrap ();
	}
	if (!flags.disable_unchecked_cleanup)
	{
		auto this_l (shared ());
		workers.push_task ([this_l] () {
			this_l->ongoing_unchecked_cleanup ();
		});
	}
	if (flags.enable_pruning)
	{
		auto this_l (shared ());
		workers.push_task ([this_l] () {
			this_l->ongoing_ledger_pruning ();
		});
	}
	if (!flags.disable_rep_crawler)
	{
		rep_crawler.start ();
	}
	ongoing_rep_calculation ();
	ongoing_peer_store ();
	ongoing_online_weight_calculation_queue ();
	// TCP is enabled when incoming connections are allowed and at least one of the
	// bootstrap listener / realtime TCP features is not disabled
	bool tcp_enabled (false);
	if (config.tcp_incoming_connections_max > 0 && !(flags.disable_bootstrap_listener && flags.disable_tcp_realtime))
	{
		bootstrap.start ();
		tcp_enabled = true;
		// With UDP disabled, advertise the TCP listener's port as the node's port
		if (flags.disable_udp && network.port != bootstrap.port)
		{
			network.port = bootstrap.port;
		}
	}
	if (!flags.disable_backup)
	{
		backup_wallet ();
	}
	if (!flags.disable_search_pending)
	{
		search_pending ();
	}
	if (!flags.disable_wallet_bootstrap)
	{
		// Delay to start wallet lazy bootstrap
		auto this_l (shared ());
		workers.add_timed_task (std::chrono::steady_clock::now () + std::chrono::minutes (1), [this_l] () {
			this_l->bootstrap_wallet ();
		});
	}
	// Start port mapping if external address is not defined and TCP or UDP ports are enabled
	if (config.external_address == boost::asio::ip::address_v6{}.any ().to_string () && (tcp_enabled || !flags.disable_udp))
	{
		port_mapping.start ();
	}
	wallets.start ();
	if (config.frontiers_confirmation != nano::frontiers_confirmation_mode::disabled)
	{
		workers.push_task ([this_l = shared ()] () {
			this_l->ongoing_backlog_population ();
		});
	}
}
void nano::node::stop ()
{
	// Shut down all subsystems; safe to call repeatedly. The atomic exchange ensures
	// only the first caller performs the shutdown sequence.
	if (!stopped.exchange (true))
	{
		logger.always_log ("Node stopping");
		// Cancels ongoing work generation tasks, which may be blocking other threads
		// No tasks may wait for work generation in I/O threads, or termination signal capturing will be unable to call node::stop()
		distributed_work.stop ();
		block_processor.stop ();
		aggregator.stop ();
		vote_processor.stop ();
		scheduler.stop ();
		active.stop ();
		confirmation_height_processor.stop ();
		network.stop ();
		// Guard against a null telemetry instance, consistent with the null check
		// performed in collect_container_info
		if (telemetry)
		{
			telemetry->stop ();
		}
		if (websocket_server)
		{
			websocket_server->stop ();
		}
		bootstrap_initiator.stop ();
		bootstrap.stop ();
		port_mapping.stop ();
		checker.stop ();
		wallets.stop ();
		stats.stop ();
		// Wait for any in-flight epoch upgrade to finish before tearing down workers
		auto epoch_upgrade = epoch_upgrading.lock ();
		if (epoch_upgrade->valid ())
		{
			epoch_upgrade->wait ();
		}
		workers.stop ();
		// work pool is not stopped on purpose due to testing setup
	}
}
void nano::node::keepalive_preconfigured (std::vector<std::string> const & peers_a)
{
	// Send a keepalive to every preconfigured peer on the default node port.
	for (auto const & peer : peers_a)
	{
		keepalive (peer, network_params.network.default_node_port);
	}
}
nano::block_hash nano::node::latest (nano::account const & account_a)
{
	// Head block of the account's chain, read under a fresh read transaction.
	auto const transaction = store.tx_begin_read ();
	return ledger.latest (transaction, account_a);
}
nano::uint128_t nano::node::balance (nano::account const & account_a)
{
	// Current settled balance for the account, read under a fresh read transaction.
	auto const transaction = store.tx_begin_read ();
	return ledger.account_balance (transaction, account_a);
}
std::shared_ptr<nano::block> nano::node::block (nano::block_hash const & hash_a)
{
	// Fetch a block from the store by hash; returns nullptr when absent.
	auto const transaction = store.tx_begin_read ();
	return store.block.get (transaction, hash_a);
}
std::pair<nano::uint128_t, nano::uint128_t> nano::node::balance_pending (nano::account const & account_a, bool only_confirmed_a)
{
	// Returns { settled balance, pending (receivable) amount } for the account,
	// both read inside the same transaction for a consistent snapshot.
	auto const transaction (store.tx_begin_read ());
	auto const balance_l = ledger.account_balance (transaction, account_a, only_confirmed_a);
	auto const pending_l = ledger.account_pending (transaction, account_a, only_confirmed_a);
	return { balance_l, pending_l };
}
nano::uint128_t nano::node::weight (nano::account const & account_a)
{
	// Voting weight delegated to account_a according to the ledger
	return ledger.weight (account_a);
}
nano::block_hash nano::node::rep_block (nano::account const & account_a)
{
	// Returns the head block from which the account's current representative is
	// derived, or 0 when the account does not exist.
	auto const transaction (store.tx_begin_read ());
	nano::account_info info;
	if (store.account.get (transaction, account_a, info))
	{
		// Lookup failed (account unknown)
		return nano::block_hash (0);
	}
	return ledger.representative (transaction, info.head);
}
nano::uint128_t nano::node::minimum_principal_weight ()
{
	// Uses the trended online stake sample as the reference weight
	return minimum_principal_weight (online_reps.trended ());
}
nano::uint128_t nano::node::minimum_principal_weight (nano::uint128_t const & online_stake)
{
	// A principal representative must hold at least online_stake / principal_weight_factor
	return online_stake / network_params.network.principal_weight_factor;
}
void nano::node::long_inactivity_cleanup ()
{
	// If the node appears to have been offline for over a week, drop stored peers and
	// online weight samples since they are likely stale.
	bool perform_cleanup = false;
	auto const transaction (store.tx_begin_write ({ tables::online_weight, tables::peers }));
	if (store.online_weight.count (transaction) > 0)
	{
		// The newest online weight sample approximates the node's last active time
		auto sample (store.online_weight.rbegin (transaction));
		auto n (store.online_weight.end ());
		debug_assert (sample != n);
		auto const one_week_ago = static_cast<std::size_t> ((std::chrono::system_clock::now () - std::chrono::hours (7 * 24)).time_since_epoch ().count ());
		perform_cleanup = sample->first < one_week_ago;
	}
	if (perform_cleanup)
	{
		store.online_weight.clear (transaction);
		store.peer.clear (transaction);
		logger.always_log ("Removed records of peers and online weight after a long period of inactivity");
	}
}
void nano::node::ongoing_rep_calculation ()
{
auto now (std::chrono::steady_clock::now ());
vote_processor.calculate_weights ();
std::weak_ptr<nano::node> node_w (shared_from_this ());
workers.add_timed_task (now + std::chrono::minutes (10), [node_w] () {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_rep_calculation ();
}
});
}
void nano::node::ongoing_bootstrap ()
{
	// Periodically kick off automatic bootstrap attempts; more aggressive while the
	// node is still warming up (fewer than 3 completed warm-up iterations).
	auto next_wakeup = network_params.network.bootstrap_interval;
	if (warmed_up < 3)
	{
		// Re-attempt bootstrapping more aggressively on startup
		next_wakeup = std::chrono::seconds (5);
		if (!bootstrap_initiator.in_progress () && !network.empty ())
		{
			++warmed_up;
		}
	}
	if (network_params.network.is_dev_network () && flags.bootstrap_interval != 0)
	{
		// For test purposes allow faster automatic bootstraps
		next_wakeup = std::chrono::seconds (flags.bootstrap_interval);
		++warmed_up;
	}
	// Differential bootstrap with max age (75% of all legacy attempts)
	uint32_t frontiers_age (std::numeric_limits<uint32_t>::max ());
	auto bootstrap_weight_reached (ledger.cache.block_count >= ledger.bootstrap_weight_max_blocks);
	auto previous_bootstrap_count (stats.count (nano::stat::type::bootstrap, nano::stat::detail::initiate, nano::stat::dir::out) + stats.count (nano::stat::type::bootstrap, nano::stat::detail::initiate_legacy_age, nano::stat::dir::out));
	/*
	- Maximum value for 25% of attempts or if block count is below preconfigured value (initial bootstrap not finished)
	- Node shutdown time minus 1 hour for start attempts (warm up)
	- Default age value otherwise (1 day for live network, 1 hour for beta)
	*/
	if (bootstrap_weight_reached)
	{
		if (warmed_up < 3)
		{
			// Find last online weight sample (last active time for node)
			uint64_t last_sample_time (0);
			auto last_record = store.online_weight.rbegin (store.tx_begin_read ());
			if (last_record != store.online_weight.end ())
			{
				last_sample_time = last_record->first;
			}
			uint64_t time_since_last_sample = std::chrono::duration_cast<std::chrono::seconds> (std::chrono::system_clock::now ().time_since_epoch ()).count () - last_sample_time / std::pow (10, 9); // Nanoseconds to seconds
			// Guard against overflow before narrowing to uint32_t
			if (time_since_last_sample + 60 * 60 < std::numeric_limits<uint32_t>::max ())
			{
				frontiers_age = std::max<uint32_t> (time_since_last_sample + 60 * 60, network_params.bootstrap.default_frontiers_age_seconds);
			}
		}
		else if (previous_bootstrap_count % 4 != 0)
		{
			frontiers_age = network_params.bootstrap.default_frontiers_age_seconds;
		}
	}
	// Bootstrap and schedule for next attempt
	bootstrap_initiator.bootstrap (false, boost::str (boost::format ("auto_bootstrap_%1%") % previous_bootstrap_count), frontiers_age);
	std::weak_ptr<nano::node> node_w (shared_from_this ());
	workers.add_timed_task (std::chrono::steady_clock::now () + next_wakeup, [node_w] () {
		if (auto node_l = node_w.lock ())
		{
			node_l->ongoing_bootstrap ();
		}
	});
}
void nano::node::ongoing_peer_store ()
{
const bool stored (network.tcp_channels.store_all (true));
network.udp_channels.store_all (!stored);
std::weak_ptr<nano::node> node_w (shared_from_this ());
workers.add_timed_task (std::chrono::steady_clock::now () + network_params.network.peer_dump_interval, [node_w] () {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_peer_store ();
}
});
}
void nano::node::backup_wallet ()
{
	// Write a JSON backup of every wallet into <application_path>/backup, then
	// reschedule the next backup pass.
	auto transaction (wallets.tx_begin_read ());
	for (auto const & item : wallets.items)
	{
		boost::system::error_code error_chmod;
		auto backup_path (application_path / "backup");
		boost::filesystem::create_directories (backup_path);
		nano::set_secure_perm_directory (backup_path, error_chmod);
		item.second->store.write_backup (transaction, backup_path / (item.first.to_string () + ".json"));
	}
	auto this_l (shared ());
	workers.add_timed_task (std::chrono::steady_clock::now () + network_params.node.backup_interval, [this_l] () {
		this_l->backup_wallet ();
	});
}
void nano::node::search_pending ()
{
	// Reload wallets from disk, then search all of them for receivable blocks.
	wallets.reload ();
	wallets.search_pending_all ();
	// Reschedule the next periodic search
	auto this_l (shared ());
	workers.add_timed_task (std::chrono::steady_clock::now () + network_params.node.search_pending_interval, [this_l] () {
		this_l->search_pending ();
	});
}
void nano::node::bootstrap_wallet ()
{
	// Start a wallet-targeted bootstrap for up to 128 accounts held in local wallets.
	std::deque<nano::account> accounts;
	{
		// Hold the wallets mutex only while collecting the account list; the wallet
		// store mutex is taken per-wallet inside the loop
		nano::lock_guard<nano::mutex> lock (wallets.mutex);
		auto const transaction (wallets.tx_begin_read ());
		for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n && accounts.size () < 128; ++i)
		{
			auto & wallet (*i->second);
			nano::lock_guard<std::recursive_mutex> wallet_lock (wallet.store.mutex);
			for (auto j (wallet.store.begin (transaction)), m (wallet.store.end ()); j != m && accounts.size () < 128; ++j)
			{
				nano::account account (j->first);
				accounts.push_back (account);
			}
		}
	}
	if (!accounts.empty ())
	{
		bootstrap_initiator.bootstrap_wallet (accounts);
	}
}
void nano::node::unchecked_cleanup ()
{
	// Remove unchecked blocks older than the configured cutoff and purge their
	// hashes from the network duplicate (publish) filter.
	std::vector<nano::uint128_t> digests;
	std::deque<nano::unchecked_key> cleaning_list;
	auto const attempt (bootstrap_initiator.current_attempt ());
	const bool long_attempt (attempt != nullptr && std::chrono::duration_cast<std::chrono::seconds> (std::chrono::steady_clock::now () - attempt->attempt_start).count () > config.unchecked_cutoff_time.count ());
	// Collect old unchecked keys
	// Skipped while initial bootstrap is incomplete or a long bootstrap attempt is running
	if (ledger.cache.block_count >= ledger.bootstrap_weight_max_blocks && !long_attempt)
	{
		auto const now (nano::seconds_since_epoch ());
		auto const transaction (store.tx_begin_read ());
		// Max 1M records to clean, max 2 minutes reading to prevent slow i/o systems issues
		for (auto [i, n] = store.unchecked.full_range (transaction); i != n && cleaning_list.size () < 1024 * 1024 && nano::seconds_since_epoch () - now < 120; ++i)
		{
			nano::unchecked_key const & key (i->first);
			nano::unchecked_info const & info (i->second);
			if ((now - info.modified) > static_cast<uint64_t> (config.unchecked_cutoff_time.count ()))
			{
				digests.push_back (network.publish_filter.hash (info.block));
				cleaning_list.push_back (key);
			}
		}
	}
	if (!cleaning_list.empty ())
	{
		logger.always_log (boost::str (boost::format ("Deleting %1% old unchecked blocks") % cleaning_list.size ()));
	}
	// Delete old unchecked keys in batches
	while (!cleaning_list.empty ())
	{
		std::size_t deleted_count (0);
		// Cap each write transaction at 2048 deletions to avoid holding the write lock too long
		auto const transaction (store.tx_begin_write ({ tables::unchecked }));
		while (deleted_count++ < 2 * 1024 && !cleaning_list.empty ())
		{
			auto key (cleaning_list.front ());
			cleaning_list.pop_front ();
			if (store.unchecked.exists (transaction, key))
			{
				store.unchecked.del (transaction, key);
			}
		}
	}
	// Delete from the duplicate filter
	network.publish_filter.clear (digests);
}
void nano::node::ongoing_unchecked_cleanup ()
{
unchecked_cleanup ();
workers.add_timed_task (std::chrono::steady_clock::now () + network_params.node.unchecked_cleaning_interval, [this_l = shared ()] () {
this_l->ongoing_unchecked_cleanup ();
});
}
void nano::node::ongoing_backlog_population ()
{
	// Populate the election backlog now, then reschedule.
	populate_backlog ();
	// Dev network repopulates every second; other networks every 5 minutes
	std::chrono::seconds const delay = config.network_params.network.is_dev_network () ? std::chrono::seconds{ 1 } : std::chrono::seconds{ 5 * 60 };
	workers.add_timed_task (std::chrono::steady_clock::now () + delay, [this_l = shared ()] () {
		this_l->ongoing_backlog_population ();
	});
}
bool nano::node::collect_ledger_pruning_targets (std::deque<nano::block_hash> & pruning_targets_a, nano::account & last_account_a, uint64_t const batch_read_size_a, uint64_t const max_depth_a, uint64_t const cutoff_time_a)
{
	// Scan confirmed account chains starting at last_account_a and collect, per chain,
	// the newest block hash old/deep enough to prune from. Updates last_account_a so
	// the caller can resume; returns true when the whole account table has been covered.
	uint64_t read_operations (0);
	bool finish_transaction (false);
	auto const transaction (store.tx_begin_read ());
	for (auto i (store.confirmation_height.begin (transaction, last_account_a)), n (store.confirmation_height.end ()); i != n && !finish_transaction;)
	{
		++read_operations;
		auto const & account (i->first);
		nano::block_hash hash (i->second.frontier);
		uint64_t depth (0);
		// Walk back from the confirmed frontier until a block at or below the cutoff is found
		while (!hash.is_zero () && depth < max_depth_a)
		{
			auto block (store.block.get (transaction, hash));
			if (block != nullptr)
			{
				// Never prune the frontier itself (depth == 0)
				if (block->sideband ().timestamp > cutoff_time_a || depth == 0)
				{
					hash = block->previous ();
				}
				else
				{
					break;
				}
			}
			else
			{
				// Chain already pruned below this point
				release_assert (depth != 0);
				hash = 0;
			}
			// Periodically refresh the read transaction during long chain walks
			if (++depth % batch_read_size_a == 0)
			{
				transaction.refresh ();
			}
		}
		if (!hash.is_zero ())
		{
			pruning_targets_a.push_back (hash);
		}
		read_operations += depth;
		if (read_operations >= batch_read_size_a)
		{
			// Batch budget exhausted: remember where to resume and end this pass
			last_account_a = account.number () + 1;
			finish_transaction = true;
		}
		else
		{
			++i;
		}
	}
	return !finish_transaction || last_account_a.is_zero ();
}
void nano::node::ledger_pruning (uint64_t const batch_size_a, bool bootstrap_weight_reached_a, bool log_to_cout_a)
{
	// Prune old confirmed blocks from the ledger in batches until no more targets remain
	// or the node is stopped.
	uint64_t const max_depth (config.max_pruning_depth != 0 ? config.max_pruning_depth : std::numeric_limits<uint64_t>::max ());
	// Only apply the age cutoff once initial bootstrap has completed
	uint64_t const cutoff_time (bootstrap_weight_reached_a ? nano::seconds_since_epoch () - config.max_pruning_age.count () : std::numeric_limits<uint64_t>::max ());
	uint64_t pruned_count (0);
	uint64_t transaction_write_count (0);
	nano::account last_account (1); // 0 Burn account is never opened. So it can be used to break loop
	std::deque<nano::block_hash> pruning_targets;
	bool target_finished (false);
	while ((transaction_write_count != 0 || !target_finished) && !stopped)
	{
		// Search pruning targets
		while (pruning_targets.size () < batch_size_a && !target_finished && !stopped)
		{
			target_finished = collect_ledger_pruning_targets (pruning_targets, last_account, batch_size_a * 2, max_depth, cutoff_time);
		}
		// Pruning write operation
		transaction_write_count = 0;
		if (!pruning_targets.empty () && !stopped)
		{
			// Serialize with other writers via the database write queue
			auto scoped_write_guard = write_database_queue.wait (nano::writer::pruning);
			auto write_transaction (store.tx_begin_write ({ tables::blocks, tables::pruned }));
			while (!pruning_targets.empty () && transaction_write_count < batch_size_a && !stopped)
			{
				auto const & pruning_hash (pruning_targets.front ());
				auto account_pruned_count (ledger.pruning_action (write_transaction, pruning_hash, batch_size_a));
				transaction_write_count += account_pruned_count;
				pruning_targets.pop_front ();
			}
			pruned_count += transaction_write_count;
			auto log_message (boost::str (boost::format ("%1% blocks pruned") % pruned_count));
			// CLI invocations log to stdout; the running node logs through the logger
			if (!log_to_cout_a)
			{
				logger.try_log (log_message);
			}
			else
			{
				std::cout << log_message << std::endl;
			}
		}
	}
	auto const log_message (boost::str (boost::format ("Total recently pruned block count: %1%") % pruned_count));
	if (!log_to_cout_a)
	{
		logger.always_log (log_message);
	}
	else
	{
		std::cout << log_message << std::endl;
	}
}
void nano::node::ongoing_ledger_pruning ()
{
	// Prune now, then schedule the next pruning pass.
	bool const weight_reached = ledger.cache.block_count >= ledger.bootstrap_weight_max_blocks;
	ledger_pruning (flags.block_processor_batch_size != 0 ? flags.block_processor_batch_size : 2 * 1024, weight_reached, false);
	// Before initial bootstrap completes, prune at least every 15 minutes
	auto const interval (weight_reached ? config.max_pruning_age : std::min (config.max_pruning_age, std::chrono::seconds (15 * 60)));
	auto this_l (shared ());
	workers.add_timed_task (std::chrono::steady_clock::now () + interval, [this_l] () {
		this_l->workers.push_task ([this_l] () {
			this_l->ongoing_ledger_pruning ();
		});
	});
}
int nano::node::price (nano::uint128_t const & balance_a, int amount_a)
{
	debug_assert (balance_a >= amount_a * nano::Gxrb_ratio);
	// Sum the per-unit price for each of the amount_a units, decreasing the
	// remaining balance by one Gxrb per unit as we go.
	auto remaining (balance_a);
	double total (0.0);
	for (auto unit (0); unit < amount_a; ++unit)
	{
		remaining -= nano::Gxrb_ratio;
		auto const scaled ((remaining / nano::Mxrb_ratio).convert_to<double> ());
		auto const units (scaled / 1000.0);
		auto const unit_price (((free_cutoff - units) / free_cutoff) * price_max);
		// Clamp each unit's price to [0, price_max]
		total += std::min (std::max (0.0, unit_price), price_max);
	}
	// Result is expressed in hundredths
	return static_cast<int> (total * 100.0);
}
uint64_t nano::node::default_difficulty (nano::work_version const version_a) const
{
	// Base work threshold for the given work algorithm version.
	switch (version_a)
	{
		case nano::work_version::work_1:
			return network_params.work.threshold_base (version_a);
		default:
			debug_assert (false && "Invalid version specified to default_difficulty");
			// Unknown versions fall back to the maximum difficulty
			return std::numeric_limits<uint64_t>::max ();
	}
}
uint64_t nano::node::default_receive_difficulty (nano::work_version const version_a) const
{
	// Reduced work threshold applied to receive blocks (epoch 2 rules).
	switch (version_a)
	{
		case nano::work_version::work_1:
			return network_params.work.epoch_2_receive;
		default:
			debug_assert (false && "Invalid version specified to default_receive_difficulty");
			// Unknown versions fall back to the maximum difficulty
			return std::numeric_limits<uint64_t>::max ();
	}
}
uint64_t nano::node::max_work_generate_difficulty (nano::work_version const version_a) const
{
	// Highest difficulty the node will generate: base difficulty scaled by the configured multiplier.
	auto const base = default_difficulty (version_a);
	return nano::difficulty::from_multiplier (config.max_work_generate_multiplier, base);
}
bool nano::node::local_work_generation_enabled () const
{
	// Local generation is possible with an OpenCL device or CPU work threads.
	if (work.opencl)
	{
		return true;
	}
	return config.work_threads > 0;
}
bool nano::node::work_generation_enabled () const
{
	// Checks against the primary configured work peers.
	return work_generation_enabled (config.work_peers);
}
bool nano::node::work_generation_enabled (std::vector<std::pair<std::string, uint16_t>> const & peers_a) const
{
	// Work can be produced either locally or by at least one configured work peer.
	return local_work_generation_enabled () || !peers_a.empty ();
}
boost::optional<uint64_t> nano::node::work_generate_blocking (nano::block & block_a, uint64_t difficulty_a)
{
	// Generate work for the block's root and, on success, attach it to the block.
	auto work_l (work_generate_blocking (block_a.work_version (), block_a.root (), difficulty_a, block_a.account ()));
	if (work_l.is_initialized ())
	{
		block_a.block_work_set (*work_l);
	}
	return work_l;
}
void nano::node::work_generate (nano::work_version const version_a, nano::root const & root_a, uint64_t difficulty_a, std::function<void (boost::optional<uint64_t>)> callback_a, boost::optional<nano::account> const & account_a, bool secondary_work_peers_a)
{
	// Pick the peer list and hand the request to the distributed work pool.
	auto const & peers_l (secondary_work_peers_a ? config.secondary_work_peers : config.work_peers);
	auto const error = distributed_work.make (version_a, root_a, peers_l, difficulty_a, callback_a, account_a);
	if (error)
	{
		// Job could not be created (stopped, or work generation is not possible):
		// fail the callback immediately
		callback_a (boost::none);
	}
}
boost::optional<uint64_t> nano::node::work_generate_blocking (nano::work_version const version_a, nano::root const & root_a, uint64_t difficulty_a, boost::optional<nano::account> const & account_a)
{
	// Bridge the asynchronous work_generate API to a blocking call via promise/future.
	std::promise<boost::optional<uint64_t>> promise;
	auto future = promise.get_future ();
	work_generate (
	version_a, root_a, difficulty_a, [&promise] (boost::optional<uint64_t> opt_work_a) {
		promise.set_value (opt_work_a);
	},
	account_a);
	return future.get ();
}
boost::optional<uint64_t> nano::node::work_generate_blocking (nano::block & block_a)
{
	// Dev-network-only convenience overload using the base difficulty
	debug_assert (network_params.network.is_dev_network ());
	return work_generate_blocking (block_a, default_difficulty (nano::work_version::work_1));
}
boost::optional<uint64_t> nano::node::work_generate_blocking (nano::root const & root_a)
{
	// Dev-network-only convenience overload using the base difficulty
	debug_assert (network_params.network.is_dev_network ());
	return work_generate_blocking (root_a, default_difficulty (nano::work_version::work_1));
}
boost::optional<uint64_t> nano::node::work_generate_blocking (nano::root const & root_a, uint64_t difficulty_a)
{
	// Dev-network-only convenience overload pinned to work_1
	debug_assert (network_params.network.is_dev_network ());
	return work_generate_blocking (nano::work_version::work_1, root_a, difficulty_a);
}
void nano::node::add_initial_peers ()
{
	// Re-establish TCP connections to peers persisted from a previous run.
	if (flags.disable_add_initial_peers)
	{
		logger.always_log ("Skipping add_initial_peers because disable_add_initial_peers is set");
		return;
	}
	auto transaction (store.tx_begin_read ());
	for (auto i (store.peer.begin (transaction)), n (store.peer.end ()); i != n; ++i)
	{
		nano::endpoint endpoint (boost::asio::ip::address_v6 (i->first.address_bytes ()), i->first.port ());
		// Only initiate a connection if the peer is not already known/connected
		if (!network.reachout (endpoint, config.allow_local_peers))
		{
			network.tcp_channels.start_tcp (endpoint);
		}
	}
}
void nano::node::block_confirm (std::shared_ptr<nano::block> const & block_a)
{
	// Queue the block for a manual election and make sure the scheduler has processed it.
	scheduler.manual (block_a);
	scheduler.flush ();
	// If an election now exists for this root, force it into the active state
	auto election = active.election (block_a->qualified_root ());
	if (election != nullptr)
	{
		election->transition_active ();
	}
}
bool nano::node::block_confirmed (nano::block_hash const & hash_a)
{
	// Check cemented status against the ledger under a fresh read transaction.
	auto const transaction = store.tx_begin_read ();
	return ledger.block_confirmed (transaction, hash_a);
}
bool nano::node::block_confirmed_or_being_confirmed (nano::transaction const & transaction_a, nano::block_hash const & hash_a)
{
	// True when the block is currently in the confirmation height pipeline...
	if (confirmation_height_processor.is_processing_block (hash_a))
	{
		return true;
	}
	// ...or already cemented in the ledger
	return ledger.block_confirmed (transaction_a, hash_a);
}
void nano::node::ongoing_online_weight_calculation_queue ()
{
std::weak_ptr<nano::node> node_w (shared_from_this ());
workers.add_timed_task (std::chrono::steady_clock::now () + (std::chrono::seconds (network_params.node.weight_period)), [node_w] () {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_online_weight_calculation ();
}
});
}
bool nano::node::online () const
{
	// The node considers itself online when the total weight of representatives seen
	// by the rep crawler exceeds the online reps delta threshold
	return rep_crawler.total_weight () > online_reps.delta ();
}
void nano::node::ongoing_online_weight_calculation ()
{
online_reps.sample ();
ongoing_online_weight_calculation_queue ();
}
void nano::node::receive_confirmed (nano::transaction const & block_transaction_a, nano::block_hash const & hash_a, nano::account const & destination_a)
{
nano::unique_lock<nano::mutex> lk (wallets.mutex);
auto wallets_l = wallets.get_wallets ();
auto wallet_transaction = wallets.tx_begin_read ();
lk.unlock ();
for ([[maybe_unused]] auto const & [id, wallet] : wallets_l)
{
if (wallet->store.exists (wallet_transaction, destination_a))
{
nano::account representative;
nano::pending_info pending;
representative = wallet->store.representative (wallet_transaction);
auto error (store.pending.get (block_transaction_a, nano::pending_key (destination_a, hash_a), pending));
if (!error)
{
auto amount (pending.amount.number ());
wallet->receive_async (hash_a, representative, amount, destination_a, [] (std::shared_ptr<nano::block> const &) {});
}
else
{
if (!ledger.block_or_pruned_exists (block_transaction_a, hash_a))
{
logger.try_log (boost::str (boost::format ("Confirmed block is missing: %1%") % hash_a.to_string ()));
debug_assert (false && "Confirmed block is missing");
}
else
{
logger.try_log (boost::str (boost::format ("Block %1% has already been received") % hash_a.to_string ()));
}
}
}
}
}
void nano::node::process_confirmed_data (nano::transaction const & transaction_a, std::shared_ptr<nano::block> const & block_a, nano::block_hash const & hash_a, nano::account & account_a, nano::uint128_t & amount_a, bool & is_state_send_a, bool & is_state_epoch_a, nano::account & pending_account_a)
{
// Faster account calculation
account_a = block_a->account ();
if (account_a.is_zero ())
{
account_a = block_a->sideband ().account;
}
// Faster amount calculation
auto previous (block_a->previous ());
bool error (false);
auto previous_balance (ledger.balance_safe (transaction_a, previous, error));
auto block_balance (store.block.balance_calculated (block_a));
if (hash_a != ledger.constants.genesis->account ())
{
if (!error)
{
amount_a = block_balance > previous_balance ? block_balance - previous_balance : previous_balance - block_balance;
}
else
{
amount_a = 0;
}
}
else
{
amount_a = nano::dev::constants.genesis_amount;
}
if (auto state = dynamic_cast<nano::state_block *> (block_a.get ()))
{
if (state->hashables.balance < previous_balance)
{
is_state_send_a = true;
}
if (amount_a == 0 && network_params.ledger.epochs.is_epoch_link (state->link ()))
{
is_state_epoch_a = true;
}
pending_account_a = state->hashables.link.as_account ();
}
if (auto send = dynamic_cast<nano::send_block *> (block_a.get ()))
{
pending_account_a = send->hashables.destination;
}
}
void nano::node::process_confirmed (nano::election_status const & status_a, uint64_t iteration_a)
{
auto hash (status_a.winner->hash ());
auto const num_iters = (config.block_processor_batch_max_time / network_params.node.process_confirmed_interval) * 4;
if (auto block_l = ledger.store.block.get (ledger.store.tx_begin_read (), hash))
{
active.add_recently_confirmed (block_l->qualified_root (), hash);
confirmation_height_processor.add (block_l);
}
else if (iteration_a < num_iters)
{
iteration_a++;
std::weak_ptr<nano::node> node_w (shared ());
workers.add_timed_task (std::chrono::steady_clock::now () + network_params.node.process_confirmed_interval, [node_w, status_a, iteration_a] () {
if (auto node_l = node_w.lock ())
{
node_l->process_confirmed (status_a, iteration_a);
}
});
}
else
{
// Do some cleanup due to this block never being processed by confirmation height processor
active.remove_election_winner_details (hash);
}
}
bool nano::block_arrival::add (nano::block_hash const & hash_a)
{
nano::lock_guard<nano::mutex> lock (mutex);
auto now (std::chrono::steady_clock::now ());
auto inserted (arrival.get<tag_sequence> ().emplace_back (nano::block_arrival_info{ now, hash_a }));
auto result (!inserted.second);
return result;
}
bool nano::block_arrival::recent (nano::block_hash const & hash_a)
{
nano::lock_guard<nano::mutex> lock (mutex);
auto now (std::chrono::steady_clock::now ());
while (arrival.size () > arrival_size_min && arrival.get<tag_sequence> ().front ().arrival + arrival_time_min < now)
{
arrival.get<tag_sequence> ().pop_front ();
}
return arrival.get<tag_hash> ().find (hash_a) != arrival.get<tag_hash> ().end ();
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (block_arrival & block_arrival, std::string const & name)
{
std::size_t count = 0;
{
nano::lock_guard<nano::mutex> guard (block_arrival.mutex);
count = block_arrival.arrival.size ();
}
auto sizeof_element = sizeof (decltype (block_arrival.arrival)::value_type);
auto composite = std::make_unique<container_info_composite> (name);
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "arrival", count, sizeof_element }));
return composite;
}
std::shared_ptr<nano::node> nano::node::shared ()
{
return shared_from_this ();
}
int nano::node::store_version ()
{
auto transaction (store.tx_begin_read ());
return store.version.get (transaction);
}
bool nano::node::init_error () const
{
return store.init_error () || wallets_store.init_error ();
}
bool nano::node::epoch_upgrader (nano::raw_key const & prv_a, nano::epoch epoch_a, uint64_t count_limit, uint64_t threads)
{
bool error = stopped.load ();
if (!error)
{
auto epoch_upgrade = epoch_upgrading.lock ();
error = epoch_upgrade->valid () && epoch_upgrade->wait_for (std::chrono::seconds (0)) == std::future_status::timeout;
if (!error)
{
*epoch_upgrade = std::async (std::launch::async, &nano::node::epoch_upgrader_impl, this, prv_a, epoch_a, count_limit, threads);
}
}
return error;
}
void nano::node::set_bandwidth_params (std::size_t limit, double ratio)
{
config.bandwidth_limit_burst_ratio = ratio;
config.bandwidth_limit = limit;
network.set_bandwidth_params (limit, ratio);
logger.always_log (boost::str (boost::format ("set_bandwidth_params(%1%, %2%)") % limit % ratio));
}
void nano::node::epoch_upgrader_impl (nano::raw_key const & prv_a, nano::epoch epoch_a, uint64_t count_limit, uint64_t threads)
{
nano::thread_role::set (nano::thread_role::name::epoch_upgrader);
auto upgrader_process = [] (nano::node & node_a, std::atomic<uint64_t> & counter, std::shared_ptr<nano::block> const & epoch, uint64_t difficulty, nano::public_key const & signer_a, nano::root const & root_a, nano::account const & account_a) {
epoch->block_work_set (node_a.work_generate_blocking (nano::work_version::work_1, root_a, difficulty).value_or (0));
bool valid_signature (!nano::validate_message (signer_a, epoch->hash (), epoch->block_signature ()));
bool valid_work (node_a.network_params.work.difficulty (*epoch) >= difficulty);
nano::process_result result (nano::process_result::old);
if (valid_signature && valid_work)
{
result = node_a.process_local (epoch).code;
}
if (result == nano::process_result::progress)
{
++counter;
}
else
{
bool fork (result == nano::process_result::fork);
node_a.logger.always_log (boost::str (boost::format ("Failed to upgrade account %1%. Valid signature: %2%. Valid work: %3%. Block processor fork: %4%") % account_a.to_account () % valid_signature % valid_work % fork));
}
};
uint64_t const upgrade_batch_size = 1000;
nano::block_builder builder;
auto link (ledger.epoch_link (epoch_a));
nano::raw_key raw_key;
raw_key = prv_a;
auto signer (nano::pub_key (prv_a));
debug_assert (signer == ledger.epoch_signer (link));
nano::mutex upgrader_mutex;
nano::condition_variable upgrader_condition;
class account_upgrade_item final
{
public:
nano::account account{};
uint64_t modified{ 0 };
};
class account_tag
{
};
class modified_tag
{
};
// clang-format off
boost::multi_index_container<account_upgrade_item,
boost::multi_index::indexed_by<
boost::multi_index::ordered_non_unique<boost::multi_index::tag<modified_tag>,
boost::multi_index::member<account_upgrade_item, uint64_t, &account_upgrade_item::modified>,
std::greater<uint64_t>>,
boost::multi_index::hashed_unique<boost::multi_index::tag<account_tag>,
boost::multi_index::member<account_upgrade_item, nano::account, &account_upgrade_item::account>>>>
accounts_list;
// clang-format on
bool finished_upgrade (false);
while (!finished_upgrade && !stopped)
{
bool finished_accounts (false);
uint64_t total_upgraded_accounts (0);
while (!finished_accounts && count_limit != 0 && !stopped)
{
{
auto transaction (store.tx_begin_read ());
// Collect accounts to upgrade
for (auto i (store.account.begin (transaction)), n (store.account.end ()); i != n && accounts_list.size () < count_limit; ++i)
{
nano::account const & account (i->first);
nano::account_info const & info (i->second);
if (info.epoch () < epoch_a)
{
release_assert (nano::epochs::is_sequential (info.epoch (), epoch_a));
accounts_list.emplace (account_upgrade_item{ account, info.modified });
}
}
}
/* Upgrade accounts
Repeat until accounts with previous epoch exist in latest table */
std::atomic<uint64_t> upgraded_accounts (0);
uint64_t workers (0);
uint64_t attempts (0);
for (auto i (accounts_list.get<modified_tag> ().begin ()), n (accounts_list.get<modified_tag> ().end ()); i != n && attempts < upgrade_batch_size && attempts < count_limit && !stopped; ++i)
{
auto transaction (store.tx_begin_read ());
nano::account_info info;
nano::account const & account (i->account);
if (!store.account.get (transaction, account, info) && info.epoch () < epoch_a)
{
++attempts;
auto difficulty (network_params.work.threshold (nano::work_version::work_1, nano::block_details (epoch_a, false, false, true)));
nano::root const & root (info.head);
std::shared_ptr<nano::block> epoch = builder.state ()
.account (account)
.previous (info.head)
.representative (info.representative)
.balance (info.balance)
.link (link)
.sign (raw_key, signer)
.work (0)
.build ();
if (threads != 0)
{
{
nano::unique_lock<nano::mutex> lock (upgrader_mutex);
++workers;
while (workers > threads)
{
upgrader_condition.wait (lock);
}
}
this->workers.push_task ([node_l = shared_from_this (), &upgrader_process, &upgrader_mutex, &upgrader_condition, &upgraded_accounts, &workers, epoch, difficulty, signer, root, account] () {
upgrader_process (*node_l, upgraded_accounts, epoch, difficulty, signer, root, account);
{
nano::lock_guard<nano::mutex> lock (upgrader_mutex);
--workers;
}
upgrader_condition.notify_all ();
});
}
else
{
upgrader_process (*this, upgraded_accounts, epoch, difficulty, signer, root, account);
}
}
}
{
nano::unique_lock<nano::mutex> lock (upgrader_mutex);
while (workers > 0)
{
upgrader_condition.wait (lock);
}
}
total_upgraded_accounts += upgraded_accounts;
count_limit -= upgraded_accounts;
if (!accounts_list.empty ())
{
logger.always_log (boost::str (boost::format ("%1% accounts were upgraded to new epoch, %2% remain...") % total_upgraded_accounts % (accounts_list.size () - upgraded_accounts)));
accounts_list.clear ();
}
else
{
logger.always_log (boost::str (boost::format ("%1% total accounts were upgraded to new epoch") % total_upgraded_accounts));
finished_accounts = true;
}
}
// Pending blocks upgrade
bool finished_pending (false);
uint64_t total_upgraded_pending (0);
while (!finished_pending && count_limit != 0 && !stopped)
{
std::atomic<uint64_t> upgraded_pending (0);
uint64_t workers (0);
uint64_t attempts (0);
auto transaction (store.tx_begin_read ());
for (auto i (store.pending.begin (transaction, nano::pending_key (1, 0))), n (store.pending.end ()); i != n && attempts < upgrade_batch_size && attempts < count_limit && !stopped;)
{
bool to_next_account (false);
nano::pending_key const & key (i->first);
if (!store.account.exists (transaction, key.account))
{
nano::pending_info const & info (i->second);
if (info.epoch < epoch_a)
{
++attempts;
release_assert (nano::epochs::is_sequential (info.epoch, epoch_a));
auto difficulty (network_params.work.threshold (nano::work_version::work_1, nano::block_details (epoch_a, false, false, true)));
nano::root const & root (key.account);
nano::account const & account (key.account);
std::shared_ptr<nano::block> epoch = builder.state ()
.account (key.account)
.previous (0)
.representative (0)
.balance (0)
.link (link)
.sign (raw_key, signer)
.work (0)
.build ();
if (threads != 0)
{
{
nano::unique_lock<nano::mutex> lock (upgrader_mutex);
++workers;
while (workers > threads)
{
upgrader_condition.wait (lock);
}
}
this->workers.push_task ([node_l = shared_from_this (), &upgrader_process, &upgrader_mutex, &upgrader_condition, &upgraded_pending, &workers, epoch, difficulty, signer, root, account] () {
upgrader_process (*node_l, upgraded_pending, epoch, difficulty, signer, root, account);
{
nano::lock_guard<nano::mutex> lock (upgrader_mutex);
--workers;
}
upgrader_condition.notify_all ();
});
}
else
{
upgrader_process (*this, upgraded_pending, epoch, difficulty, signer, root, account);
}
}
}
else
{
to_next_account = true;
}
if (to_next_account)
{
// Move to next account if pending account exists or was upgraded
if (key.account.number () == std::numeric_limits<nano::uint256_t>::max ())
{
break;
}
else
{
i = store.pending.begin (transaction, nano::pending_key (key.account.number () + 1, 0));
}
}
else
{
// Move to next pending item
++i;
}
}
{
nano::unique_lock<nano::mutex> lock (upgrader_mutex);
while (workers > 0)
{
upgrader_condition.wait (lock);
}
}
total_upgraded_pending += upgraded_pending;
count_limit -= upgraded_pending;
// Repeat if some pending accounts were upgraded
if (upgraded_pending != 0)
{
logger.always_log (boost::str (boost::format ("%1% unopened accounts with pending blocks were upgraded to new epoch...") % total_upgraded_pending));
}
else
{
logger.always_log (boost::str (boost::format ("%1% total unopened accounts with pending blocks were upgraded to new epoch") % total_upgraded_pending));
finished_pending = true;
}
}
finished_upgrade = (total_upgraded_accounts == 0) && (total_upgraded_pending == 0);
}
logger.always_log ("Epoch upgrade is completed");
}
std::pair<uint64_t, decltype (nano::ledger::bootstrap_weights)> nano::node::get_bootstrap_weights () const
{
std::unordered_map<nano::account, nano::uint128_t> weights;
uint8_t const * weight_buffer = network_params.network.is_live_network () ? nano_bootstrap_weights_live : nano_bootstrap_weights_beta;
std::size_t weight_size = network_params.network.is_live_network () ? nano_bootstrap_weights_live_size : nano_bootstrap_weights_beta_size;
nano::bufferstream weight_stream ((uint8_t const *)weight_buffer, weight_size);
nano::uint128_union block_height;
uint64_t max_blocks = 0;
if (!nano::try_read (weight_stream, block_height))
{
max_blocks = nano::narrow_cast<uint64_t> (block_height.number ());
while (true)
{
nano::account account;
if (nano::try_read (weight_stream, account.bytes))
{
break;
}
nano::amount weight;
if (nano::try_read (weight_stream, weight.bytes))
{
break;
}
weights[account] = weight.number ();
}
}
return { max_blocks, weights };
}
void nano::node::populate_backlog ()
{
auto done = false;
uint64_t const chunk_size = 65536;
nano::account next = 0;
uint64_t total = 0;
while (!stopped && !done)
{
auto transaction = store.tx_begin_read ();
auto count = 0;
for (auto i = store.account.begin (transaction, next), n = store.account.end (); !stopped && i != n && count < chunk_size; ++i, ++count, ++total)
{
auto const & account = i->first;
scheduler.activate (account, transaction);
next = account.number () + 1;
}
done = store.account.begin (transaction, next) == store.account.end ();
}
}
nano::node_wrapper::node_wrapper (boost::filesystem::path const & path_a, boost::filesystem::path const & config_path_a, nano::node_flags const & node_flags_a) :
network_params{ nano::network_constants::active_network },
io_context (std::make_shared<boost::asio::io_context> ()),
work{ network_params.network, 1 }
{
boost::system::error_code error_chmod;
/*
* @warning May throw a filesystem exception
*/
boost::filesystem::create_directories (path_a);
nano::set_secure_perm_directory (path_a, error_chmod);
nano::daemon_config daemon_config{ path_a, network_params };
auto error = nano::read_node_config_toml (config_path_a, daemon_config, node_flags_a.config_overrides);
if (error)
{
std::cerr << "Error deserializing config file";
if (!node_flags_a.config_overrides.empty ())
{
std::cerr << " or --config option";
}
std::cerr << "\n"
<< error.get_message () << std::endl;
std::exit (1);
}
auto & node_config = daemon_config.node;
node_config.peering_port = 24000;
node_config.logging.max_size = std::numeric_limits<std::uintmax_t>::max ();
node_config.logging.init (path_a);
node = std::make_shared<nano::node> (*io_context, path_a, node_config, work, node_flags_a);
}
nano::node_wrapper::~node_wrapper ()
{
node->stop ();
}
nano::inactive_node::inactive_node (boost::filesystem::path const & path_a, boost::filesystem::path const & config_path_a, nano::node_flags const & node_flags_a) :
node_wrapper (path_a, config_path_a, node_flags_a),
node (node_wrapper.node)
{
node_wrapper.node->active.stop ();
}
nano::inactive_node::inactive_node (boost::filesystem::path const & path_a, nano::node_flags const & node_flags_a) :
inactive_node (path_a, path_a, node_flags_a)
{
}
nano::node_flags const & nano::inactive_node_flag_defaults ()
{
static nano::node_flags node_flags;
node_flags.inactive_node = true;
node_flags.read_only = true;
node_flags.generate_cache.reps = false;
node_flags.generate_cache.cemented_count = false;
node_flags.generate_cache.unchecked_count = false;
node_flags.generate_cache.account_count = false;
node_flags.disable_bootstrap_listener = true;
node_flags.disable_tcp_realtime = true;
return node_flags;
}
std::unique_ptr<nano::store> nano::make_store (nano::logger_mt & logger, boost::filesystem::path const & path, nano::ledger_constants & constants, bool read_only, bool add_db_postfix, nano::rocksdb_config const & rocksdb_config, nano::txn_tracking_config const & txn_tracking_config_a, std::chrono::milliseconds block_processor_batch_max_time_a, nano::lmdb_config const & lmdb_config_a, bool backup_before_upgrade)
{
if (rocksdb_config.enable)
{
return std::make_unique<nano::rocksdb_store> (logger, add_db_postfix ? path / "rocksdb" : path, constants, rocksdb_config, read_only);
}
return std::make_unique<nano::mdb_store> (logger, add_db_postfix ? path / "data.ldb" : path, constants, txn_tracking_config_a, block_processor_batch_max_time_a, lmdb_config_a, backup_before_upgrade);
}
| 1 | 17,075 | Thought it's good to have this logged down so that we can check the used value in the logs. | nanocurrency-nano-node | cpp |
@@ -83,7 +83,8 @@ type AWSMachineSpec struct {
// AWSMachineStatus defines the observed state of AWSMachine
type AWSMachineStatus struct {
// Ready is true when the provider resource is ready.
- Ready *bool `json:"ready,omitempty"`
+ // +optional
+ Ready bool `json:"ready"`
// Addresses contains the AWS instance associated addresses.
Addresses []v1.NodeAddress `json:"addresses,omitempty"` | 1 | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
)
// AWSMachineSpec defines the desired state of AWSMachine
type AWSMachineSpec struct {
// ProviderID is the unique identifier as specified by the cloud provider.
ProviderID *string `json:"providerID,omitempty"`
// AMI is the reference to the AMI from which to create the machine instance.
AMI AWSResourceReference `json:"ami,omitempty"`
// ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
ImageLookupOrg string `json:"imageLookupOrg,omitempty"`
// InstanceType is the type of instance to create. Example: m4.xlarge
InstanceType string `json:"instanceType,omitempty"`
// AdditionalTags is the set of tags to add to an instance, in addition to the ones
// added by default by the actuator. These tags are additive. The actuator will ensure
// these tags are present, but will not remove any other tags that may exist on the
// instance.
// +optional
AdditionalTags map[string]string `json:"additionalTags,omitempty"`
// IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
// +optional
IAMInstanceProfile string `json:"iamInstanceProfile,omitempty"`
// PublicIP specifies whether the instance should get a public IP.
// Precedence for this setting is as follows:
// 1. This field if set
// 2. Cluster/flavor setting
// 3. Subnet default
// +optional
PublicIP *bool `json:"publicIP,omitempty"`
// AdditionalSecurityGroups is an array of references to security groups that should be applied to the
// instance. These security groups would be set in addition to any security groups defined
// at the cluster level or in the actuator.
// +optional
AdditionalSecurityGroups []AWSResourceReference `json:"additionalSecurityGroups,omitempty"`
// AvailabilityZone is references the AWS availability zone to use for this instance.
// If multiple subnets are matched for the availability zone, the first one return is picked.
// +optional
AvailabilityZone *string `json:"availabilityZone,omitempty"`
// Subnet is a reference to the subnet to use for this instance. If not specified,
// the cluster subnet will be used.
// +optional
Subnet *AWSResourceReference `json:"subnet,omitempty"`
// KeyName is the name of the SSH key to install on the instance.
// +optional
KeyName string `json:"keyName,omitempty"`
// RootDeviceSize is the size of the root volume.
// +optional
RootDeviceSize int64 `json:"rootDeviceSize,omitempty"`
}
// AWSMachineStatus defines the observed state of AWSMachine
type AWSMachineStatus struct {
// Ready is true when the provider resource is ready.
Ready *bool `json:"ready,omitempty"`
// Addresses contains the AWS instance associated addresses.
Addresses []v1.NodeAddress `json:"addresses,omitempty"`
// InstanceID is the instance ID of the machine created in AWS.
// +optional
InstanceID *string `json:"instanceID,omitempty"`
// InstanceState is the state of the AWS instance for this machine.
// +optional
InstanceState *InstanceState `json:"instanceState,omitempty"`
// ErrorReason will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a succinct value suitable
// for machine interpretation.
//
// This field should not be set for transitive errors that a controller
// faces that are expected to be fixed automatically over
// time (like service outages), but instead indicate that something is
// fundamentally wrong with the Machine's spec or the configuration of
// the controller, and that manual intervention is required. Examples
// of terminal errors would be invalid combinations of settings in the
// spec, values that are unsupported by the controller, or the
// responsible controller itself being critically misconfigured.
//
// Any transient errors that occur during the reconciliation of Machines
// can be added as events to the Machine object and/or logged in the
// controller's output.
// +optional
ErrorReason *common.MachineStatusError `json:"errorReason,omitempty"`
// ErrorMessage will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a more verbose string suitable
// for logging and human consumption.
//
// This field should not be set for transitive errors that a controller
// faces that are expected to be fixed automatically over
// time (like service outages), but instead indicate that something is
// fundamentally wrong with the Machine's spec or the configuration of
// the controller, and that manual intervention is required. Examples
// of terminal errors would be invalid combinations of settings in the
// spec, values that are unsupported by the controller, or the
// responsible controller itself being critically misconfigured.
//
// Any transient errors that occur during the reconciliation of Machines
// can be added as events to the Machine object and/or logged in the
// controller's output.
// +optional
ErrorMessage *string `json:"errorMessage,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AWSMachine is the Schema for the awsmachines API
// +k8s:openapi-gen=true
type AWSMachine struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AWSMachineSpec `json:"spec,omitempty"`
Status AWSMachineStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AWSMachineList contains a list of AWSMachine
type AWSMachineList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AWSMachine `json:"items"`
}
func init() {
SchemeBuilder.Register(&AWSMachine{}, &AWSMachineList{})
}
| 1 | 10,056 | I don't think you can call this optional unless it's a pointer - remove this? | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -116,6 +116,11 @@ public class OfflineEditFragment extends BaseFragment {
@OnClick(R.id.buttonSendAll)
protected void onSendAllProducts() {
+ List<SendProduct> listSaveProduct = SendProduct.listAll(SendProduct.class);
+ if (listSaveProduct.size() == 0) {
+ Toast.makeText(getActivity(), R.string.txtNoData, Toast.LENGTH_LONG).show();
+ return;
+ }
new MaterialDialog.Builder(getActivity())
.title(R.string.txtDialogsTitle)
.content(R.string.txtDialogsContentSend) | 1 | package openfoodfacts.github.scrachx.openfood.fragments;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.graphics.Bitmap;
import android.os.AsyncTask;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.ListView;
import android.widget.Toast;
import com.afollestad.materialdialogs.DialogAction;
import com.afollestad.materialdialogs.MaterialDialog;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import butterknife.BindView;
import butterknife.OnClick;
import butterknife.OnItemClick;
import butterknife.OnItemLongClick;
import openfoodfacts.github.scrachx.openfood.R;
import openfoodfacts.github.scrachx.openfood.models.ProductImageField;
import openfoodfacts.github.scrachx.openfood.models.SaveItem;
import openfoodfacts.github.scrachx.openfood.models.SendProduct;
import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIClient;
import openfoodfacts.github.scrachx.openfood.utils.Utils;
import openfoodfacts.github.scrachx.openfood.views.SaveProductOfflineActivity;
import openfoodfacts.github.scrachx.openfood.views.adapters.SaveListAdapter;
import static org.apache.commons.lang3.StringUtils.isEmpty;
import static org.apache.commons.lang3.StringUtils.isNotEmpty;
public class OfflineEditFragment extends BaseFragment {
public static final String LOG_TAG = "OFFLINE_EDIT";
@BindView(R.id.listOfflineSave) ListView listView;
@BindView(R.id.buttonSendAll) Button buttonSend;
private List<SaveItem> saveItems;
private String loginS, passS;
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
return createView(inflater, container, R.layout.fragment_offline_edit);
}
@Override
public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
super.onViewCreated(view, savedInstanceState);
final SharedPreferences settingsLogin = getContext().getSharedPreferences("login", 0);
final SharedPreferences settingsUsage = getContext().getSharedPreferences("usage", 0);
saveItems = new ArrayList<>();
loginS = settingsLogin.getString("user", "");
passS = settingsLogin.getString("pass", "");
boolean firstUse = settingsUsage.getBoolean("firstOffline", false);
if(!firstUse) {
new MaterialDialog.Builder(getContext())
.title(R.string.title_info_dialog)
.content(R.string.text_offline_info_dialog)
.onPositive(new MaterialDialog.SingleButtonCallback() {
@Override
public void onClick(@NonNull MaterialDialog dialog, @NonNull DialogAction which) {
SharedPreferences.Editor editor = settingsUsage.edit();
editor.putBoolean("firstOffline", true);
editor.apply();
}
})
.positiveText(R.string.txtOk)
.show();
}
buttonSend.setEnabled(false);
}
@OnItemClick(R.id.listOfflineSave)
protected void OnClickListOffline(int position) {
Intent intent = new Intent(getActivity(), SaveProductOfflineActivity.class);
SaveItem si = (SaveItem) listView.getItemAtPosition(position);
intent.putExtra("barcode", si.getBarcode());
startActivity(intent);
}
@OnItemLongClick(R.id.listOfflineSave)
protected boolean OnLongClickListOffline(int position) {
final int lapos = position;
new MaterialDialog.Builder(getActivity())
.title(R.string.txtDialogsTitle)
.content(R.string.txtDialogsContentDelete)
.positiveText(R.string.txtYes)
.negativeText(R.string.txtNo)
.onPositive(new MaterialDialog.SingleButtonCallback() {
@Override
public void onClick(@NonNull MaterialDialog dialog, @NonNull DialogAction which) {
String barcode = saveItems.get(lapos).getBarcode();
SendProduct.deleteAll(SendProduct.class, "barcode = ?", barcode);
final SaveListAdapter sl = (SaveListAdapter) listView.getAdapter();
saveItems.remove(lapos);
getActivity().runOnUiThread(new Runnable() {
public void run() {
sl.notifyDataSetChanged();
}
});
}
})
.show();
return true;
}
@OnClick(R.id.buttonSendAll)
protected void onSendAllProducts() {
new MaterialDialog.Builder(getActivity())
.title(R.string.txtDialogsTitle)
.content(R.string.txtDialogsContentSend)
.positiveText(R.string.txtYes)
.negativeText(R.string.txtNo)
.onPositive(new MaterialDialog.SingleButtonCallback() {
@Override
public void onClick(@NonNull MaterialDialog dialog, @NonNull DialogAction which) {
OpenFoodAPIClient apiClient = new OpenFoodAPIClient(getContext());
final List<SendProduct> listSaveProduct = SendProduct.listAll(SendProduct.class);
for (final SendProduct product : listSaveProduct) {
if (isEmpty(product.getBarcode()) || isEmpty(product.getImgupload_front())) {
continue;
}
if(!loginS.isEmpty() && !passS.isEmpty()) {
product.setUserId(loginS);
product.setPassword(passS);
}
if(isNotEmpty(product.getImgupload_ingredients())) {
product.compress(ProductImageField.INGREDIENTS);
}
if(isNotEmpty(product.getImgupload_nutrition())) {
product.compress(ProductImageField.NUTRITION);
}
if(isNotEmpty(product.getImgupload_front())) {
product.compress(ProductImageField.FRONT);
}
apiClient.post(getActivity(), product, new OpenFoodAPIClient.OnProductSentCallback() {
@Override
public void onProductSentResponse(boolean value) {
if (value) {
saveItems.remove(listSaveProduct.indexOf(product));
((SaveListAdapter) listView.getAdapter()).notifyDataSetChanged();
SendProduct.deleteAll(SendProduct.class, "barcode = ?", product.getBarcode());
}
}
});
}
}
})
.show();
}
@Override
public void onResume() {
super.onResume();
new FillAdapter().execute(getActivity());
}
public class FillAdapter extends AsyncTask<Context, Void, Context> {
@Override
protected void onPreExecute() {
saveItems.clear();
List<SendProduct> listSaveProduct = SendProduct.listAll(SendProduct.class);
if (listSaveProduct.size() == 0) {
Toast.makeText(getActivity(), R.string.txtNoData, Toast.LENGTH_LONG).show();
} else {
Toast.makeText(getActivity(), R.string.txtLoading, Toast.LENGTH_LONG).show();
}
}
@Override
protected Context doInBackground(Context... ctx) {
List<SendProduct> listSaveProduct = SendProduct.listAll(SendProduct.class);
int imageIcon = R.drawable.ic_ok;
for (SendProduct product : listSaveProduct) {
if (isEmpty(product.getBarcode()) || isEmpty(product.getImgupload_front())
|| isEmpty(product.getBrands()) || isEmpty(product.getWeight()) || isEmpty(product.getName())) {
imageIcon = R.drawable.ic_no;
}
Bitmap bitmap = Utils.decodeFile(new File(product.getImgupload_front()));
if (bitmap == null) {
Log.e(LOG_TAG, "Unable to load the image of the product: " + product.getBarcode());
continue;
}
Bitmap imgUrl = Bitmap.createScaledBitmap(bitmap, 200, 200, true);
saveItems.add(new SaveItem(product.getName(), imageIcon, imgUrl, product.getBarcode()));
}
return ctx[0];
}
@Override
protected void onPostExecute(Context ctx) {
List<SendProduct> listSaveProduct = SendProduct.listAll(SendProduct.class);
if (listSaveProduct.isEmpty()) {
return;
}
SaveListAdapter adapter = new SaveListAdapter(ctx, saveItems);
listView.setAdapter(adapter);
boolean canSend = true;
for (SendProduct sp : listSaveProduct) {
if (isEmpty(sp.getBarcode()) || isEmpty(sp.getImgupload_front())) {
canSend = false;
break;
}
}
buttonSend.setEnabled(canSend);
}
}
}
| 1 | 62,326 | @naofum please use `isEmpty`method instead | openfoodfacts-openfoodfacts-androidapp | java |
@@ -280,12 +280,12 @@ bool gen_jit_and_run(compile_t* c, int* exit_code, jit_symbol_t* symbols,
auto resolver = orc::createLambdaResolver(local_lookup, external_lookup);
#if PONY_LLVM >= 500
- auto maybe_handle = compile_layer.addModule(module, resolver);
+ auto nullable_handle = compile_layer.addModule(module, resolver);
- if(!maybe_handle)
+ if(!nullable_handle)
return false;
- auto handle = maybe_handle.get();
+ auto handle = nullable_handle.get();
#else
std::vector<decltype(module)> module_set{module};
auto handle = compile_layer.addModuleSet(std::move(module_set), mem_mgr, | 1 | #include "genjit.h"
#include "genexe.h"
#include "genopt.h"
#if PONY_LLVM >= 700
#include "llvm_config_begin.h"
# include <llvm/ExecutionEngine/ExecutionEngine.h>
# include <llvm/ExecutionEngine/JITSymbol.h>
# include <llvm/ExecutionEngine/SectionMemoryManager.h>
# include <llvm/ExecutionEngine/Orc/CompileUtils.h>
# include <llvm/ExecutionEngine/Orc/Core.h>
# include <llvm/ExecutionEngine/Orc/Legacy.h>
# include <llvm/ExecutionEngine/Orc/IRCompileLayer.h>
# include <llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h>
# include <llvm/IR/DataLayout.h>
# include <llvm/IR/Mangler.h>
# include <llvm/Support/Error.h>
# include <llvm/Target/TargetMachine.h>
#include "llvm_config_end.h"
using namespace llvm;
using namespace llvm::orc;
// Minimal ORC-based JIT engine (LLVM >= 7.0 layout) used to run a compiled
// Pony program in-process.
//
// Symbol resolution first consults JIT-compiled code, then falls back to
// symbols exported by the current process (loaded permanently in the
// constructor). Resolver failures are not thrown; they are recorded in
// `error`, which callers must check after adding a module.
class PonyJIT
{
  ExecutionSession _es;
  std::shared_ptr<SymbolResolver> _sr;
  std::unique_ptr<TargetMachine> _tm;
  const DataLayout _dl;
  RTDyldObjectLinkingLayer _obj_layer;
  IRCompileLayer<decltype(_obj_layer), SimpleCompiler> _compile_layer;
public:
  // Most recent resolver error; Error::success() when none has occurred.
  Error error;
  // NOTE: the resolver lambdas passed to createLegacyLookupResolver capture
  // `this` and touch `_compile_layer`, which is initialized *after* `_sr`
  // in member order. This is safe only because the lambdas run after
  // construction has completed.
  PonyJIT() :
    _es(),
    _sr(createLegacyLookupResolver(_es,
      [this](const std::string& name) -> JITSymbol
      {
        // Prefer symbols defined by JIT-compiled modules.
        auto symbol = _compile_layer.findSymbol(name, false);
        if (symbol) return symbol;
        auto err = symbol.takeError();
        if (err) return std::move(err);
        // Fall back to symbols exported by the host process (the runtime).
        auto symaddr = RTDyldMemoryManager::getSymbolAddressInProcess(name);
        if (symaddr)
          return JITSymbol(symaddr, JITSymbolFlags::Exported);
        return nullptr;
      },
      [this](Error err)
      {
        error = std::move(err);
      })),
    _tm(EngineBuilder().selectTarget()),
    _dl(_tm->createDataLayout()),
    _obj_layer(_es,
      [this](VModuleKey)
      {
        return RTDyldObjectLinkingLayer::Resources
        {
          std::make_shared<SectionMemoryManager>(), _sr
        };
      }),
    _compile_layer(_obj_layer, SimpleCompiler(*_tm)),
    error(Error::success())
  {
    llvm::sys::DynamicLibrary::LoadLibraryPermanently(nullptr);
  }
  // Compile `module` and make its symbols available for lookup.
  // Aborts the process (cantFail) if the compile layer rejects the module.
  VModuleKey addModule(std::unique_ptr<Module> module)
  {
    VModuleKey key = _es.allocateVModule();
    cantFail(_compile_layer.addModule(key, std::move(module)));
    return key;
  }
  // Resolve `name` (mangled per the target data layout) to its JIT-compiled
  // address. Aborts (cantFail) if the symbol cannot be materialized.
  JITTargetAddress getSymbolAddress(const std::string& name)
  {
    std::string mangled;
    raw_string_ostream mangled_stream(mangled);
    Mangler::getNameWithPrefix(mangled_stream, name, _dl);
    JITSymbol symbol = _compile_layer.findSymbol(mangled_stream.str(), false);
    return cantFail(symbol.getAddress());
  }
};
// JIT-compile the program in `c` (LLVM >= 7.0 path) and execute its `main`
// function in-process.
//
// `symbols`/`symbol_count` describe runtime globals whose JIT-resolved
// values are copied back into the caller-provided storage. On success the
// program's exit code is stored through `exit_code` (when non-null).
// Returns false on any code-gen, verification or symbol-lookup failure.
// Ownership of `c->module` is transferred to the JIT.
bool gen_jit_and_run(compile_t* c, int* exit_code, jit_symbol_t* symbols,
  size_t symbol_count)
{
  reach_type_t* main_type = reach_type_name(c->reach, "Main");
  reach_type_t* env_type = reach_type_name(c->reach, "Env");

  if(main_type == NULL || env_type == NULL)
    return false;

  // Emit the program entry point, then optimize and verify the module.
  gen_main(c, main_type, env_type);

  if(!genopt(c, true))
    return false;

  if(LLVMVerifyModule(c->module, LLVMReturnStatusAction, NULL) != 0)
    return false;

  // Hand the module over to the JIT; clear the compile context's pointer
  // because ownership now lives inside the JIT.
  PonyJIT jit;
  jit.addModule(std::unique_ptr<Module>(unwrap(c->module)));
  c->module = nullptr;

  if(jit.error)
  {
    errorf(c->opt->check.errors, nullptr, "LLVM ORC JIT Error");
    return false;
  }

  // Copy each requested runtime symbol's value back to the caller.
  for(size_t i = 0; i < symbol_count; i++)
  {
    void* sym_addr = (void*)jit.getSymbolAddress(symbols[i].name);

    if(sym_addr == nullptr)
      return false;

    memcpy(symbols[i].address, sym_addr, symbols[i].size);
  }

  using main_fn_t = int(*)(int, const char**, const char**);
  auto entry = reinterpret_cast<main_fn_t>(jit.getSymbolAddress("main"));

  if(entry == nullptr)
    return false;

  const char* argv[] = { "ponyjit", nullptr };
  const char* envp = nullptr;

  int rc = entry(1, argv, &envp);

  if(exit_code != nullptr)
    *exit_code = rc;

  return true;
}
#else
#include "llvm_config_begin.h"
# include <llvm/IR/Mangler.h>
# include <llvm/IR/Module.h>
# include <llvm/ExecutionEngine/Orc/CompileUtils.h>
# include <llvm/ExecutionEngine/Orc/IRCompileLayer.h>
# include <llvm/ExecutionEngine/Orc/LambdaResolver.h>
# if PONY_LLVM >= 500
# include <llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h>
# else
# include <llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h>
# endif
# include <llvm/ExecutionEngine/SectionMemoryManager.h>
#include "llvm_config_end.h"
namespace orc = llvm::orc;
#if PONY_LLVM >= 500
using LinkingLayerType = orc::RTDyldObjectLinkingLayer;
using CompileLayerType = orc::IRCompileLayer<LinkingLayerType,
orc::SimpleCompiler>;
using ModuleHandleType = CompileLayerType::ModuleHandleT;
#else
using LinkingLayerType = orc::ObjectLinkingLayer<orc::DoNothingOnNotifyLoaded>;
using CompileLayerType = orc::IRCompileLayer<LinkingLayerType>;
using ModuleHandleType = CompileLayerType::ModuleSetHandleT;
#endif
// Return the platform-mangled name of `val` as produced by the given LLVM
// mangler. Used to translate IR-level names into the names the JIT's symbol
// table actually contains.
static std::string mangle_symbol(llvm::Mangler& mangler,
  const llvm::GlobalValue* val)
{
  std::string name;
  llvm::raw_string_ostream out{name};
  mangler.getNameWithPrefix(out, val, false);
  // str() flushes the stream and returns the underlying string.
  return out.str();
}
// Resolve `name` to a JIT-compiled address (legacy ORC layers, LLVM < 7.0).
//
// The name is first located as a global or function in the IR module so it
// can be mangled exactly as the compiler emitted it, then looked up in the
// compile layer. Returns nullptr if the name is not present in the module
// or the JIT cannot materialize it.
static void* find_symbol(llvm::Module& module, CompileLayerType& compile_layer,
  ModuleHandleType handle, llvm::Mangler& mangler, std::string const& name)
{
  const llvm::GlobalValue* val = module.getNamedGlobal(name);
  // Not a global variable: try a function with the same name.
  if(val == nullptr)
    val = module.getFunction(name);
  if(val == nullptr)
    return nullptr;
  auto local_symbol = compile_layer.findSymbolIn(handle,
    mangle_symbol(mangler, val), false);
  if(!local_symbol)
    return nullptr;
  auto address = local_symbol.getAddress();
#if PONY_LLVM >= 500
  // LLVM >= 5.0 returns Expected<JITTargetAddress>; unwrap or fail.
  if(!address)
    return nullptr;
  return reinterpret_cast<void*>(address.get());
#else
  return reinterpret_cast<void*>(address);
#endif
}
// JIT-compile the program in `c` and run its `main` in-process (legacy ORC
// API path, LLVM < 7.0; the exact layer/lookup types vary per version via
// the PONY_LLVM conditionals).
//
// `symbols`/`symbol_count` describe runtime globals whose JIT-resolved
// values are copied into the caller's storage. On success the program's
// exit code is stored through `exit_code` (when non-null). Returns false on
// any code-gen, verification or symbol-lookup failure.
bool gen_jit_and_run(compile_t* c, int* exit_code, jit_symbol_t* symbols,
  size_t symbol_count)
{
  reach_type_t* t_main = reach_type_name(c->reach, "Main");
  reach_type_t* t_env = reach_type_name(c->reach, "Env");
  if ((t_main == NULL) || (t_env == NULL))
    return false;
  // Emit the entry point, then optimize and verify the module.
  gen_main(c, t_main, t_env);
  if (!genopt(c, true))
    return false;
  if (LLVMVerifyModule(c->module, LLVMReturnStatusAction, NULL) != 0)
    return false;
  // The Orc JIT wants the module in a shared_ptr, but we don't want to transfer
  // ownership to that shared_ptr. Use an empty deleter so that the module
  // doesn't get freed when the shared_ptr is destroyed.
  auto noop_deleter = [](void* p){ (void)p; };
  std::shared_ptr<llvm::Module> module{llvm::unwrap(c->module), noop_deleter};
  auto machine = reinterpret_cast<llvm::TargetMachine*>(c->machine);
  auto mem_mgr = std::make_shared<llvm::SectionMemoryManager>();
  LinkingLayerType linking_layer{
#if PONY_LLVM >= 500
    [&mem_mgr]{ return mem_mgr; }
#endif
  };
  CompileLayerType compile_layer{linking_layer, orc::SimpleCompiler{*machine}};
  // Lookup within JIT-compiled code only.
  auto local_lookup = [&compile_layer](llvm::StringRef name)
  {
#if PONY_LLVM >= 400
    return compile_layer.findSymbol(name, false);
#else
    if(auto sym = compile_layer.findSymbol(name, false))
      return sym.toRuntimeDyldSymbol();
    return llvm::RuntimeDyld::SymbolInfo{nullptr};
#endif
  };
  // Fallback lookup for symbols exported by the host process (the runtime).
  auto external_lookup = [
#if PONY_LLVM >= 500
    &mem_mgr
#endif
  ](llvm::StringRef name)
  {
#if PONY_LLVM >= 500
    return mem_mgr->findSymbol(name);
#else
# if PONY_LLVM >= 400
    using SymType = llvm::JITSymbol;
# else
    using SymType = llvm::RuntimeDyld::SymbolInfo;
# endif
    if(auto sym = llvm::RTDyldMemoryManager::getSymbolAddressInProcess(name))
      return SymType{sym, llvm::JITSymbolFlags::Exported};
    return SymType{nullptr};
#endif
  };
  auto resolver = orc::createLambdaResolver(local_lookup, external_lookup);
#if PONY_LLVM >= 500
  auto maybe_handle = compile_layer.addModule(module, resolver);
  if(!maybe_handle)
    return false;
  auto handle = maybe_handle.get();
#else
  std::vector<decltype(module)> module_set{module};
  auto handle = compile_layer.addModuleSet(std::move(module_set), mem_mgr,
    std::move(resolver));
#endif
  llvm::Mangler mangler{};
  // Copy each requested runtime symbol's value back to the caller.
  for(size_t i = 0; i < symbol_count; i++)
  {
    auto address = find_symbol(*module, compile_layer, handle, mangler,
      symbols[i].name);
    if(address == nullptr)
      return false;
    memcpy(symbols[i].address, address, symbols[i].size);
  }
  // Named `local_main` to avoid shadowing the global `main`.
  auto local_main = reinterpret_cast<int(*)(int, const char**, const char**)>(
    find_symbol(*module, compile_layer, handle, mangler, "main"));
  if(local_main == nullptr)
    return false;
  const char* argv[] = {"ponyjit", nullptr};
  const char* envp = NULL;
  int ec = local_main(1, argv, &envp);
  if(exit_code != nullptr)
    *exit_code = ec;
  return true;
}
#endif
| 1 | 13,854 | i think this is unrelated and needs to be reverted. need to discuss at sync. | ponylang-ponyc | c |
@@ -19,8 +19,10 @@ import org.hyperledger.besu.datatypes.Hash;
import java.util.Optional;
+import org.apache.tuweni.bytes.Bytes;
+
/** A block header capable of being processed. */
-public class ProcessableBlockHeader {
+public class ProcessableBlockHeader implements org.hyperledger.besu.plugin.data.BlockHeader {
protected final Hash parentHash;
| 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.core;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
import java.util.Optional;
/**
 * A block header capable of being processed.
 *
 * <p>Holds only the fields known before a block's transactions are executed;
 * execution-dependent fields (state root, receipts root, etc.) live in
 * subclasses.
 */
public class ProcessableBlockHeader {
  protected final Hash parentHash;
  protected final Address coinbase;
  protected final Difficulty difficulty;
  protected final long number;
  protected final long gasLimit;
  // The block creation timestamp (seconds since the unix epoch)
  protected final long timestamp;
  // base fee is included for post EIP-1559 blocks
  protected final Long baseFee;

  protected ProcessableBlockHeader(
      final Hash parentHash,
      final Address coinbase,
      final Difficulty difficulty,
      final long number,
      final long gasLimit,
      final long timestamp,
      final Long baseFee) {
    this.parentHash = parentHash;
    this.coinbase = coinbase;
    this.difficulty = difficulty;
    this.number = number;
    this.gasLimit = gasLimit;
    this.timestamp = timestamp;
    this.baseFee = baseFee;
  }

  /**
   * Returns the block parent block hash.
   *
   * @return the block parent block hash
   */
  public Hash getParentHash() {
    return parentHash;
  }

  /**
   * Returns the block coinbase address.
   *
   * @return the block coinbase address
   */
  public Address getCoinbase() {
    return coinbase;
  }

  /**
   * Returns the block difficulty.
   *
   * @return the block difficulty
   */
  public Difficulty getDifficulty() {
    return difficulty;
  }

  /**
   * Returns the block number.
   *
   * @return the block number
   */
  public long getNumber() {
    return number;
  }

  /**
   * Return the block gas limit.
   *
   * @return the block gas limit
   */
  public long getGasLimit() {
    return gasLimit;
  }

  /**
   * Return the block timestamp.
   *
   * @return the block timestamp
   */
  public long getTimestamp() {
    return timestamp;
  }

  /**
   * Returns the base fee of the block, if present (post EIP-1559 blocks only).
   *
   * @return an {@link Optional} containing the block base fee, or empty for
   *     pre-EIP-1559 blocks
   */
  public Optional<Long> getBaseFee() {
    return Optional.ofNullable(baseFee);
  }
}
| 1 | 25,984 | Are we eventually moving those type interfaces from plugin project to datatype project? If not, then we'd have a dependency from core onto plugins, which seems a little counter-intuitive to me. | hyperledger-besu | java |
@@ -1372,6 +1372,14 @@ Connection.prototype.setClient = function setClient(client) {
return this;
};
+Connection.prototype.syncIndexes = async function syncIndexes() {
+ const result = [];
+ for (const model in this.models) {
+ result.push(await this.model(model).syncIndexes());
+ }
+ return result;
+};
+
/**
* Switches to a different database using the same connection pool.
* | 1 | 'use strict';
/*!
* Module dependencies.
*/
const ChangeStream = require('./cursor/ChangeStream');
const EventEmitter = require('events').EventEmitter;
const Schema = require('./schema');
const Collection = require('./driver').get().Collection;
const STATES = require('./connectionstate');
const MongooseError = require('./error/index');
const PromiseProvider = require('./promise_provider');
const ServerSelectionError = require('./error/serverSelection');
const applyPlugins = require('./helpers/schema/applyPlugins');
const promiseOrCallback = require('./helpers/promiseOrCallback');
const get = require('./helpers/get');
const immediate = require('./helpers/immediate');
const mongodb = require('mongodb');
const pkg = require('../package.json');
const utils = require('./utils');
const processConnectionOptions = require('./helpers/processConnectionOptions');
const arrayAtomicsSymbol = require('./helpers/symbols').arrayAtomicsSymbol;
const sessionNewDocuments = require('./helpers/symbols').sessionNewDocuments;
/*!
* A list of authentication mechanisms that don't require a password for authentication.
* This is used by the authMechanismDoesNotRequirePassword method.
*
* @api private
*/
const noPasswordAuthMechanisms = [
'MONGODB-X509'
];
/**
* Connection constructor
*
* For practical reasons, a Connection equals a Db.
*
* @param {Mongoose} base a mongoose instance
* @inherits NodeJS EventEmitter http://nodejs.org/api/events.html#events_class_events_eventemitter
* @event `connecting`: Emitted when `connection.openUri()` is executed on this connection.
* @event `connected`: Emitted when this connection successfully connects to the db. May be emitted _multiple_ times in `reconnected` scenarios.
* @event `open`: Emitted after we `connected` and `onOpen` is executed on all of this connections models.
* @event `disconnecting`: Emitted when `connection.close()` was executed.
* @event `disconnected`: Emitted after getting disconnected from the db.
* @event `close`: Emitted after we `disconnected` and `onClose` executed on all of this connections models.
* @event `reconnected`: Emitted after we `connected` and subsequently `disconnected`, followed by successfully another successful connection.
* @event `error`: Emitted when an error occurs on this connection.
* @event `fullsetup`: Emitted after the driver has connected to primary and all secondaries if specified in the connection string.
* @api public
*/
function Connection(base) {
  // Mongoose instance that owns this connection.
  this.base = base;

  // Per-connection registries and settings.
  this.collections = {};
  this.models = {};
  this.config = {};
  this.replica = false;
  this.options = null;
  this.otherDbs = []; // FIXME: To be replaced with relatedDbs
  this.relatedDbs = {}; // Hashmap of other dbs that share underlying connection

  // Connection state tracking.
  this.states = STATES;
  this._readyState = STATES.disconnected;
  this._closeCalled = false;
  this._hasOpened = false;
  this.plugins = [];

  // Position of this connection within `base.connections` (0 for the default
  // connection, or when no mongoose instance was supplied).
  const hasSiblings = typeof base !== 'undefined' && base.connections.length;
  this.id = hasSiblings ? base.connections.length : 0;

  // Operations buffered while the connection is not yet ready.
  this._queue = [];
}
/*!
* Inherit from EventEmitter
*/
Connection.prototype.__proto__ = EventEmitter.prototype;
/**
* Connection ready state
*
* - 0 = disconnected
* - 1 = connected
* - 2 = connecting
* - 3 = disconnecting
*
* Each state change emits its associated event name.
*
* ####Example
*
* conn.on('connected', callback);
* conn.on('disconnected', callback);
*
* @property readyState
* @memberOf Connection
* @instance
* @api public
*/
Object.defineProperty(Connection.prototype, 'readyState', {
  get: function() {
    return this._readyState;
  },
  set: function(val) {
    // Only the states defined in `./connectionstate` are legal values.
    if (!(val in STATES)) {
      throw new Error('Invalid connection state: ' + val);
    }
    if (this._readyState !== val) {
      this._readyState = val;
      // [legacy] loop over the otherDbs on this connection and change their state
      for (const db of this.otherDbs) {
        db.readyState = val;
      }
      if (STATES.connected === val) {
        this._hasOpened = true;
      }
      // Emit the state's name ('connected', 'disconnected', ...) as an event.
      this.emit(STATES[val]);
    }
  }
});
/**
* Gets the value of the option `key`. Equivalent to `conn.options[key]`
*
* ####Example:
*
* conn.get('test'); // returns the 'test' value
*
* @param {String} key
* @method get
* @api public
*/
Connection.prototype.get = function(key) {
  // Connection-level config keys take precedence over driver options.
  const isConfigKey = Object.prototype.hasOwnProperty.call(this.config, key);
  return isConfigKey ? this.config[key] : get(this.options, key);
};
/**
* Sets the value of the option `key`. Equivalent to `conn.options[key] = val`
*
* Supported options include:
*
* - `maxTimeMS`: Set [`maxTimeMS`](/docs/api.html#query_Query-maxTimeMS) for all queries on this connection.
*
* ####Example:
*
* conn.set('test', 'foo');
* conn.get('test'); // 'foo'
* conn.options.test; // 'foo'
*
* @param {String} key
* @param {Any} val
* @method set
* @api public
*/
Connection.prototype.set = function(key, val) {
  // Known config keys are stored on `config`; everything else goes into the
  // driver options hash, which is created lazily.
  const target = this.config.hasOwnProperty(key) ?
    this.config :
    (this.options = this.options || {});
  target[key] = val;
  return val;
};
/**
* A hash of the collections associated with this connection
*
* @property collections
* @memberOf Connection
* @instance
* @api public
*/
Connection.prototype.collections;
/**
* The name of the database this connection points to.
*
* ####Example
*
* mongoose.createConnection('mongodb://localhost:27017/mydb').name; // "mydb"
*
* @property name
* @memberOf Connection
* @instance
* @api public
*/
Connection.prototype.name;
/**
* A [POJO](https://masteringjs.io/tutorials/fundamentals/pojo) containing
* a map from model names to models. Contains all models that have been
* added to this connection using [`Connection#model()`](/docs/api/connection.html#connection_Connection-model).
*
* ####Example
*
* const conn = mongoose.createConnection();
* const Test = conn.model('Test', mongoose.Schema({ name: String }));
*
* Object.keys(conn.models).length; // 1
* conn.models.Test === Test; // true
*
* @property models
* @memberOf Connection
* @instance
* @api public
*/
Connection.prototype.models;
/**
* A number identifier for this connection. Used for debugging when
* you have [multiple connections](/docs/connections.html#multiple_connections).
*
* ####Example
*
* // The default connection has `id = 0`
* mongoose.connection.id; // 0
*
* // If you create a new connection, Mongoose increments id
* const conn = mongoose.createConnection();
* conn.id; // 1
*
* @property id
* @memberOf Connection
* @instance
* @api public
*/
Connection.prototype.id;
/**
* The plugins that will be applied to all models created on this connection.
*
* ####Example:
*
* const db = mongoose.createConnection('mongodb://localhost:27017/mydb');
* db.plugin(() => console.log('Applied'));
* db.plugins.length; // 1
*
* db.model('Test', new Schema({})); // Prints "Applied"
*
* @property plugins
* @memberOf Connection
* @instance
* @api public
*/
Object.defineProperty(Connection.prototype, 'plugins', {
configurable: false,
enumerable: true,
writable: true
});
/**
* The host name portion of the URI. If multiple hosts, such as a replica set,
* this will contain the first host name in the URI
*
* ####Example
*
* mongoose.createConnection('mongodb://localhost:27017/mydb').host; // "localhost"
*
* @property host
* @memberOf Connection
* @instance
* @api public
*/
Object.defineProperty(Connection.prototype, 'host', {
configurable: true,
enumerable: true,
writable: true
});
/**
* The port portion of the URI. If multiple hosts, such as a replica set,
* this will contain the port from the first host name in the URI.
*
* ####Example
*
* mongoose.createConnection('mongodb://localhost:27017/mydb').port; // 27017
*
* @property port
* @memberOf Connection
* @instance
* @api public
*/
Object.defineProperty(Connection.prototype, 'port', {
configurable: true,
enumerable: true,
writable: true
});
/**
* The username specified in the URI
*
* ####Example
*
* mongoose.createConnection('mongodb://val:psw@localhost:27017/mydb').user; // "val"
*
* @property user
* @memberOf Connection
* @instance
* @api public
*/
Object.defineProperty(Connection.prototype, 'user', {
configurable: true,
enumerable: true,
writable: true
});
/**
* The password specified in the URI
*
* ####Example
*
* mongoose.createConnection('mongodb://val:psw@localhost:27017/mydb').pass; // "psw"
*
* @property pass
* @memberOf Connection
* @instance
* @api public
*/
Object.defineProperty(Connection.prototype, 'pass', {
configurable: true,
enumerable: true,
writable: true
});
/**
* The mongodb.Db instance, set when the connection is opened
*
* @property db
* @memberOf Connection
* @instance
* @api public
*/
Connection.prototype.db;
/**
* The MongoClient instance this connection uses to talk to MongoDB. Mongoose automatically sets this property
* when the connection is opened.
*
* @property client
* @memberOf Connection
* @instance
* @api public
*/
Connection.prototype.client;
/**
* A hash of the global options that are associated with this connection
*
* @property config
* @memberOf Connection
* @instance
* @api public
*/
Connection.prototype.config;
/**
* Helper for `createCollection()`. Will explicitly create the given collection
* with specified options. Used to create [capped collections](https://docs.mongodb.com/manual/core/capped-collections/)
* and [views](https://docs.mongodb.com/manual/core/views/) from mongoose.
*
* Options are passed down without modification to the [MongoDB driver's `createCollection()` function](http://mongodb.github.io/node-mongodb-native/2.2/api/Db.html#createCollection)
*
* @method createCollection
* @param {string} collection The collection to create
* @param {Object} [options] see [MongoDB driver docs](http://mongodb.github.io/node-mongodb-native/2.2/api/Db.html#createCollection)
* @param {Function} [callback]
* @return {Promise}
* @api public
*/
Connection.prototype.createCollection = _wrapConnHelper(function createCollection(collection, options, cb) {
  // Normalize the optional `options` argument: `(name, callback)` is allowed.
  const optionsOmitted = typeof options === 'function';
  const callback = optionsOmitted ? options : cb;
  const opts = optionsOmitted ? {} : options;

  this.db.createCollection(collection, opts, callback);
});
/**
* _Requires MongoDB >= 3.6.0._ Starts a [MongoDB session](https://docs.mongodb.com/manual/release-notes/3.6/#client-sessions)
* for benefits like causal consistency, [retryable writes](https://docs.mongodb.com/manual/core/retryable-writes/),
* and [transactions](http://thecodebarbarian.com/a-node-js-perspective-on-mongodb-4-transactions.html).
*
* ####Example:
*
* const session = await conn.startSession();
* let doc = await Person.findOne({ name: 'Ned Stark' }, null, { session });
* await doc.remove();
* // `doc` will always be null, even if reading from a replica set
* // secondary. Without causal consistency, it is possible to
* // get a doc back from the below query if the query reads from a
* // secondary that is experiencing replication lag.
* doc = await Person.findOne({ name: 'Ned Stark' }, null, { session, readPreference: 'secondary' });
*
*
* @method startSession
* @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/3.0/api/MongoClient.html#startSession)
* @param {Boolean} [options.causalConsistency=true] set to false to disable causal consistency
* @param {Function} [callback]
* @return {Promise<ClientSession>} promise that resolves to a MongoDB driver `ClientSession`
* @api public
*/
Connection.prototype.startSession = _wrapConnHelper(function startSession(options, cb) {
  // Allow calling as `startSession(cb)` with no options object.
  if (typeof options === 'function') {
    cb = options;
    options = null;
  }

  // Sessions are created synchronously by the driver; report via the
  // callback for API consistency with the other helpers.
  cb(null, this.client.startSession(options));
});
/**
* _Requires MongoDB >= 3.6.0._ Executes the wrapped async function
* in a transaction. Mongoose will commit the transaction if the
* async function executes successfully and attempt to retry if
* there was a retriable error.
*
* Calls the MongoDB driver's [`session.withTransaction()`](http://mongodb.github.io/node-mongodb-native/3.5/api/ClientSession.html#withTransaction),
* but also handles resetting Mongoose document state as shown below.
*
* ####Example:
*
* const doc = new Person({ name: 'Will Riker' });
* await db.transaction(async function setRank(session) {
* doc.rank = 'Captain';
* await doc.save({ session });
* doc.isNew; // false
*
* // Throw an error to abort the transaction
* throw new Error('Oops!');
* },{ readPreference: 'primary' }).catch(() => {});
*
* // true, `transaction()` reset the document's state because the
* // transaction was aborted.
* doc.isNew;
*
* @method transaction
* @param {Function} fn Function to execute in a transaction
* @param {mongodb.TransactionOptions} [options] Optional settings for the transaction
* @return {Promise<Any>} promise that is fulfilled if Mongoose successfully committed the transaction, or rejects if the transaction was aborted or if Mongoose failed to commit the transaction. If fulfilled, the promise resolves to a MongoDB command result.
* @api public
*/
Connection.prototype.transaction = function transaction(fn, options) {
  return this.startSession().then(session => {
    // Track documents created/modified inside the transaction so their
    // in-memory state can be rolled back if the transaction aborts.
    session[sessionNewDocuments] = new Map();
    return session.withTransaction(() => fn(session), options).
      then(res => {
        delete session[sessionNewDocuments];
        return res;
      }).
      catch(err => {
        // If transaction was aborted, we need to reset newly
        // inserted documents' `isNew`.
        for (const doc of session[sessionNewDocuments].keys()) {
          const state = session[sessionNewDocuments].get(doc);
          // NOTE(review): the key checked here is 'isNew' but the value read
          // is '$isNew' — confirm which key the saved state actually stores.
          if (state.hasOwnProperty('isNew')) {
            doc.$isNew = state.$isNew;
          }
          // Restore the version key to its pre-transaction value.
          if (state.hasOwnProperty('versionKey')) {
            doc.set(doc.schema.options.versionKey, state.versionKey);
          }
          // Re-mark paths that were modified before the transaction started.
          for (const path of state.modifiedPaths) {
            doc.$__.activePaths.paths[path] = 'modify';
            doc.$__.activePaths.states.modify[path] = true;
          }
          // Restore pending array atomics (e.g. $push) on array values.
          for (const path of state.atomics.keys()) {
            const val = doc.$__getValue(path);
            if (val == null) {
              continue;
            }
            val[arrayAtomicsSymbol] = state.atomics.get(path);
          }
        }
        delete session[sessionNewDocuments];
        throw err;
      });
  });
};
/**
* Helper for `dropCollection()`. Will delete the given collection, including
* all documents and indexes.
*
* @method dropCollection
* @param {string} collection The collection to delete
* @param {Function} [callback]
* @return {Promise}
* @api public
*/
Connection.prototype.dropCollection = _wrapConnHelper(function dropCollection(collection, cb) {
  // Delegates directly to the driver; buffering while disconnected is
  // handled by `_wrapConnHelper()`.
  this.db.dropCollection(collection, cb);
});
/**
* Helper for `dropDatabase()`. Deletes the given database, including all
* collections, documents, and indexes.
*
* ####Example:
*
* const conn = mongoose.createConnection('mongodb://localhost:27017/mydb');
* // Deletes the entire 'mydb' database
* await conn.dropDatabase();
*
* @method dropDatabase
* @param {Function} [callback]
* @return {Promise}
* @api public
*/
Connection.prototype.dropDatabase = _wrapConnHelper(function dropDatabase(cb) {
  // Clear each model's `$init` marker so its collection is re-initialized if
  // used again after the drop. Calling `dropDatabase()` right after
  // `mongoose.connect()` but before creating models is common enough that we
  // support it explicitly. See gh-6967
  for (const model of Object.values(this.models)) {
    delete model.$init;
  }

  this.db.dropDatabase(cb);
});
/*!
* ignore
*/
/*!
 * Wrap a connection helper so it supports both callback and promise styles,
 * and buffers the call while the connection is still connecting (when
 * buffering is enabled). The wrapped `fn` must take a trailing callback.
 */
function _wrapConnHelper(fn) {
  return function() {
    // The last argument, if a function, is treated as the callback.
    const cb = arguments.length > 0 ? arguments[arguments.length - 1] : null;
    const argsWithoutCb = typeof cb === 'function' ?
      Array.prototype.slice.call(arguments, 0, arguments.length - 1) :
      Array.prototype.slice.call(arguments);
    // Built eagerly so the error message captures the connection id and
    // helper name at call time.
    const disconnectedError = new MongooseError('Connection ' + this.id +
      ' was disconnected when calling `' + fn.name + '`');
    return promiseOrCallback(cb, cb => {
      // `immediate()` defers execution so the ready-state check happens
      // after any synchronous state changes in the current tick.
      immediate(() => {
        if ((this.readyState === STATES.connecting || this.readyState === STATES.disconnected) && this._shouldBufferCommands()) {
          // Not connected yet and buffering is on: queue for `onOpen()`.
          this._queue.push({ fn: fn, ctx: this, args: argsWithoutCb.concat([cb]) });
        } else if (this.readyState === STATES.disconnected && this.db == null) {
          // Disconnected with buffering off: fail fast.
          cb(disconnectedError);
        } else {
          try {
            fn.apply(this, argsWithoutCb.concat([cb]));
          } catch (err) {
            return cb(err);
          }
        }
      });
    });
  };
}
/*!
* ignore
*/
Connection.prototype._shouldBufferCommands = function _shouldBufferCommands() {
  // Precedence: connection-level setting, then the global mongoose
  // setting, then the default (buffering enabled).
  const connSetting = this.config.bufferCommands;
  if (connSetting != null) {
    return connSetting;
  }

  const globalSetting = this.base.get('bufferCommands');
  return globalSetting != null ? globalSetting : true;
};
/**
* error
*
* Graceful error handling, passes error to callback
* if available, else emits error on the connection.
*
* @param {Error} err
* @param {Function} callback optional
* @api private
*/
Connection.prototype.error = function(err, callback) {
  // With a callback, report through it and short-circuit.
  if (callback) {
    callback(err);
    return null;
  }

  // Only emit when someone is listening, to avoid an unhandled 'error'
  // event crashing the process.
  const hasErrorListeners = this.listeners('error').length > 0;
  if (hasErrorListeners) {
    this.emit('error', err);
  }

  return Promise.reject(err);
};
/**
* Called when the connection is opened
*
* @api private
*/
Connection.prototype.onOpen = function() {
  this.readyState = STATES.connected;

  // Flush every operation that was buffered while connecting.
  for (const op of this._queue) {
    op.fn.apply(op.ctx, op.args);
  }
  this._queue = [];

  // avoid having the collection subscribe to our event emitter
  // to prevent 0.3 warning
  for (const name in this.collections) {
    if (utils.object.hasOwnProperty(this.collections, name)) {
      this.collections[name].onOpen();
    }
  }

  this.emit('open');
};
/**
* Opens the connection with a URI using `MongoClient.connect()`.
*
* @param {String} uri The URI to connect with.
* @param {Object} [options] Passed on to http://mongodb.github.io/node-mongodb-native/2.2/api/MongoClient.html#connect
* @param {Boolean} [options.bufferCommands=true] Mongoose specific option. Set to false to [disable buffering](http://mongoosejs.com/docs/faq.html#callback_never_executes) on all models associated with this connection.
* @param {Number} [options.bufferTimeoutMS=10000] Mongoose specific option. If `bufferCommands` is true, Mongoose will throw an error after `bufferTimeoutMS` if the operation is still buffered.
* @param {String} [options.dbName] The name of the database we want to use. If not provided, use database name from connection string.
* @param {String} [options.user] username for authentication, equivalent to `options.auth.user`. Maintained for backwards compatibility.
* @param {String} [options.pass] password for authentication, equivalent to `options.auth.password`. Maintained for backwards compatibility.
* @param {Number} [options.maxPoolSize=100] The maximum number of sockets the MongoDB driver will keep open for this connection. Keep in mind that MongoDB only allows one operation per socket at a time, so you may want to increase this if you find you have a few slow queries that are blocking faster queries from proceeding. See [Slow Trains in MongoDB and Node.js](http://thecodebarbarian.com/slow-trains-in-mongodb-and-nodejs).
* @param {Number} [options.minPoolSize=0] The minimum number of sockets the MongoDB driver will keep open for this connection. Keep in mind that MongoDB only allows one operation per socket at a time, so you may want to increase this if you find you have a few slow queries that are blocking faster queries from proceeding. See [Slow Trains in MongoDB and Node.js](http://thecodebarbarian.com/slow-trains-in-mongodb-and-nodejs).
* @param {Number} [options.serverSelectionTimeoutMS] If `useUnifiedTopology = true`, the MongoDB driver will try to find a server to send any given operation to, and keep retrying for `serverSelectionTimeoutMS` milliseconds before erroring out. If not set, the MongoDB driver defaults to using `30000` (30 seconds).
* @param {Number} [options.heartbeatFrequencyMS] If `useUnifiedTopology = true`, the MongoDB driver sends a heartbeat every `heartbeatFrequencyMS` to check on the status of the connection. A heartbeat is subject to `serverSelectionTimeoutMS`, so the MongoDB driver will retry failed heartbeats for up to 30 seconds by default. Mongoose only emits a `'disconnected'` event after a heartbeat has failed, so you may want to decrease this setting to reduce the time between when your server goes down and when Mongoose emits `'disconnected'`. We recommend you do **not** set this setting below 1000, too many heartbeats can lead to performance degradation.
* @param {Boolean} [options.autoIndex=true] Mongoose-specific option. Set to false to disable automatic index creation for all models associated with this connection.
* @param {Class} [options.promiseLibrary] Sets the [underlying driver's promise library](http://mongodb.github.io/node-mongodb-native/3.1/api/MongoClient.html).
* @param {Number} [options.connectTimeoutMS=30000] How long the MongoDB driver will wait before killing a socket due to inactivity _during initial connection_. Defaults to 30000. This option is passed transparently to [Node.js' `socket#setTimeout()` function](https://nodejs.org/api/net.html#net_socket_settimeout_timeout_callback).
* @param {Number} [options.socketTimeoutMS=30000] How long the MongoDB driver will wait before killing a socket due to inactivity _after initial connection_. A socket may be inactive because of either no activity or a long-running operation. This is set to `30000` by default, you should set this to 2-3x your longest running operation if you expect some of your database operations to run longer than 20 seconds. This option is passed to [Node.js `socket#setTimeout()` function](https://nodejs.org/api/net.html#net_socket_settimeout_timeout_callback) after the MongoDB driver successfully completes.
* @param {Number} [options.family=0] Passed transparently to [Node.js' `dns.lookup()`](https://nodejs.org/api/dns.html#dns_dns_lookup_hostname_options_callback) function. May be either `0, `4`, or `6`. `4` means use IPv4 only, `6` means use IPv6 only, `0` means try both.
* @param {Boolean} [options.autoCreate=false] Set to `true` to make Mongoose automatically call `createCollection()` on every model created on this connection.
* @param {Function} [callback]
* @returns {Connection} this
* @api public
*/
Connection.prototype.openUri = function(uri, options, callback) {
if (typeof options === 'function') {
callback = options;
options = null;
}
if (['string', 'number'].indexOf(typeof options) !== -1) {
throw new MongooseError('Mongoose 5.x no longer supports ' +
'`mongoose.connect(host, dbname, port)` or ' +
'`mongoose.createConnection(host, dbname, port)`. See ' +
'http://mongoosejs.com/docs/connections.html for supported connection syntax');
}
if (typeof uri !== 'string') {
throw new MongooseError('The `uri` parameter to `openUri()` must be a ' +
`string, got "${typeof uri}". Make sure the first parameter to ` +
'`mongoose.connect()` or `mongoose.createConnection()` is a string.');
}
if (callback != null && typeof callback !== 'function') {
throw new MongooseError('3rd parameter to `mongoose.connect()` or ' +
'`mongoose.createConnection()` must be a function, got "' +
typeof callback + '"');
}
if (this.readyState === STATES.connecting || this.readyState === STATES.connected) {
if (this._connectionString !== uri) {
throw new MongooseError('Can\'t call `openUri()` on an active connection with ' +
'different connection strings. Make sure you aren\'t calling `mongoose.connect()` ' +
'multiple times. See: https://mongoosejs.com/docs/connections.html#multiple_connections');
}
if (typeof callback === 'function') {
this.$initialConnection = this.$initialConnection.then(
() => callback(null, this),
err => callback(err)
);
}
return this;
}
this._connectionString = uri;
this.readyState = STATES.connecting;
this._closeCalled = false;
const Promise = PromiseProvider.get();
const _this = this;
options = processConnectionOptions(uri, options);
if (options) {
options = utils.clone(options);
const autoIndex = options.config && options.config.autoIndex != null ?
options.config.autoIndex :
options.autoIndex;
if (autoIndex != null) {
this.config.autoIndex = autoIndex !== false;
delete options.config;
delete options.autoIndex;
}
if ('autoCreate' in options) {
this.config.autoCreate = !!options.autoCreate;
delete options.autoCreate;
}
if ('sanitizeFilter' in options) {
this.config.sanitizeFilter = options.sanitizeFilter;
delete options.sanitizeFilter;
}
// Backwards compat
if (options.user || options.pass) {
options.auth = options.auth || {};
options.auth.username = options.user;
options.auth.password = options.pass;
this.user = options.user;
this.pass = options.pass;
}
delete options.user;
delete options.pass;
if (options.bufferCommands != null) {
this.config.bufferCommands = options.bufferCommands;
delete options.bufferCommands;
}
} else {
options = {};
}
this._connectionOptions = options;
const dbName = options.dbName;
if (dbName != null) {
this.$dbName = dbName;
}
delete options.dbName;
if (!utils.hasUserDefinedProperty(options, 'driverInfo')) {
options.driverInfo = {
name: 'Mongoose',
version: pkg.version
};
}
const promise = new Promise((resolve, reject) => {
let client;
try {
client = new mongodb.MongoClient(uri, options);
} catch (error) {
_this.readyState = STATES.disconnected;
return reject(error);
}
_this.client = client;
client.setMaxListeners(0);
client.connect((error) => {
if (error) {
return reject(error);
}
_setClient(_this, client, options, dbName);
resolve(_this);
});
});
const serverSelectionError = new ServerSelectionError();
this.$initialConnection = promise.
then(() => this).
catch(err => {
this.readyState = STATES.disconnected;
if (err != null && err.name === 'MongoServerSelectionError') {
err = serverSelectionError.assimilateError(err);
}
if (this.listeners('error').length > 0) {
immediate(() => this.emit('error', err));
}
throw err;
});
if (callback != null) {
this.$initialConnection = this.$initialConnection.then(
() => { callback(null, this); return this; },
err => callback(err)
);
}
return this.$initialConnection;
};
/*!
* ignore
*/
function _setClient(conn, client, options, dbName) {
const db = dbName != null ? client.db(dbName) : client.db();
conn.db = db;
conn.client = client;
conn.host = get(client, 's.options.hosts.0.host', void 0);
conn.port = get(client, 's.options.hosts.0.port', void 0);
conn.name = dbName != null ? dbName : get(client, 's.options.dbName', void 0);
conn._closeCalled = client._closeCalled;
const _handleReconnect = () => {
// If we aren't disconnected, we assume this reconnect is due to a
// socket timeout. If there's no activity on a socket for
// `socketTimeoutMS`, the driver will attempt to reconnect and emit
// this event.
if (conn.readyState !== STATES.connected) {
conn.readyState = STATES.connected;
conn.emit('reconnect');
conn.emit('reconnected');
conn.onOpen();
}
};
const type = get(client, 'topology.description.type', '');
if (type === 'Single') {
client.on('serverDescriptionChanged', ev => {
const newDescription = ev.newDescription;
if (newDescription.type === 'Standalone') {
_handleReconnect();
} else {
conn.readyState = STATES.disconnected;
}
});
} else if (type.startsWith('ReplicaSet')) {
client.on('topologyDescriptionChanged', ev => {
// Emit disconnected if we've lost connectivity to the primary
const description = ev.newDescription;
if (conn.readyState === STATES.connected && description.type !== 'ReplicaSetWithPrimary') {
// Implicitly emits 'disconnected'
conn.readyState = STATES.disconnected;
} else if (conn.readyState === STATES.disconnected && description.type === 'ReplicaSetWithPrimary') {
_handleReconnect();
}
});
}
conn.onOpen();
for (const i in conn.collections) {
if (utils.object.hasOwnProperty(conn.collections, i)) {
conn.collections[i].onOpen();
}
}
}
/**
* Closes the connection
*
* @param {Boolean} [force] optional
* @param {Function} [callback] optional
* @return {Promise}
* @api public
*/
Connection.prototype.close = function(force, callback) {
if (typeof force === 'function') {
callback = force;
force = false;
}
this.$wasForceClosed = !!force;
return promiseOrCallback(callback, cb => {
this._close(force, cb);
});
};
/**
* Handles closing the connection
*
* @param {Boolean} force
* @param {Function} callback
* @api private
*/
Connection.prototype._close = function(force, callback) {
const _this = this;
const closeCalled = this._closeCalled;
this._closeCalled = true;
if (this.client != null) {
this.client._closeCalled = true;
}
switch (this.readyState) {
case STATES.disconnected:
if (closeCalled) {
callback();
} else {
this.doClose(force, function(err) {
if (err) {
return callback(err);
}
_this.onClose(force);
callback(null);
});
}
break;
case STATES.connected:
this.readyState = STATES.disconnecting;
this.doClose(force, function(err) {
if (err) {
return callback(err);
}
_this.onClose(force);
callback(null);
});
break;
case STATES.connecting:
this.once('open', function() {
_this.close(callback);
});
break;
case STATES.disconnecting:
this.once('close', function() {
callback();
});
break;
}
return this;
};
/**
* Called when the connection closes
*
* @api private
*/
Connection.prototype.onClose = function(force) {
this.readyState = STATES.disconnected;
// avoid having the collection subscribe to our event emitter
// to prevent 0.3 warning
for (const i in this.collections) {
if (utils.object.hasOwnProperty(this.collections, i)) {
this.collections[i].onClose(force);
}
}
this.emit('close', force);
for (const db of this.otherDbs) {
db.close(force);
}
};
/**
* Retrieves a collection, creating it if not cached.
*
* Not typically needed by applications. Just talk to your collection through your model.
*
* @param {String} name of the collection
* @param {Object} [options] optional collection options
* @return {Collection} collection instance
* @api public
*/
Connection.prototype.collection = function(name, options) {
const defaultOptions = {
autoIndex: this.config.autoIndex != null ? this.config.autoIndex : this.base.options.autoIndex,
autoCreate: this.config.autoCreate != null ? this.config.autoCreate : this.base.options.autoCreate
};
options = Object.assign({}, defaultOptions, options ? utils.clone(options) : {});
options.$wasForceClosed = this.$wasForceClosed;
if (!(name in this.collections)) {
this.collections[name] = new Collection(name, this, options);
}
return this.collections[name];
};
/**
* Declares a plugin executed on all schemas you pass to `conn.model()`
*
* Equivalent to calling `.plugin(fn)` on each schema you create.
*
* ####Example:
* const db = mongoose.createConnection('mongodb://localhost:27017/mydb');
* db.plugin(() => console.log('Applied'));
* db.plugins.length; // 1
*
* db.model('Test', new Schema({})); // Prints "Applied"
*
* @param {Function} fn plugin callback
* @param {Object} [opts] optional options
* @return {Connection} this
* @see plugins ./plugins.html
* @api public
*/
Connection.prototype.plugin = function(fn, opts) {
this.plugins.push([fn, opts]);
return this;
};
/**
* Defines or retrieves a model.
*
* const mongoose = require('mongoose');
* const db = mongoose.createConnection(..);
* db.model('Venue', new Schema(..));
* const Ticket = db.model('Ticket', new Schema(..));
* const Venue = db.model('Venue');
*
* _When no `collection` argument is passed, Mongoose produces a collection name by passing the model `name` to the [utils.toCollectionName](#utils_exports.toCollectionName) method. This method pluralizes the name. If you don't like this behavior, either pass a collection name or set your schemas collection name option._
*
* ####Example:
*
* const schema = new Schema({ name: String }, { collection: 'actor' });
*
* // or
*
* schema.set('collection', 'actor');
*
* // or
*
* const collectionName = 'actor'
* const M = conn.model('Actor', schema, collectionName)
*
* @param {String|Function} name the model name or class extending Model
* @param {Schema} [schema] a schema. necessary when defining a model
* @param {String} [collection] name of mongodb collection (optional) if not given it will be induced from model name
* @param {Object} [options]
* @param {Boolean} [options.overwriteModels=false] If true, overwrite existing models with the same name to avoid `OverwriteModelError`
* @see Mongoose#model #index_Mongoose-model
* @return {Model} The compiled model
* @api public
*/
Connection.prototype.model = function(name, schema, collection, options) {
if (!(this instanceof Connection)) {
throw new MongooseError('`connection.model()` should not be run with ' +
'`new`. If you are doing `new db.model(foo)(bar)`, use ' +
'`db.model(foo)(bar)` instead');
}
let fn;
if (typeof name === 'function') {
fn = name;
name = fn.name;
}
// collection name discovery
if (typeof schema === 'string') {
collection = schema;
schema = false;
}
if (utils.isObject(schema) && !(schema instanceof this.base.Schema)) {
schema = new Schema(schema);
}
if (schema && !schema.instanceOfSchema) {
throw new Error('The 2nd parameter to `mongoose.model()` should be a ' +
'schema or a POJO');
}
const defaultOptions = { cache: false, overwriteModels: this.base.options.overwriteModels };
const opts = Object.assign(defaultOptions, options, { connection: this });
if (this.models[name] && !collection && opts.overwriteModels !== true) {
// model exists but we are not subclassing with custom collection
if (schema && schema.instanceOfSchema && schema !== this.models[name].schema) {
throw new MongooseError.OverwriteModelError(name);
}
return this.models[name];
}
let model;
if (schema && schema.instanceOfSchema) {
applyPlugins(schema, this.plugins, null, '$connectionPluginsApplied');
// compile a model
model = this.base._model(fn || name, schema, collection, opts);
// only the first model with this name is cached to allow
// for one-offs with custom collection names etc.
if (!this.models[name]) {
this.models[name] = model;
}
// Errors handled internally, so safe to ignore error
model.init(function $modelInitNoop() {});
return model;
}
if (this.models[name] && collection) {
// subclassing current model with alternate collection
model = this.models[name];
schema = model.prototype.schema;
const sub = model.__subclass(this, schema, collection);
// do not cache the sub model
return sub;
}
if (!model) {
throw new MongooseError.MissingSchemaError(name);
}
if (this === model.prototype.db
&& (!collection || collection === model.collection.name)) {
// model already uses this connection.
// only the first model with this name is cached to allow
// for one-offs with custom collection names etc.
if (!this.models[name]) {
this.models[name] = model;
}
return model;
}
this.models[name] = model.__subclass(this, schema, collection);
return this.models[name];
};
/**
* Removes the model named `name` from this connection, if it exists. You can
* use this function to clean up any models you created in your tests to
* prevent OverwriteModelErrors.
*
* ####Example:
*
* conn.model('User', new Schema({ name: String }));
* console.log(conn.model('User')); // Model object
* conn.deleteModel('User');
* console.log(conn.model('User')); // undefined
*
* // Usually useful in a Mocha `afterEach()` hook
* afterEach(function() {
* conn.deleteModel(/.+/); // Delete every model
* });
*
* @api public
* @param {String|RegExp} name if string, the name of the model to remove. If regexp, removes all models whose name matches the regexp.
* @return {Connection} this
*/
Connection.prototype.deleteModel = function(name) {
if (typeof name === 'string') {
const model = this.model(name);
if (model == null) {
return this;
}
const collectionName = model.collection.name;
delete this.models[name];
delete this.collections[collectionName];
this.emit('deleteModel', model);
} else if (name instanceof RegExp) {
const pattern = name;
const names = this.modelNames();
for (const name of names) {
if (pattern.test(name)) {
this.deleteModel(name);
}
}
} else {
throw new Error('First parameter to `deleteModel()` must be a string ' +
'or regexp, got "' + name + '"');
}
return this;
};
/**
* Watches the entire underlying database for changes. Similar to
* [`Model.watch()`](/docs/api/model.html#model_Model.watch).
*
* This function does **not** trigger any middleware. In particular, it
* does **not** trigger aggregate middleware.
*
* The ChangeStream object is an event emitter that emits the following events:
*
* - 'change': A change occurred, see below example
* - 'error': An unrecoverable error occurred. In particular, change streams currently error out if they lose connection to the replica set primary. Follow [this GitHub issue](https://github.com/Automattic/mongoose/issues/6799) for updates.
* - 'end': Emitted if the underlying stream is closed
* - 'close': Emitted if the underlying stream is closed
*
* ####Example:
*
* const User = conn.model('User', new Schema({ name: String }));
*
* const changeStream = conn.watch().on('change', data => console.log(data));
*
* // Triggers a 'change' event on the change stream.
* await User.create({ name: 'test' });
*
* @api public
* @param {Array} [pipeline]
* @param {Object} [options] passed without changes to [the MongoDB driver's `Db#watch()` function](https://mongodb.github.io/node-mongodb-native/3.4/api/Db.html#watch)
* @return {ChangeStream} mongoose-specific change stream wrapper, inherits from EventEmitter
*/
Connection.prototype.watch = function(pipeline, options) {
const disconnectedError = new MongooseError('Connection ' + this.id +
' was disconnected when calling `watch()`');
const changeStreamThunk = cb => {
immediate(() => {
if (this.readyState === STATES.connecting) {
this.once('open', function() {
const driverChangeStream = this.db.watch(pipeline, options);
cb(null, driverChangeStream);
});
} else if (this.readyState === STATES.disconnected && this.db == null) {
cb(disconnectedError);
} else {
const driverChangeStream = this.db.watch(pipeline, options);
cb(null, driverChangeStream);
}
});
};
const changeStream = new ChangeStream(changeStreamThunk, pipeline, options);
return changeStream;
};
/**
* Returns a promise that resolves when this connection
* successfully connects to MongoDB, or rejects if this connection failed
* to connect.
*
* ####Example:
* const conn = await mongoose.createConnection('mongodb://localhost:27017/test').
* asPromise();
* conn.readyState; // 1, means Mongoose is connected
*
* @api public
* @return {Promise}
*/
Connection.prototype.asPromise = function asPromise() {
return this.$initialConnection;
};
/**
* Returns an array of model names created on this connection.
* @api public
* @return {Array}
*/
Connection.prototype.modelNames = function() {
return Object.keys(this.models);
};
/**
* @brief Returns if the connection requires authentication after it is opened. Generally if a
* username and password are both provided than authentication is needed, but in some cases a
* password is not required.
* @api private
* @return {Boolean} true if the connection should be authenticated after it is opened, otherwise false.
*/
Connection.prototype.shouldAuthenticate = function() {
return this.user != null &&
(this.pass != null || this.authMechanismDoesNotRequirePassword());
};
/**
* @brief Returns a boolean value that specifies if the current authentication mechanism needs a
* password to authenticate according to the auth objects passed into the openUri methods.
* @api private
* @return {Boolean} true if the authentication mechanism specified in the options object requires
* a password, otherwise false.
*/
Connection.prototype.authMechanismDoesNotRequirePassword = function() {
if (this.options && this.options.auth) {
return noPasswordAuthMechanisms.indexOf(this.options.auth.authMechanism) >= 0;
}
return true;
};
/**
* @brief Returns a boolean value that specifies if the provided objects object provides enough
* data to authenticate with. Generally this is true if the username and password are both specified
* but in some authentication methods, a password is not required for authentication so only a username
* is required.
* @param {Object} [options] the options object passed into the openUri methods.
* @api private
* @return {Boolean} true if the provided options object provides enough data to authenticate with,
* otherwise false.
*/
Connection.prototype.optionsProvideAuthenticationData = function(options) {
return (options) &&
(options.user) &&
((options.pass) || this.authMechanismDoesNotRequirePassword());
};
/**
* Returns the [MongoDB driver `MongoClient`](http://mongodb.github.io/node-mongodb-native/3.5/api/MongoClient.html) instance
* that this connection uses to talk to MongoDB.
*
* ####Example:
* const conn = await mongoose.createConnection('mongodb://localhost:27017/test');
*
* conn.getClient(); // MongoClient { ... }
*
* @api public
* @return {MongoClient}
*/
Connection.prototype.getClient = function getClient() {
return this.client;
};
/**
* Set the [MongoDB driver `MongoClient`](http://mongodb.github.io/node-mongodb-native/3.5/api/MongoClient.html) instance
* that this connection uses to talk to MongoDB. This is useful if you already have a MongoClient instance, and want to
* reuse it.
*
* ####Example:
* const client = await mongodb.MongoClient.connect('mongodb://localhost:27017/test');
*
* const conn = mongoose.createConnection().setClient(client);
*
* conn.getClient(); // MongoClient { ... }
* conn.readyState; // 1, means 'CONNECTED'
*
* @api public
* @return {Connection} this
*/
Connection.prototype.setClient = function setClient(client) {
if (!(client instanceof mongodb.MongoClient)) {
throw new MongooseError('Must call `setClient()` with an instance of MongoClient');
}
if (this.client != null || this.readyState !== STATES.disconnected) {
throw new MongooseError('Cannot call `setClient()` on a connection that is already connected.');
}
if (client.topology == null) {
throw new MongooseError('Cannot call `setClient()` with a MongoClient that you have not called `connect()` on yet.');
}
this._connectionString = client.s.url;
_setClient(this, client, { useUnifiedTopology: client.s.options.useUnifiedTopology }, client.s.options.dbName);
return this;
};
/**
* Switches to a different database using the same connection pool.
*
* Returns a new connection object, with the new db.
*
* @method useDb
* @memberOf Connection
* @param {String} name The database name
* @param {Object} [options]
* @param {Boolean} [options.useCache=false] If true, cache results so calling `useDb()` multiple times with the same name only creates 1 connection object.
* @param {Boolean} [options.noListener=false] If true, the connection object will not make the db listen to events on the original connection. See [issue #9961](https://github.com/Automattic/mongoose/issues/9961).
* @return {Connection} New Connection Object
* @api public
*/
/*!
* Module exports.
*/
Connection.STATES = STATES;
module.exports = Connection;
| 1 | 14,830 | Love how we can finally use async/await in the codebase. | Automattic-mongoose | js |
@@ -3531,6 +3531,7 @@ void nano::rpc_handler::wallet_representative_set ()
{
rpc_control_impl ();
auto wallet (wallet_impl ());
+ bool update_existing_accounts (request.get<bool> ("update_existing_accounts", false));
if (!ec)
{
std::string representative_text (request.get<std::string> ("representative")); | 1 | #include <boost/algorithm/string.hpp>
#include <nano/lib/interface.h>
#include <nano/node/node.hpp>
#include <nano/node/rpc.hpp>
#ifdef NANO_SECURE_RPC
#include <nano/node/rpc_secure.hpp>
#endif
#include <nano/lib/errors.hpp>
// TLS support for the RPC server is off by default; it must be enabled
// explicitly via the "secure" section of the JSON config.
nano::rpc_secure_config::rpc_secure_config ()
{
	enable = false;
	verbose_logging = false;
}
// Write all TLS-related settings into `json`.
// NOTE(review): keys appear to be emitted in insertion order, so keep this
// list in sync with deserialize_json below. Returns any error accumulated
// on the jsonconfig object.
nano::error nano::rpc_secure_config::serialize_json (nano::jsonconfig & json) const
{
	json.put ("enable", enable);
	json.put ("verbose_logging", verbose_logging);
	json.put ("server_key_passphrase", server_key_passphrase);
	json.put ("server_cert_path", server_cert_path);
	json.put ("server_key_path", server_key_path);
	json.put ("server_dh_path", server_dh_path);
	json.put ("client_certs_path", client_certs_path);
	return json.get_error ();
}
// Read all TLS-related settings from `json`. Every field is required when
// a "secure" section is present; failures are recorded on the jsonconfig
// object and surfaced via the returned error.
nano::error nano::rpc_secure_config::deserialize_json (nano::jsonconfig & json)
{
	json.get_required<bool> ("enable", enable);
	json.get_required<bool> ("verbose_logging", verbose_logging);
	json.get_required<std::string> ("server_key_passphrase", server_key_passphrase);
	json.get_required<std::string> ("server_cert_path", server_cert_path);
	json.get_required<std::string> ("server_key_path", server_key_path);
	json.get_required<std::string> ("server_dh_path", server_dh_path);
	json.get_required<std::string> ("client_certs_path", client_certs_path);
	return json.get_error ();
}
// Default configuration: control RPCs disabled. Delegates to the one-argument
// constructor so the shared defaults (address, port, limits) live in exactly
// one place and cannot drift between the two constructors.
nano::rpc_config::rpc_config () :
rpc_config (false)
{
}
// Construct an RPC configuration, listening on the IPv6 loopback at the
// default RPC port, with `enable_control` set from the caller's flag.
nano::rpc_config::rpc_config (bool enable_control_a) :
address (boost::asio::ip::address_v6::loopback ()),
port (nano::rpc::rpc_port),
enable_control (enable_control_a),
frontier_request_limit (16384),
chain_request_limit (16384),
max_json_depth (20)
{
}
// Write the RPC server settings into `json`. The address is serialized as a
// string; everything else is written with its native type. Returns any error
// accumulated on the jsonconfig object.
nano::error nano::rpc_config::serialize_json (nano::jsonconfig & json) const
{
	json.put ("address", address.to_string ());
	json.put ("port", port);
	json.put ("enable_control", enable_control);
	json.put ("frontier_request_limit", frontier_request_limit);
	json.put ("chain_request_limit", chain_request_limit);
	json.put ("max_json_depth", max_json_depth);
	return json.get_error ();
}
// Read the RPC server settings from `json`. Only "address" is mandatory; the
// remaining keys keep their constructor defaults when absent. An optional
// nested "secure" section configures TLS (see rpc_secure_config).
nano::error nano::rpc_config::deserialize_json (nano::jsonconfig & json)
{
	auto rpc_secure_l (json.get_optional_child ("secure"));
	if (rpc_secure_l)
	{
		secure.deserialize_json (*rpc_secure_l);
	}
	json.get_required<boost::asio::ip::address_v6> ("address", address);
	json.get_optional<uint16_t> ("port", port);
	json.get_optional<bool> ("enable_control", enable_control);
	json.get_optional<uint64_t> ("frontier_request_limit", frontier_request_limit);
	json.get_optional<uint64_t> ("chain_request_limit", chain_request_limit);
	json.get_optional<uint8_t> ("max_json_depth", max_json_depth);
	return json.get_error ();
}
// Construct the RPC server. The acceptor is created on the node's io_context
// but not opened until start () is called.
nano::rpc::rpc (boost::asio::io_context & io_ctx_a, nano::node & node_a, nano::rpc_config const & config_a) :
acceptor (io_ctx_a),
config (config_a),
node (node_a)
{
}
// Open, bind and listen on the configured endpoint, then begin accepting
// connections. Throws std::runtime_error if the bind fails (e.g. the port is
// already in use), after logging the failure.
void nano::rpc::start ()
{
	auto endpoint (nano::tcp_endpoint (config.address, config.port));
	acceptor.open (endpoint.protocol ());
	// Allow quick restarts without waiting for sockets in TIME_WAIT.
	acceptor.set_option (boost::asio::ip::tcp::acceptor::reuse_address (true));
	boost::system::error_code ec;
	acceptor.bind (endpoint, ec);
	if (ec)
	{
		BOOST_LOG (node.log) << boost::str (boost::format ("Error while binding for RPC on port %1%: %2%") % endpoint.port () % ec.message ());
		throw std::runtime_error (ec.message ());
	}
	acceptor.listen ();
	// Notify any registered payment observer whenever a block for its account
	// is processed (see observer_action).
	node.observers.blocks.add ([this](std::shared_ptr<nano::block> block_a, nano::account const & account_a, nano::uint128_t const &, bool) {
		observer_action (account_a);
	});
	accept ();
}
// Asynchronously accept one RPC connection. The completion handler re-arms
// the accept loop first (unless the acceptor was closed or the operation was
// aborted by stop ()), then either starts parsing the new connection or logs
// the accept error.
void nano::rpc::accept ()
{
	auto connection (std::make_shared<nano::rpc_connection> (node, *this));
	acceptor.async_accept (connection->socket, [this, connection](boost::system::error_code const & ec) {
		if (acceptor.is_open () && ec != boost::asio::error::operation_aborted)
		{
			accept ();
		}
		if (!ec)
		{
			connection->parse_connection ();
		}
		else
		{
			BOOST_LOG (this->node.log) << boost::str (boost::format ("Error accepting RPC connections: %1% (%2%)") % ec.message () % ec.value ());
		}
	});
}
// Stop accepting new RPC connections. Closing the acceptor also cancels the
// pending async_accept (its handler sees operation_aborted and does not
// re-arm); connections already in flight are not touched here.
void nano::rpc::stop ()
{
	acceptor.close ();
}
// Construct a handler for a single RPC request. `body_a` is the raw JSON
// request body, `request_id_a` identifies the request for logging, and
// `response_a` is invoked with the JSON response when processing completes.
nano::rpc_handler::rpc_handler (nano::node & node_a, nano::rpc & rpc_a, std::string const & body_a, std::string const & request_id_a, std::function<void(boost::property_tree::ptree const &)> const & response_a) :
body (body_a),
request_id (request_id_a),
node (node_a),
rpc (rpc_a),
response (response_a)
{
}
// Notify the payment observer registered for `account_a`, if any. The
// observer map is consulted under the lock, but observe () is invoked after
// releasing it so the callback cannot deadlock against this mutex.
void nano::rpc::observer_action (nano::account const & account_a)
{
	std::shared_ptr<nano::payment_observer> found;
	{
		std::lock_guard<std::mutex> lock (mutex);
		auto it (payment_observers.find (account_a));
		if (it != payment_observers.end ())
		{
			found = it->second;
		}
	}
	if (found)
	{
		found->observe ();
	}
}
void nano::error_response (std::function<void(boost::property_tree::ptree const &)> response_a, std::string const & message_a)
{
boost::property_tree::ptree response_l;
response_l.put ("error", message_a);
response_a (response_l);
}
void nano::rpc_handler::response_errors ()
{
if (ec || response_l.empty ())
{
boost::property_tree::ptree response_error;
response_error.put ("error", ec ? ec.message () : "Empty response");
response (response_error);
}
else
{
response (response_l);
}
}
// Resolve the "wallet" field of the request to an open wallet. On failure
// sets `ec` (bad_wallet_number for an undecodable id, wallet_not_found for an
// unknown one) and returns nullptr. A no-op if `ec` is already set.
std::shared_ptr<nano::wallet> nano::rpc_handler::wallet_impl ()
{
	if (ec)
	{
		return nullptr;
	}
	std::string wallet_text (request.get<std::string> ("wallet"));
	nano::uint256_union wallet;
	if (wallet.decode_hex (wallet_text))
	{
		ec = nano::error_common::bad_wallet_number;
		return nullptr;
	}
	auto existing (node.wallets.items.find (wallet));
	if (existing == node.wallets.items.end ())
	{
		ec = nano::error_common::wallet_not_found;
		return nullptr;
	}
	return existing->second;
}
nano::account nano::rpc_handler::account_impl (std::string account_text)
{
	// Decode an account, falling back to the request's "account" field when no
	// explicit text is supplied. Returns zero and sets ec on a bad encoding.
	nano::account result (0);
	if (!ec)
	{
		std::string text (account_text.empty () ? request.get<std::string> ("account") : account_text);
		if (result.decode_account (text))
		{
			ec = nano::error_common::bad_account_number;
		}
	}
	return result;
}
nano::amount nano::rpc_handler::amount_impl ()
{
	// Parse the mandatory decimal "amount" field; zero with ec set on failure.
	nano::amount result (0);
	if (!ec && result.decode_dec (request.get<std::string> ("amount")))
	{
		ec = nano::error_common::invalid_amount;
	}
	return result;
}
nano::block_hash nano::rpc_handler::hash_impl (std::string search_text)
{
	// Parse a hex block hash from the request field named by search_text
	// (default "hash"); zero with ec set on failure.
	nano::block_hash result (0);
	if (!ec && result.decode_hex (request.get<std::string> (search_text)))
	{
		ec = nano::error_blocks::invalid_block_hash;
	}
	return result;
}
nano::amount nano::rpc_handler::threshold_optional_impl ()
{
	// Parse the optional decimal "threshold" field; defaults to zero when absent.
	nano::amount result (0);
	auto text (request.get_optional<std::string> ("threshold"));
	if (!ec && text && result.decode_dec (*text))
	{
		ec = nano::error_common::bad_threshold;
	}
	return result;
}
uint64_t nano::rpc_handler::work_optional_impl ()
{
	// Parse the optional hex "work" field; defaults to zero when absent.
	uint64_t result (0);
	auto text (request.get_optional<std::string> ("work"));
	if (!ec && text && nano::from_string_hex (*text, result))
	{
		ec = nano::error_common::bad_work_format;
	}
	return result;
}
namespace
{
/** Parse *text* as a base-10 unsigned 64-bit integer into *number*.
 *  Returns true on error (empty input, non-digit characters, overflow).
 *  Unlike raw std::stoull this rejects leading whitespace and signs:
 *  stoull would accept " 5", "+5" and even "-1" — the last silently
 *  wrapping around to a huge unsigned value, which would let a negative
 *  "count" pass validation. */
bool decode_unsigned (std::string const & text, uint64_t & number)
{
	bool result (false);
	// Require the first character to be a decimal digit so signs and
	// whitespace are rejected up front.
	if (text.empty () || !std::isdigit (static_cast<unsigned char> (text.front ())))
	{
		result = true;
	}
	else
	{
		size_t end (0);
		try
		{
			number = std::stoull (text, &end);
		}
		catch (std::invalid_argument const &)
		{
			result = true;
		}
		catch (std::out_of_range const &)
		{
			result = true;
		}
		// Trailing garbage after the digits is an error as well.
		result = result || end != text.size ();
	}
	return result;
}
}
uint64_t nano::rpc_handler::count_impl ()
{
	// Parse the mandatory "count" field; must be a positive integer.
	if (ec)
	{
		return 0;
	}
	uint64_t result (0);
	if (decode_unsigned (request.get<std::string> ("count"), result) || result == 0)
	{
		ec = nano::error_common::invalid_count;
	}
	return result;
}
uint64_t nano::rpc_handler::count_optional_impl (uint64_t result)
{
	// Parse the optional "count" field, keeping the caller-supplied default
	// when it is absent.
	auto text (request.get_optional<std::string> ("count"));
	if (!ec && text && decode_unsigned (*text, result))
	{
		ec = nano::error_common::invalid_count;
	}
	return result;
}
uint64_t nano::rpc_handler::offset_optional_impl (uint64_t result)
{
	// Parse the optional "offset" field, keeping the caller-supplied default
	// when it is absent.
	auto text (request.get_optional<std::string> ("offset"));
	if (!ec && text && decode_unsigned (*text, result))
	{
		ec = nano::error_rpc::invalid_offset;
	}
	return result;
}
bool nano::rpc_handler::rpc_control_impl ()
{
	// Guard for privileged RPCs: true when enable_control is configured,
	// otherwise false with ec set.
	bool result (false);
	if (!ec)
	{
		result = rpc.config.enable_control;
		if (!result)
		{
			ec = nano::error_rpc::rpc_control_disabled;
		}
	}
	return result;
}
void nano::rpc_handler::account_balance ()
{
	// Report the settled balance and the pending (not yet received) total
	// for a single account.
	auto account (account_impl ());
	if (!ec)
	{
		auto totals (node.balance_pending (account));
		response_l.put ("balance", totals.first.convert_to<std::string> ());
		response_l.put ("pending", totals.second.convert_to<std::string> ());
	}
	response_errors ();
}
void nano::rpc_handler::account_block_count ()
{
	// Report the number of blocks on an account's chain from ledger metadata.
	auto account (account_impl ());
	if (!ec)
	{
		nano::account_info info;
		auto transaction (node.store.tx_begin_read ());
		if (node.store.account_get (transaction, account, info))
		{
			ec = nano::error_common::account_not_found;
		}
		else
		{
			response_l.put ("block_count", std::to_string (info.block_count));
		}
	}
	response_errors ();
}
void nano::rpc_handler::account_create ()
{
	// Create one account in a wallet (control RPC). With an "index" field the
	// key is derived deterministically at that seed index; otherwise the
	// wallet's next index is used. "work" (default true) pre-generates PoW.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	if (!ec)
	{
		const bool generate_work = request.get<bool> ("work", true);
		nano::account new_key;
		auto index_text (request.get_optional<std::string> ("index"));
		if (index_text.is_initialized ())
		{
			uint64_t index;
			// Deterministic indices are 32-bit; reject anything wider.
			if (decode_unsigned (index_text.get (), index) || index > static_cast<uint64_t> (std::numeric_limits<uint32_t>::max ()))
			{
				ec = nano::error_common::invalid_index;
			}
			else
			{
				new_key = wallet->deterministic_insert (static_cast<uint32_t> (index), generate_work);
			}
		}
		else
		{
			new_key = wallet->deterministic_insert (generate_work);
		}
		if (!ec)
		{
			// A zero key means the wallet could not insert (it is locked).
			if (!new_key.is_zero ())
			{
				response_l.put ("account", new_key.to_account ());
			}
			else
			{
				ec = nano::error_common::wallet_locked;
			}
		}
	}
	response_errors ();
}
void nano::rpc_handler::account_get ()
{
	// Convert a raw public key (hex "key" field) into its account string.
	nano::uint256_union pub;
	if (pub.decode_hex (request.get<std::string> ("key")))
	{
		ec = nano::error_common::bad_public_key;
	}
	else
	{
		response_l.put ("account", pub.to_account ());
	}
	response_errors ();
}
void nano::rpc_handler::account_info ()
{
	// Ledger metadata for one account: frontier, open block, balance, counts.
	// Optional flags add the representative, voting weight and pending total.
	auto account (account_impl ());
	if (!ec)
	{
		const bool representative = request.get<bool> ("representative", false);
		const bool weight = request.get<bool> ("weight", false);
		const bool pending = request.get<bool> ("pending", false);
		auto transaction (node.store.tx_begin_read ());
		nano::account_info info;
		if (!node.store.account_get (transaction, account, info))
		{
			response_l.put ("frontier", info.head.to_string ());
			response_l.put ("open_block", info.open_block.to_string ());
			response_l.put ("representative_block", info.rep_block.to_string ());
			std::string balance;
			nano::uint128_union (info.balance).encode_dec (balance);
			response_l.put ("balance", balance);
			response_l.put ("modified_timestamp", std::to_string (info.modified));
			response_l.put ("block_count", std::to_string (info.block_count));
			response_l.put ("account_version", info.epoch == nano::epoch::epoch_1 ? "1" : "0");
			if (representative)
			{
				// The representative lives on the account's rep block, which
				// must exist for any account present in the ledger.
				auto block (node.store.block_get (transaction, info.rep_block));
				assert (block != nullptr);
				response_l.put ("representative", block->representative ().to_account ());
			}
			if (weight)
			{
				auto account_weight (node.ledger.weight (transaction, account));
				response_l.put ("weight", account_weight.convert_to<std::string> ());
			}
			if (pending)
			{
				auto account_pending (node.ledger.account_pending (transaction, account));
				response_l.put ("pending", account_pending.convert_to<std::string> ());
			}
		}
		else
		{
			ec = nano::error_common::account_not_found;
		}
	}
	response_errors ();
}
void nano::rpc_handler::account_key ()
{
	// Inverse of account_get: expose the raw public key behind an account.
	auto account (account_impl ());
	if (ec)
	{
		response_errors ();
		return;
	}
	response_l.put ("key", account.to_string ());
	response_errors ();
}
void nano::rpc_handler::account_list ()
{
	// Enumerate every account stored in the given wallet.
	auto wallet (wallet_impl ());
	if (!ec)
	{
		boost::property_tree::ptree accounts;
		auto transaction (node.wallets.tx_begin_read ());
		auto end (wallet->store.end ());
		for (auto it (wallet->store.begin (transaction)); it != end; ++it)
		{
			boost::property_tree::ptree entry;
			entry.put ("", nano::account (it->first).to_account ());
			accounts.push_back (std::make_pair ("", entry));
		}
		response_l.add_child ("accounts", accounts);
	}
	response_errors ();
}
void nano::rpc_handler::account_move ()
{
	// Move a list of accounts from a source wallet into the target wallet
	// (control RPC). "moved" is "1" on success, "0" if the store move failed.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	if (!ec)
	{
		std::string source_text (request.get<std::string> ("source"));
		auto accounts_text (request.get_child ("accounts"));
		nano::uint256_union source;
		if (!source.decode_hex (source_text))
		{
			auto existing (node.wallets.items.find (source));
			if (existing != node.wallets.items.end ())
			{
				// Shadows the outer wallet-id: from here on "source" is the
				// resolved source wallet object, not its id.
				auto source (existing->second);
				std::vector<nano::public_key> accounts;
				for (auto i (accounts_text.begin ()), n (accounts_text.end ()); i != n; ++i)
				{
					nano::public_key account;
					// NOTE(review): decode_account's failure result is ignored
					// here; a malformed entry becomes whatever the partial
					// decode left behind.
					account.decode_account (i->second.get<std::string> (""));
					accounts.push_back (account);
				}
				auto transaction (node.wallets.tx_begin_write ());
				auto error (wallet->store.move (transaction, source->store, accounts));
				response_l.put ("moved", error ? "0" : "1");
			}
			else
			{
				ec = nano::error_rpc::source_not_found;
			}
		}
		else
		{
			ec = nano::error_rpc::bad_source;
		}
	}
	response_errors ();
}
void nano::rpc_handler::account_remove ()
{
	// Delete one account from a wallet (control RPC; wallet must be unlocked).
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	auto account (account_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_write ());
		if (!wallet->store.valid_password (transaction))
		{
			ec = nano::error_common::wallet_locked;
		}
		else if (wallet->store.find (transaction, account) == wallet->store.end ())
		{
			ec = nano::error_common::account_not_found_wallet;
		}
		else
		{
			wallet->store.erase (transaction, account);
			response_l.put ("removed", "1");
		}
	}
	response_errors ();
}
void nano::rpc_handler::account_representative ()
{
	// Report the representative recorded on the account's rep block.
	auto account (account_impl ());
	if (!ec)
	{
		nano::account_info info;
		auto transaction (node.store.tx_begin_read ());
		if (node.store.account_get (transaction, account, info))
		{
			ec = nano::error_common::account_not_found;
		}
		else
		{
			auto block (node.store.block_get (transaction, info.rep_block));
			assert (block != nullptr);
			response_l.put ("representative", block->representative ().to_account ());
		}
	}
	response_errors ();
}
void nano::rpc_handler::account_representative_set ()
{
	// Change an account's representative (control RPC). On success the reply
	// is produced asynchronously by the change_async callback; only error
	// paths answer synchronously (see the check at the bottom).
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	auto account (account_impl ());
	if (!ec)
	{
		std::string representative_text (request.get<std::string> ("representative"));
		nano::account representative;
		if (!representative.decode_account (representative_text))
		{
			auto work (work_optional_impl ());
			if (!ec && work)
			{
				// Caller supplied work: validate it against the account's
				// current head before committing to the change.
				auto transaction (node.store.tx_begin_write ());
				if (wallet->store.valid_password (transaction))
				{
					nano::account_info info;
					if (!node.store.account_get (transaction, account, info))
					{
						if (nano::work_validate (info.head, work))
						{
							ec = nano::error_common::invalid_work;
						}
					}
					else
					{
						ec = nano::error_common::account_not_found;
					}
				}
				else
				{
					ec = nano::error_common::wallet_locked;
				}
			}
			if (!ec)
			{
				bool generate_work (work == 0); // Disable work generation if "work" option is provided
				// Copy the response callback into the lambda; the handler
				// object may be gone by the time the block is produced.
				auto response_a (response);
				wallet->change_async (account, representative, [response_a](std::shared_ptr<nano::block> block) {
					nano::block_hash hash (0);
					if (block != nullptr)
					{
						hash = block->hash ();
					}
					boost::property_tree::ptree response_l;
					response_l.put ("block", hash.to_string ());
					response_a (response_l);
				},
				work, generate_work);
			}
		}
		else
		{
			ec = nano::error_rpc::bad_representative_number;
		}
	}
	// Because of change_async
	if (ec)
	{
		response_errors ();
	}
}
void nano::rpc_handler::account_weight ()
{
	// Report the voting weight delegated to this account as a representative.
	auto account (account_impl ());
	if (!ec)
	{
		auto weight (node.weight (account));
		response_l.put ("weight", weight.convert_to<std::string> ());
	}
	response_errors ();
}
void nano::rpc_handler::accounts_balances ()
{
	// Batched account_balance over a JSON array of accounts; a bad entry sets
	// ec and the remaining entries are skipped by the per-item check.
	boost::property_tree::ptree balances;
	for (auto & item : request.get_child ("accounts"))
	{
		auto account (account_impl (item.second.data ()));
		if (!ec)
		{
			auto totals (node.balance_pending (account));
			boost::property_tree::ptree entry;
			entry.put ("balance", totals.first.convert_to<std::string> ());
			entry.put ("pending", totals.second.convert_to<std::string> ());
			balances.push_back (std::make_pair (account.to_account (), entry));
		}
	}
	response_l.add_child ("balances", balances);
	response_errors ();
}
void nano::rpc_handler::accounts_create ()
{
	// Create "count" deterministic accounts in a wallet (control RPC).
	// "work" (default false) pre-generates PoW for each new account.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	auto count (count_impl ());
	if (!ec)
	{
		const bool generate_work = request.get<bool> ("work", false);
		boost::property_tree::ptree accounts;
		for (auto i (0); accounts.size () < count && !ec; ++i)
		{
			nano::account new_key (wallet->deterministic_insert (generate_work));
			if (!new_key.is_zero ())
			{
				boost::property_tree::ptree entry;
				entry.put ("", new_key.to_account ());
				accounts.push_back (std::make_pair ("", entry));
			}
			else
			{
				// deterministic_insert yields a zero key when the wallet is
				// locked. Without bailing out here the loop would spin
				// forever, since accounts.size () could never reach count.
				ec = nano::error_common::wallet_locked;
			}
		}
		response_l.add_child ("accounts", accounts);
	}
	response_errors ();
}
void nano::rpc_handler::accounts_frontiers ()
{
	// Head (frontier) block hash for each requested account; accounts absent
	// from the ledger are simply omitted from the reply.
	boost::property_tree::ptree frontiers;
	auto transaction (node.store.tx_begin_read ());
	for (auto & item : request.get_child ("accounts"))
	{
		auto account (account_impl (item.second.data ()));
		if (!ec)
		{
			auto latest (node.ledger.latest (transaction, account));
			if (!latest.is_zero ())
			{
				frontiers.put (account.to_account (), latest.to_string ());
			}
		}
	}
	response_l.add_child ("frontiers", frontiers);
	response_errors ();
}
void nano::rpc_handler::accounts_pending ()
{
	// List pending (receivable) block hashes for several accounts, up to
	// "count" per account. "threshold" filters by minimum amount, "source"
	// adds amount+sender detail, and "include_active" also lists sends whose
	// election is still active.
	auto count (count_optional_impl ());
	auto threshold (threshold_optional_impl ());
	const bool source = request.get<bool> ("source", false);
	const bool include_active = request.get<bool> ("include_active", false);
	boost::property_tree::ptree pending;
	auto transaction (node.store.tx_begin_read ());
	for (auto & accounts : request.get_child ("accounts"))
	{
		auto account (account_impl (accounts.second.data ()));
		if (!ec)
		{
			boost::property_tree::ptree peers_l;
			// Pending keys sort by (account, hash); starting at hash 0 and
			// stopping when the account changes walks exactly this account's
			// pending entries.
			for (auto i (node.store.pending_begin (transaction, nano::pending_key (account, 0))); nano::pending_key (i->first).account == account && peers_l.size () < count; ++i)
			{
				nano::pending_key key (i->first);
				// The block is only fetched to test election activity; when
				// include_active is set that test is skipped entirely.
				std::shared_ptr<nano::block> block (include_active ? nullptr : node.store.block_get (transaction, key.hash));
				if (include_active || (block && !node.active.active (*block)))
				{
					if (threshold.is_zero () && !source)
					{
						// Cheapest form: bare array of hashes.
						boost::property_tree::ptree entry;
						entry.put ("", key.hash.to_string ());
						peers_l.push_back (std::make_pair ("", entry));
					}
					else
					{
						nano::pending_info info (i->second);
						if (info.amount.number () >= threshold.number ())
						{
							if (source)
							{
								boost::property_tree::ptree pending_tree;
								pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
								pending_tree.put ("source", info.source.to_account ());
								peers_l.add_child (key.hash.to_string (), pending_tree);
							}
							else
							{
								peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
							}
						}
					}
				}
			}
			pending.add_child (account.to_account (), peers_l);
		}
	}
	response_l.add_child ("blocks", pending);
	response_errors ();
}
void nano::rpc_handler::available_supply ()
{
	// Circulating supply: the genesis amount minus funds held in accounts
	// considered unavailable (cold-storage genesis, landing, faucet) and
	// amounts sent to the burn address (account 0).
	auto genesis_balance (node.balance (nano::genesis_account)); // Cold storage genesis
	auto landing_balance (node.balance (nano::account ("059F68AAB29DE0D3A27443625C7EA9CDDB6517A8B76FE37727EF6A4D76832AD5"))); // Active unavailable account
	auto faucet_balance (node.balance (nano::account ("8E319CE6F3025E5B2DF66DA7AB1467FE48F1679C13DD43BFDB29FA2E9FC40D3B"))); // Faucet account
	auto burned_balance ((node.balance_pending (nano::account (0))).second); // Burning 0 account
	auto available (nano::genesis_amount - genesis_balance - landing_balance - faucet_balance - burned_balance);
	response_l.put ("available", available.convert_to<std::string> ());
	response_errors ();
}
void nano::rpc_handler::block ()
{
	// Return the JSON contents of a single block looked up by hash.
	auto hash (hash_impl ());
	if (!ec)
	{
		auto transaction (node.store.tx_begin_read ());
		auto block_l (node.store.block_get (transaction, hash));
		if (block_l == nullptr)
		{
			ec = nano::error_blocks::not_found;
		}
		else
		{
			std::string contents;
			block_l->serialize_json (contents);
			response_l.put ("contents", contents);
		}
	}
	response_errors ();
}
void nano::rpc_handler::block_confirm ()
{
	// Start a confirmation (election) for a block that already exists locally.
	auto hash (hash_impl ());
	if (!ec)
	{
		auto transaction (node.store.tx_begin_read ());
		auto block_l (node.store.block_get (transaction, hash));
		if (block_l == nullptr)
		{
			ec = nano::error_blocks::not_found;
		}
		else
		{
			node.block_confirm (std::move (block_l));
			response_l.put ("started", "1");
		}
	}
	response_errors ();
}
void nano::rpc_handler::blocks ()
{
	// Return serialized contents for a batch of block hashes. Any bad or
	// unknown hash sets ec, after which remaining entries are skipped.
	std::vector<std::string> hashes;
	boost::property_tree::ptree blocks;
	auto transaction (node.store.tx_begin_read ());
	// NOTE: the loop variable shadows the (unused) outer "hashes" vector.
	for (boost::property_tree::ptree::value_type & hashes : request.get_child ("hashes"))
	{
		if (!ec)
		{
			std::string hash_text = hashes.second.data ();
			nano::uint256_union hash;
			if (!hash.decode_hex (hash_text))
			{
				auto block (node.store.block_get (transaction, hash));
				if (block != nullptr)
				{
					std::string contents;
					block->serialize_json (contents);
					blocks.put (hash_text, contents);
				}
				else
				{
					ec = nano::error_blocks::not_found;
				}
			}
			else
			{
				ec = nano::error_blocks::bad_hash_number;
			}
		}
	}
	response_l.add_child ("blocks", blocks);
	response_errors ();
}
void nano::rpc_handler::blocks_info ()
{
	// Like blocks (), but each entry also carries owning account and amount,
	// plus optional pending/source/balance details per request flags.
	const bool pending = request.get<bool> ("pending", false);
	const bool source = request.get<bool> ("source", false);
	const bool balance = request.get<bool> ("balance", false);
	std::vector<std::string> hashes;
	boost::property_tree::ptree blocks;
	auto transaction (node.store.tx_begin_read ());
	// NOTE: the loop variable shadows the (unused) outer "hashes" vector.
	for (boost::property_tree::ptree::value_type & hashes : request.get_child ("hashes"))
	{
		if (!ec)
		{
			std::string hash_text = hashes.second.data ();
			nano::uint256_union hash;
			if (!hash.decode_hex (hash_text))
			{
				auto block (node.store.block_get (transaction, hash));
				if (block != nullptr)
				{
					boost::property_tree::ptree entry;
					auto account (node.ledger.account (transaction, hash));
					entry.put ("block_account", account.to_account ());
					auto amount (node.ledger.amount (transaction, hash));
					entry.put ("amount", amount.convert_to<std::string> ());
					std::string contents;
					block->serialize_json (contents);
					entry.put ("contents", contents);
					if (pending)
					{
						// "pending" is 1 when this send's destination has not
						// yet pocketed it (a pending-table entry still exists).
						bool exists (false);
						auto destination (node.ledger.block_destination (transaction, *block));
						if (!destination.is_zero ())
						{
							exists = node.store.pending_exists (transaction, nano::pending_key (destination, hash));
						}
						entry.put ("pending", exists ? "1" : "0");
					}
					if (source)
					{
						// Resolve the account that created the source block of
						// a receive; "0" when there is no resolvable source.
						nano::block_hash source_hash (node.ledger.block_source (transaction, *block));
						auto block_a (node.store.block_get (transaction, source_hash));
						if (block_a != nullptr)
						{
							auto source_account (node.ledger.account (transaction, source_hash));
							entry.put ("source_account", source_account.to_account ());
						}
						else
						{
							entry.put ("source_account", "0");
						}
					}
					if (balance)
					{
						auto balance (node.ledger.balance (transaction, hash));
						entry.put ("balance", balance.convert_to<std::string> ());
					}
					blocks.push_back (std::make_pair (hash_text, entry));
				}
				else
				{
					ec = nano::error_blocks::not_found;
				}
			}
			else
			{
				ec = nano::error_blocks::bad_hash_number;
			}
		}
	}
	response_l.add_child ("blocks", blocks);
	response_errors ();
}
void nano::rpc_handler::block_account ()
{
	// Resolve the account that owns a given block hash.
	auto hash (hash_impl ());
	if (!ec)
	{
		auto transaction (node.store.tx_begin_read ());
		if (!node.store.block_exists (transaction, hash))
		{
			ec = nano::error_blocks::not_found;
		}
		else
		{
			auto account (node.ledger.account (transaction, hash));
			response_l.put ("account", account.to_account ());
		}
	}
	response_errors ();
}
void nano::rpc_handler::block_count ()
{
	// Totals of in-ledger (checked) and unchecked blocks.
	auto transaction (node.store.tx_begin_read ());
	auto checked (node.store.block_count (transaction).sum ());
	auto unchecked (node.store.unchecked_count (transaction));
	response_l.put ("count", std::to_string (checked));
	response_l.put ("unchecked", std::to_string (unchecked));
	response_errors ();
}
void nano::rpc_handler::block_count_type ()
{
auto transaction (node.store.tx_begin_read ());
nano::block_counts count (node.store.block_count (transaction));
response_l.put ("send", std::to_string (count.send));
response_l.put ("receive", std::to_string (count.receive));
response_l.put ("open", std::to_string (count.open));
response_l.put ("change", std::to_string (count.change));
response_l.put ("state_v0", std::to_string (count.state_v0));
response_l.put ("state_v1", std::to_string (count.state_v1));
response_l.put ("state", std::to_string (count.state_v0 + count.state_v1));
response_errors ();
}
void nano::rpc_handler::block_create ()
{
	// Build and sign (but do not publish) a block of the requested "type"
	// (state/open/receive/change/send). The signing key comes either from a
	// wallet+account pair or from an explicit "key"; previous/balance are
	// fetched from the ledger when not supplied. Returns the block JSON and
	// its hash (control RPC).
	rpc_control_impl ();
	if (!ec)
	{
		std::string type (request.get<std::string> ("type"));
		nano::uint256_union wallet (0);
		boost::optional<std::string> wallet_text (request.get_optional<std::string> ("wallet"));
		if (wallet_text.is_initialized ())
		{
			if (wallet.decode_hex (wallet_text.get ()))
			{
				ec = nano::error_common::bad_wallet_number;
			}
		}
		nano::uint256_union account (0);
		boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
		if (!ec && account_text.is_initialized ())
		{
			if (account.decode_account (account_text.get ()))
			{
				ec = nano::error_common::bad_account_number;
			}
		}
		nano::uint256_union representative (0);
		boost::optional<std::string> representative_text (request.get_optional<std::string> ("representative"));
		if (!ec && representative_text.is_initialized ())
		{
			if (representative.decode_account (representative_text.get ()))
			{
				ec = nano::error_rpc::bad_representative_number;
			}
		}
		nano::uint256_union destination (0);
		boost::optional<std::string> destination_text (request.get_optional<std::string> ("destination"));
		if (!ec && destination_text.is_initialized ())
		{
			if (destination.decode_account (destination_text.get ()))
			{
				ec = nano::error_rpc::bad_destination;
			}
		}
		nano::block_hash source (0);
		boost::optional<std::string> source_text (request.get_optional<std::string> ("source"));
		if (!ec && source_text.is_initialized ())
		{
			if (source.decode_hex (source_text.get ()))
			{
				ec = nano::error_rpc::bad_source;
			}
		}
		nano::uint128_union amount (0);
		boost::optional<std::string> amount_text (request.get_optional<std::string> ("amount"));
		if (!ec && amount_text.is_initialized ())
		{
			if (amount.decode_dec (amount_text.get ()))
			{
				ec = nano::error_common::invalid_amount;
			}
		}
		auto work (work_optional_impl ());
		nano::raw_key prv;
		prv.data.clear ();
		nano::uint256_union previous (0);
		nano::uint128_union balance (0);
		// Wallet path: fetch the private key plus current head/balance for
		// the account from the (unlocked) wallet and the ledger.
		if (!ec && wallet != 0 && account != 0)
		{
			auto existing (node.wallets.items.find (wallet));
			if (existing != node.wallets.items.end ())
			{
				auto transaction (node.wallets.tx_begin_read ());
				auto block_transaction (node.store.tx_begin_read ());
				if (existing->second->store.valid_password (transaction))
				{
					if (existing->second->store.find (transaction, account) != existing->second->store.end ())
					{
						existing->second->store.fetch (transaction, account, prv);
						previous = node.ledger.latest (block_transaction, account);
						balance = node.ledger.account_balance (block_transaction, account);
					}
					else
					{
						ec = nano::error_common::account_not_found_wallet;
					}
				}
				else
				{
					ec = nano::error_common::wallet_locked;
				}
			}
			else
			{
				ec = nano::error_common::wallet_not_found;
			}
		}
		// Explicit overrides: a raw private key, and/or previous/balance.
		boost::optional<std::string> key_text (request.get_optional<std::string> ("key"));
		if (!ec && key_text.is_initialized ())
		{
			if (prv.data.decode_hex (key_text.get ()))
			{
				ec = nano::error_common::bad_private_key;
			}
		}
		boost::optional<std::string> previous_text (request.get_optional<std::string> ("previous"));
		if (!ec && previous_text.is_initialized ())
		{
			if (previous.decode_hex (previous_text.get ()))
			{
				ec = nano::error_rpc::bad_previous;
			}
		}
		boost::optional<std::string> balance_text (request.get_optional<std::string> ("balance"));
		if (!ec && balance_text.is_initialized ())
		{
			if (balance.decode_dec (balance_text.get ()))
			{
				ec = nano::error_rpc::invalid_balance;
			}
		}
		nano::uint256_union link (0);
		boost::optional<std::string> link_text (request.get_optional<std::string> ("link"));
		if (!ec && link_text.is_initialized ())
		{
			// "link" may be given either as an account or as raw hex.
			if (link.decode_account (link_text.get ()))
			{
				if (link.decode_hex (link_text.get ()))
				{
					ec = nano::error_rpc::bad_link;
				}
			}
		}
		else
		{
			// Retrieve link from source or destination
			link = source.is_zero () ? destination : source;
		}
		if (prv.data != 0)
		{
			nano::uint256_union pub (nano::pub_key (prv.data));
			// Fetching account balance & previous for send blocks (if aren't given directly)
			if (!previous_text.is_initialized () && !balance_text.is_initialized ())
			{
				auto transaction (node.store.tx_begin_read ());
				previous = node.ledger.latest (transaction, pub);
				balance = node.ledger.account_balance (transaction, pub);
			}
			// Double check current balance if previous block is specified
			else if (previous_text.is_initialized () && balance_text.is_initialized () && type == "send")
			{
				auto transaction (node.store.tx_begin_read ());
				if (node.store.block_exists (transaction, previous) && node.store.block_balance (transaction, previous) != balance.number ())
				{
					ec = nano::error_rpc::block_create_balance_mismatch;
				}
			}
			// Check for incorrect account key
			if (!ec && account_text.is_initialized ())
			{
				if (account != pub)
				{
					ec = nano::error_rpc::block_create_public_key_mismatch;
				}
			}
			// Per-type construction: each branch validates the fields that
			// type requires, generates work at the correct root when none was
			// supplied, and serializes the finished block.
			if (type == "state")
			{
				if (previous_text.is_initialized () && !representative.is_zero () && (!link.is_zero () || link_text.is_initialized ()))
				{
					if (work == 0)
					{
						// A zero previous means an open: work is computed on
						// the public key instead of the previous hash.
						work = node.work_generate_blocking (previous.is_zero () ? pub : previous);
					}
					nano::state_block state (pub, previous, representative, balance, link, prv, pub, work);
					response_l.put ("hash", state.hash ().to_string ());
					std::string contents;
					state.serialize_json (contents);
					response_l.put ("block", contents);
				}
				else
				{
					ec = nano::error_rpc::block_create_requirements_state;
				}
			}
			else if (type == "open")
			{
				if (representative != 0 && source != 0)
				{
					if (work == 0)
					{
						work = node.work_generate_blocking (pub);
					}
					nano::open_block open (source, representative, pub, prv, pub, work);
					response_l.put ("hash", open.hash ().to_string ());
					std::string contents;
					open.serialize_json (contents);
					response_l.put ("block", contents);
				}
				else
				{
					ec = nano::error_rpc::block_create_requirements_open;
				}
			}
			else if (type == "receive")
			{
				if (source != 0 && previous != 0)
				{
					if (work == 0)
					{
						work = node.work_generate_blocking (previous);
					}
					nano::receive_block receive (previous, source, prv, pub, work);
					response_l.put ("hash", receive.hash ().to_string ());
					std::string contents;
					receive.serialize_json (contents);
					response_l.put ("block", contents);
				}
				else
				{
					ec = nano::error_rpc::block_create_requirements_receive;
				}
			}
			else if (type == "change")
			{
				if (representative != 0 && previous != 0)
				{
					if (work == 0)
					{
						work = node.work_generate_blocking (previous);
					}
					nano::change_block change (previous, representative, prv, pub, work);
					response_l.put ("hash", change.hash ().to_string ());
					std::string contents;
					change.serialize_json (contents);
					response_l.put ("block", contents);
				}
				else
				{
					ec = nano::error_rpc::block_create_requirements_change;
				}
			}
			else if (type == "send")
			{
				if (destination != 0 && previous != 0 && balance != 0 && amount != 0)
				{
					if (balance.number () >= amount.number ())
					{
						if (work == 0)
						{
							work = node.work_generate_blocking (previous);
						}
						// Legacy send blocks store the REMAINING balance, not
						// the amount being sent.
						nano::send_block send (previous, destination, balance.number () - amount.number (), prv, pub, work);
						response_l.put ("hash", send.hash ().to_string ());
						std::string contents;
						send.serialize_json (contents);
						response_l.put ("block", contents);
					}
					else
					{
						ec = nano::error_common::insufficient_balance;
					}
				}
				else
				{
					ec = nano::error_rpc::block_create_requirements_send;
				}
			}
			else
			{
				ec = nano::error_blocks::invalid_type;
			}
		}
		else
		{
			ec = nano::error_rpc::block_create_key_required;
		}
	}
	response_errors ();
}
void nano::rpc_handler::block_hash ()
{
	// Compute the hash a supplied block JSON would have, without processing it.
	boost::property_tree::ptree block_l;
	std::stringstream block_stream (request.get<std::string> ("block"));
	boost::property_tree::read_json (block_stream, block_l);
	// Signature and work are stubbed unconditionally so deserialization
	// succeeds even when the caller omits them; the reported hash therefore
	// never depends on caller-supplied signature/work.
	block_l.put ("signature", "0");
	block_l.put ("work", "0");
	auto block_a (nano::deserialize_block_json (block_l));
	if (block_a == nullptr)
	{
		ec = nano::error_blocks::invalid_block;
	}
	else
	{
		response_l.put ("hash", block_a->hash ().to_string ());
	}
	response_errors ();
}
void nano::rpc_handler::bootstrap ()
{
	// Bootstrap from one specific peer given by IPv6 address and port.
	// Both fields are fetched up front so a missing one throws before parsing.
	std::string address_text = request.get<std::string> ("address");
	std::string port_text = request.get<std::string> ("port");
	boost::system::error_code address_ec;
	auto address (boost::asio::ip::address_v6::from_string (address_text, address_ec));
	if (address_ec)
	{
		ec = nano::error_common::invalid_ip_address;
	}
	else
	{
		uint16_t port;
		if (nano::parse_port (port_text, port))
		{
			ec = nano::error_common::invalid_port;
		}
		else
		{
			node.bootstrap_initiator.bootstrap (nano::endpoint (address, port));
			response_l.put ("success", "");
		}
	}
	response_errors ();
}
void nano::rpc_handler::bootstrap_any ()
{
	// Kick off bootstrapping against arbitrary peers (no target given);
	// always reports success since the attempt is merely initiated.
	node.bootstrap_initiator.bootstrap ();
	response_l.put ("success", "");
	response_errors ();
}
void nano::rpc_handler::bootstrap_lazy ()
{
	// Start a lazy bootstrap walk from a single block hash (control RPC).
	// "force" is read before the ec check but only used when validation passed.
	rpc_control_impl ();
	auto hash (hash_impl ());
	const bool force = request.get<bool> ("force", false);
	if (!ec)
	{
		node.bootstrap_initiator.bootstrap_lazy (hash, force);
		response_l.put ("started", "1");
	}
	response_errors ();
}
/*
 * @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
 */
void nano::rpc_handler::bootstrap_status ()
{
	// Dump counters from the currently running bootstrap attempt, or
	// {"active": "0"} when none is in progress.
	auto attempt (node.bootstrap_initiator.current_attempt ());
	if (attempt != nullptr)
	{
		response_l.put ("clients", std::to_string (attempt->clients.size ()));
		response_l.put ("pulls", std::to_string (attempt->pulls.size ()));
		response_l.put ("pulling", std::to_string (attempt->pulling));
		response_l.put ("connections", std::to_string (attempt->connections));
		response_l.put ("idle", std::to_string (attempt->idle.size ()));
		response_l.put ("target_connections", std::to_string (attempt->target_connections (attempt->pulls.size ())));
		response_l.put ("total_blocks", std::to_string (attempt->total_blocks));
		std::string mode_text;
		if (attempt->mode == nano::bootstrap_mode::legacy)
		{
			mode_text = "legacy";
		}
		else if (attempt->mode == nano::bootstrap_mode::lazy)
		{
			mode_text = "lazy";
		}
		else if (attempt->mode == nano::bootstrap_mode::wallet_lazy)
		{
			mode_text = "wallet_lazy";
		}
		response_l.put ("mode", mode_text);
		// Lazy-mode bookkeeping; these are zero-sized under legacy mode.
		response_l.put ("lazy_blocks", std::to_string (attempt->lazy_blocks.size ()));
		response_l.put ("lazy_state_unknown", std::to_string (attempt->lazy_state_unknown.size ()));
		response_l.put ("lazy_balances", std::to_string (attempt->lazy_balances.size ()));
		response_l.put ("lazy_pulls", std::to_string (attempt->lazy_pulls.size ()));
		response_l.put ("lazy_stopped", std::to_string (attempt->lazy_stopped));
		response_l.put ("lazy_keys", std::to_string (attempt->lazy_keys.size ()));
		if (!attempt->lazy_keys.empty ())
		{
			// Sample one key for diagnostics.
			response_l.put ("lazy_key_1", (*(attempt->lazy_keys.begin ())).to_string ());
		}
	}
	else
	{
		response_l.put ("active", "0");
	}
	response_errors ();
}
void nano::rpc_handler::chain (bool successors)
{
	// Walk a chain from "block", either towards successors or predecessors.
	// The "reverse" request flag flips the caller-supplied direction (XOR).
	successors = successors != request.get<bool> ("reverse", false);
	auto hash (hash_impl ("block"));
	auto count (count_impl ());
	auto offset (offset_optional_impl (0));
	if (!ec)
	{
		boost::property_tree::ptree blocks;
		auto transaction (node.store.tx_begin_read ());
		while (!hash.is_zero () && blocks.size () < count)
		{
			auto block_l (node.store.block_get (transaction, hash));
			if (block_l != nullptr)
			{
				// Skip the first "offset" blocks before recording any.
				if (offset > 0)
				{
					--offset;
				}
				else
				{
					boost::property_tree::ptree entry;
					entry.put ("", hash.to_string ());
					blocks.push_back (std::make_pair ("", entry));
				}
				hash = successors ? node.store.block_successor (transaction, hash) : block_l->previous ();
			}
			else
			{
				// Missing block ends the walk; clearing the hash exits the loop.
				hash.clear ();
			}
		}
		response_l.add_child ("blocks", blocks);
	}
	response_errors ();
}
void nano::rpc_handler::confirmation_active ()
{
	// List roots of unconfirmed, unstopped elections that have been announced
	// at least "announcements" times (default 0, i.e. all active elections).
	uint64_t announcements (0);
	boost::optional<std::string> announcements_text (request.get_optional<std::string> ("announcements"));
	if (announcements_text.is_initialized ())
	{
		// strtoul silently yields 0 for non-numeric input, which keeps the
		// default (no filtering).
		announcements = strtoul (announcements_text.get ().c_str (), NULL, 10);
	}
	boost::property_tree::ptree elections;
	{
		// Hold the active-elections lock only while snapshotting the roots.
		std::lock_guard<std::mutex> lock (node.active.mutex);
		for (auto i (node.active.roots.begin ()), n (node.active.roots.end ()); i != n; ++i)
		{
			if (i->election->announcements >= announcements && !i->election->confirmed && !i->election->stopped)
			{
				boost::property_tree::ptree entry;
				entry.put ("", i->root.to_string ());
				elections.push_back (std::make_pair ("", entry));
			}
		}
	}
	response_l.add_child ("confirmations", elections);
	response_errors ();
}
void nano::rpc_handler::confirmation_history ()
{
	// Report recently confirmed elections (optionally filtered to one winner
	// hash) plus aggregate duration statistics.
	boost::property_tree::ptree elections;
	boost::property_tree::ptree confirmation_stats;
	std::chrono::milliseconds running_total (0);
	nano::block_hash hash (0);
	boost::optional<std::string> hash_text (request.get_optional<std::string> ("hash"));
	if (hash_text.is_initialized ())
	{
		hash = hash_impl ();
	}
	if (!ec)
	{
		std::lock_guard<std::mutex> lock (node.active.mutex);
		for (auto i (node.active.confirmed.begin ()), n (node.active.confirmed.end ()); i != n; ++i)
		{
			if (hash.is_zero () || i->winner->hash () == hash)
			{
				boost::property_tree::ptree election;
				election.put ("hash", i->winner->hash ().to_string ());
				election.put ("duration", i->election_duration.count ());
				election.put ("time", i->election_end.count ());
				election.put ("tally", i->tally.to_string_dec ());
				elections.push_back (std::make_pair ("", election));
			}
			// NOTE: the total accumulates over ALL confirmed elections, even
			// when a hash filter excludes them from the listing; "average" is
			// therefore across the whole confirmed set, divided by the number
			// of listed elections.
			running_total += i->election_duration;
		}
	}
	confirmation_stats.put ("count", elections.size ());
	if (elections.size () >= 1)
	{
		confirmation_stats.put ("average", (running_total.count ()) / elections.size ());
	}
	response_l.add_child ("confirmation_stats", confirmation_stats);
	response_l.add_child ("confirmations", elections);
	response_errors ();
}
// RPC "confirmation_info": detailed report on one active election, looked up
// by its root.  "representatives" (default false) adds per-block voter weights;
// "contents" (default true) embeds each competing block's JSON.
void nano::rpc_handler::confirmation_info ()
{
	const bool representatives = request.get<bool> ("representatives", false);
	const bool contents = request.get<bool> ("contents", true);
	std::string root_text (request.get<std::string> ("root"));
	nano::uint512_union root;
	if (!root.decode_hex (root_text))
	{
		// Hold the active-elections lock for the entire tally walk.
		std::lock_guard<std::mutex> lock (node.active.mutex);
		auto conflict_info (node.active.roots.find (root));
		if (conflict_info != node.active.roots.end ())
		{
			response_l.put ("announcements", std::to_string (conflict_info->election->announcements));
			auto election (conflict_info->election);
			nano::uint128_t total (0);
			response_l.put ("last_winner", election->status.winner->hash ().to_string ());
			auto transaction (node.store.tx_begin_read ());
			auto tally_l (election->tally (transaction));
			boost::property_tree::ptree blocks;
			// One entry per competing block, keyed by block hash.
			for (auto i (tally_l.begin ()), n (tally_l.end ()); i != n; ++i)
			{
				boost::property_tree::ptree entry;
				auto tally (i->first);
				entry.put ("tally", tally.convert_to<std::string> ());
				total += tally;
				if (contents)
				{
					// Local string shadows the bool option of the same name.
					std::string contents;
					i->second->serialize_json (contents);
					entry.put ("contents", contents);
				}
				if (representatives)
				{
					// Voters for this specific block, ordered by weight descending.
					std::multimap<nano::uint128_t, nano::account, std::greater<nano::uint128_t>> representatives;
					for (auto ii (election->last_votes.begin ()), nn (election->last_votes.end ()); ii != nn; ++ii)
					{
						if (i->second->hash () == ii->second.hash)
						{
							nano::account representative (ii->first);
							auto amount (node.store.representation_get (transaction, representative));
							representatives.insert (std::make_pair (amount, representative));
						}
					}
					boost::property_tree::ptree representatives_list;
					for (auto ii (representatives.begin ()), nn (representatives.end ()); ii != nn; ++ii)
					{
						representatives_list.put (ii->second.to_account (), ii->first.convert_to<std::string> ());
					}
					entry.add_child ("representatives", representatives_list);
				}
				blocks.add_child ((i->second->hash ()).to_string (), entry);
			}
			response_l.put ("total_tally", total.convert_to<std::string> ());
			response_l.add_child ("blocks", blocks);
		}
		else
		{
			ec = nano::error_rpc::confirmation_not_found;
		}
	}
	else
	{
		ec = nano::error_rpc::invalid_root;
	}
	response_errors ();
}
void nano::rpc_handler::confirmation_quorum ()
{
response_l.put ("quorum_delta", node.delta ().convert_to<std::string> ());
response_l.put ("online_weight_quorum_percent", std::to_string (node.config.online_weight_quorum));
response_l.put ("online_weight_minimum", node.config.online_weight_minimum.to_string_dec ());
response_l.put ("online_stake_total", node.online_reps.online_stake_total.convert_to<std::string> ());
response_l.put ("peers_stake_total", node.peers.total_weight ().convert_to<std::string> ());
if (request.get<bool> ("peer_details", false))
{
boost::property_tree::ptree peers;
for (auto & peer : node.peers.list_probable_rep_weights ())
{
boost::property_tree::ptree peer_node;
peer_node.put ("account", peer.probable_rep_account.to_account ());
peer_node.put ("ip", peer.ip_address.to_string ());
peer_node.put ("weight", peer.rep_weight.to_string_dec ());
peers.push_back (std::make_pair ("", peer_node));
}
response_l.add_child ("peers", peers);
}
response_errors ();
}
// RPC "delegators": list every account delegating to the given representative
// together with its balance.  Walks the whole accounts table, so cost grows
// with ledger size.
void nano::rpc_handler::delegators ()
{
	auto account (account_impl ());
	if (!ec)
	{
		boost::property_tree::ptree delegators;
		auto transaction (node.store.tx_begin_read ());
		for (auto i (node.store.latest_begin (transaction)), n (node.store.latest_end ()); i != n; ++i)
		{
			nano::account_info info (i->second);
			// The current representative is recorded in the account's rep block.
			auto block (node.store.block_get (transaction, info.rep_block));
			assert (block != nullptr);
			if (block->representative () == account)
			{
				std::string balance;
				nano::uint128_union (info.balance).encode_dec (balance);
				delegators.put (nano::account (i->first).to_account (), balance);
			}
		}
		response_l.add_child ("delegators", delegators);
	}
	response_errors ();
}
// RPC "delegators_count": count the accounts whose representative is the
// requested account.  Full accounts-table scan, like "delegators".
void nano::rpc_handler::delegators_count ()
{
	auto account (account_impl ());
	if (!ec)
	{
		uint64_t delegator_count (0);
		auto transaction (node.store.tx_begin_read ());
		for (auto it (node.store.latest_begin (transaction)), end (node.store.latest_end ()); it != end; ++it)
		{
			nano::account_info info (it->second);
			auto rep_block (node.store.block_get (transaction, info.rep_block));
			assert (rep_block != nullptr);
			if (rep_block->representative () == account)
			{
				++delegator_count;
			}
		}
		response_l.put ("count", std::to_string (delegator_count));
	}
	response_errors ();
}
// RPC "deterministic_key": derive the keypair at "index" from "seed" without
// touching any wallet; reports private key, public key and account.
void nano::rpc_handler::deterministic_key ()
{
	std::string seed_text (request.get<std::string> ("seed"));
	std::string index_text (request.get<std::string> ("index"));
	nano::raw_key seed;
	if (seed.data.decode_hex (seed_text))
	{
		ec = nano::error_common::bad_seed;
	}
	else
	{
		try
		{
			auto index (static_cast<uint32_t> (std::stoul (index_text)));
			nano::uint256_union prv;
			nano::deterministic_key (seed.data, index, prv);
			nano::uint256_union pub (nano::pub_key (prv));
			response_l.put ("private", prv.to_string ());
			response_l.put ("public", pub.to_string ());
			response_l.put ("account", pub.to_account ());
		}
		catch (std::logic_error const &)
		{
			// std::stoul throws invalid_argument / out_of_range, both of which
			// derive from std::logic_error.
			ec = nano::error_common::invalid_index;
		}
	}
	response_errors ();
}
void nano::rpc_handler::frontiers ()
{
auto start (account_impl ());
auto count (count_impl ());
if (!ec)
{
boost::property_tree::ptree frontiers;
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.latest_begin (transaction, start)), n (node.store.latest_end ()); i != n && frontiers.size () < count; ++i)
{
frontiers.put (nano::account (i->first).to_account (), nano::account_info (i->second).head.to_string ());
}
response_l.add_child ("frontiers", frontiers);
}
response_errors ();
}
// RPC "account_count": number of accounts (frontiers) in the ledger store.
void nano::rpc_handler::account_count ()
{
	auto transaction (node.store.tx_begin_read ());
	response_l.put ("count", std::to_string (node.store.account_count (transaction)));
	response_errors ();
}
namespace
{
// Block visitor used by account_history: translates each ledger block into a
// property-tree entry.  In default mode it reports wallet-style semantics
// (sends/receives with account and amount); in "raw" mode it additionally
// exposes the literal block fields.  An entry left empty (e.g. a change block
// in non-raw mode) is skipped by the caller.
class history_visitor : public nano::block_visitor
{
public:
	history_visitor (nano::rpc_handler & handler_a, bool raw_a, nano::transaction & transaction_a, boost::property_tree::ptree & tree_a, nano::block_hash const & hash_a) :
	handler (handler_a),
	raw (raw_a),
	transaction (transaction_a),
	tree (tree_a),
	hash (hash_a)
	{
	}
	virtual ~history_visitor () = default;
	void send_block (nano::send_block const & block_a)
	{
		tree.put ("type", "send");
		auto account (block_a.hashables.destination.to_account ());
		tree.put ("account", account);
		auto amount (handler.node.ledger.amount (transaction, hash).convert_to<std::string> ());
		tree.put ("amount", amount);
		if (raw)
		{
			tree.put ("destination", account);
			tree.put ("balance", block_a.hashables.balance.to_string_dec ());
			tree.put ("previous", block_a.hashables.previous.to_string ());
		}
	}
	void receive_block (nano::receive_block const & block_a)
	{
		tree.put ("type", "receive");
		// "account" is the sender, resolved from the source block.
		auto account (handler.node.ledger.account (transaction, block_a.hashables.source).to_account ());
		tree.put ("account", account);
		auto amount (handler.node.ledger.amount (transaction, hash).convert_to<std::string> ());
		tree.put ("amount", amount);
		if (raw)
		{
			tree.put ("source", block_a.hashables.source.to_string ());
			tree.put ("previous", block_a.hashables.previous.to_string ());
		}
	}
	void open_block (nano::open_block const & block_a)
	{
		if (raw)
		{
			tree.put ("type", "open");
			tree.put ("representative", block_a.hashables.representative.to_account ());
			tree.put ("source", block_a.hashables.source.to_string ());
			tree.put ("opened", block_a.hashables.account.to_account ());
		}
		else
		{
			// Report opens as a receive
			tree.put ("type", "receive");
		}
		if (block_a.hashables.source != nano::genesis_account)
		{
			tree.put ("account", handler.node.ledger.account (transaction, block_a.hashables.source).to_account ());
			tree.put ("amount", handler.node.ledger.amount (transaction, hash).convert_to<std::string> ());
		}
		else
		{
			// The genesis open has no sending block; use the known constants.
			tree.put ("account", nano::genesis_account.to_account ());
			tree.put ("amount", nano::genesis_amount.convert_to<std::string> ());
		}
	}
	void change_block (nano::change_block const & block_a)
	{
		// Change blocks are only reported in raw mode; otherwise the entry
		// stays empty and the caller skips it.
		if (raw)
		{
			tree.put ("type", "change");
			tree.put ("representative", block_a.hashables.representative.to_account ());
			tree.put ("previous", block_a.hashables.previous.to_string ());
		}
	}
	void state_block (nano::state_block const & block_a)
	{
		if (raw)
		{
			tree.put ("type", "state");
			tree.put ("representative", block_a.hashables.representative.to_account ());
			tree.put ("link", block_a.hashables.link.to_string ());
			tree.put ("balance", block_a.hashables.balance.to_string_dec ());
			tree.put ("previous", block_a.hashables.previous.to_string ());
		}
		// Classify the state block by its balance delta relative to previous.
		auto balance (block_a.hashables.balance.number ());
		auto previous_balance (handler.node.ledger.balance (transaction, block_a.hashables.previous));
		if (balance < previous_balance)
		{
			// Balance decreased: a send; link is the destination account.
			if (raw)
			{
				tree.put ("subtype", "send");
			}
			else
			{
				tree.put ("type", "send");
			}
			tree.put ("account", block_a.hashables.link.to_account ());
			tree.put ("amount", (previous_balance - balance).convert_to<std::string> ());
		}
		else
		{
			if (block_a.hashables.link.is_zero ())
			{
				// Zero link with no balance decrease: representative change.
				if (raw)
				{
					tree.put ("subtype", "change");
				}
			}
			else if (balance == previous_balance && !handler.node.ledger.epoch_link.is_zero () && handler.node.ledger.is_epoch_link (block_a.hashables.link))
			{
				// Unchanged balance with the epoch link: epoch upgrade block.
				if (raw)
				{
					tree.put ("subtype", "epoch");
					tree.put ("account", handler.node.ledger.epoch_signer.to_account ());
				}
			}
			else
			{
				// Balance increased: a receive; link is the source block hash.
				if (raw)
				{
					tree.put ("subtype", "receive");
				}
				else
				{
					tree.put ("type", "receive");
				}
				tree.put ("account", handler.node.ledger.account (transaction, block_a.hashables.link).to_account ());
				tree.put ("amount", (balance - previous_balance).convert_to<std::string> ());
			}
		}
	}
	// Context shared across the visit callbacks.
	nano::rpc_handler & handler;
	bool raw;
	nano::transaction & transaction;
	boost::property_tree::ptree & tree;
	nano::block_hash const & hash;
};
}
// RPC "account_history": walk an account's chain backwards from its frontier
// (or from the optional "head" hash), emitting up to "count" entries after
// skipping "offset" blocks.  "raw" switches to per-type raw output produced
// by history_visitor.
void nano::rpc_handler::account_history ()
{
	nano::account account;
	bool output_raw (request.get_optional<bool> ("raw") == true);
	nano::block_hash hash;
	auto head_str (request.get_optional<std::string> ("head"));
	auto transaction (node.store.tx_begin_read ());
	if (head_str)
	{
		if (!hash.decode_hex (*head_str))
		{
			if (node.store.block_exists (transaction, hash))
			{
				// Derive the owning account from the supplied head block.
				account = node.ledger.account (transaction, hash);
			}
			else
			{
				ec = nano::error_blocks::not_found;
			}
		}
		else
		{
			ec = nano::error_blocks::bad_hash_number;
		}
	}
	else
	{
		account = account_impl ();
		if (!ec)
		{
			hash = node.ledger.latest (transaction, account);
		}
	}
	auto count (count_impl ());
	auto offset (offset_optional_impl (0));
	if (!ec)
	{
		boost::property_tree::ptree history;
		response_l.put ("account", account.to_account ());
		auto block (node.store.block_get (transaction, hash));
		while (block != nullptr && count > 0)
		{
			if (offset > 0)
			{
				--offset;
			}
			else
			{
				boost::property_tree::ptree entry;
				history_visitor visitor (*this, output_raw, transaction, entry, hash);
				block->visit (visitor);
				// The visitor leaves the entry empty for block types it skips
				// (e.g. change blocks in non-raw mode); those don't count.
				if (!entry.empty ())
				{
					entry.put ("hash", hash.to_string ());
					if (output_raw)
					{
						entry.put ("work", nano::to_string_hex (block->block_work ()));
						entry.put ("signature", block->block_signature ().to_string ());
					}
					history.push_back (std::make_pair ("", entry));
					--count;
				}
			}
			hash = block->previous ();
			block = node.store.block_get (transaction, hash);
		}
		response_l.add_child ("history", history);
		// A non-zero hash here means the walk stopped at "count"; expose it so
		// the caller can resume paging from this block.
		if (!hash.is_zero ())
		{
			response_l.put ("previous", hash.to_string ());
		}
	}
	response_errors ();
}
// RPC "keepalive" (control-only): ask the node to send a keepalive message
// to the given address and port.
void nano::rpc_handler::keepalive ()
{
	rpc_control_impl ();
	if (!ec)
	{
		std::string address_text (request.get<std::string> ("address"));
		std::string port_text (request.get<std::string> ("port"));
		uint16_t port;
		if (nano::parse_port (port_text, port))
		{
			ec = nano::error_common::invalid_port;
		}
		else
		{
			node.keepalive (address_text, port);
			response_l.put ("started", "1");
		}
	}
	response_errors ();
}
void nano::rpc_handler::key_create ()
{
nano::keypair pair;
response_l.put ("private", pair.prv.data.to_string ());
response_l.put ("public", pair.pub.to_string ());
response_l.put ("account", pair.pub.to_account ());
response_errors ();
}
// RPC "key_expand": expand a private key into its public key and account.
void nano::rpc_handler::key_expand ()
{
	std::string key_text (request.get<std::string> ("key"));
	nano::uint256_union prv;
	if (prv.decode_hex (key_text))
	{
		ec = nano::error_common::bad_private_key;
	}
	else
	{
		nano::uint256_union pub (nano::pub_key (prv));
		response_l.put ("private", prv.to_string ());
		response_l.put ("public", pub.to_string ());
		response_l.put ("account", pub.to_account ());
	}
	response_errors ();
}
// RPC "ledger" (control-only): dump account information for up to "count"
// accounts starting at "account", optionally filtered by "modified_since".
// "sorting" switches to a second path that materializes the whole table and
// orders by balance descending; "representative"/"weight"/"pending" add the
// corresponding per-account fields.
void nano::rpc_handler::ledger ()
{
	rpc_control_impl ();
	auto count (count_optional_impl ());
	if (!ec)
	{
		nano::account start (0);
		boost::optional<std::string> account_text (request.get_optional<std::string> ("account"));
		if (account_text.is_initialized ())
		{
			if (start.decode_account (account_text.get ()))
			{
				ec = nano::error_common::bad_account_number;
			}
		}
		uint64_t modified_since (0);
		boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
		if (modified_since_text.is_initialized ())
		{
			// strtoul returns 0 on parse failure, i.e. no time filter.
			modified_since = strtoul (modified_since_text.get ().c_str (), NULL, 10);
		}
		const bool sorting = request.get<bool> ("sorting", false);
		const bool representative = request.get<bool> ("representative", false);
		const bool weight = request.get<bool> ("weight", false);
		const bool pending = request.get<bool> ("pending", false);
		boost::property_tree::ptree accounts;
		auto transaction (node.store.tx_begin_read ());
		if (!ec && !sorting) // Simple
		{
			// Single pass in account order, stopping once "count" is reached.
			for (auto i (node.store.latest_begin (transaction, start)), n (node.store.latest_end ()); i != n && accounts.size () < count; ++i)
			{
				nano::account_info info (i->second);
				if (info.modified >= modified_since)
				{
					nano::account account (i->first);
					boost::property_tree::ptree response_a;
					response_a.put ("frontier", info.head.to_string ());
					response_a.put ("open_block", info.open_block.to_string ());
					response_a.put ("representative_block", info.rep_block.to_string ());
					std::string balance;
					nano::uint128_union (info.balance).encode_dec (balance);
					response_a.put ("balance", balance);
					response_a.put ("modified_timestamp", std::to_string (info.modified));
					response_a.put ("block_count", std::to_string (info.block_count));
					if (representative)
					{
						auto block (node.store.block_get (transaction, info.rep_block));
						assert (block != nullptr);
						response_a.put ("representative", block->representative ().to_account ());
					}
					if (weight)
					{
						auto account_weight (node.ledger.weight (transaction, account));
						response_a.put ("weight", account_weight.convert_to<std::string> ());
					}
					if (pending)
					{
						auto account_pending (node.ledger.account_pending (transaction, account));
						response_a.put ("pending", account_pending.convert_to<std::string> ());
					}
					accounts.push_back (std::make_pair (account.to_account (), response_a));
				}
			}
		}
		else if (!ec) // Sorting
		{
			// First pass: collect (balance, account) pairs, then sort by
			// balance descending before emitting up to "count" entries.
			std::vector<std::pair<nano::uint128_union, nano::account>> ledger_l;
			for (auto i (node.store.latest_begin (transaction, start)), n (node.store.latest_end ()); i != n; ++i)
			{
				nano::account_info info (i->second);
				nano::uint128_union balance (info.balance);
				if (info.modified >= modified_since)
				{
					ledger_l.push_back (std::make_pair (balance, nano::account (i->first)));
				}
			}
			std::sort (ledger_l.begin (), ledger_l.end ());
			std::reverse (ledger_l.begin (), ledger_l.end ());
			nano::account_info info;
			for (auto i (ledger_l.begin ()), n (ledger_l.end ()); i != n && accounts.size () < count; ++i)
			{
				node.store.account_get (transaction, i->second, info);
				nano::account account (i->second);
				boost::property_tree::ptree response_a;
				response_a.put ("frontier", info.head.to_string ());
				response_a.put ("open_block", info.open_block.to_string ());
				response_a.put ("representative_block", info.rep_block.to_string ());
				std::string balance;
				(i->first).encode_dec (balance);
				response_a.put ("balance", balance);
				response_a.put ("modified_timestamp", std::to_string (info.modified));
				response_a.put ("block_count", std::to_string (info.block_count));
				if (representative)
				{
					auto block (node.store.block_get (transaction, info.rep_block));
					assert (block != nullptr);
					response_a.put ("representative", block->representative ().to_account ());
				}
				if (weight)
				{
					auto account_weight (node.ledger.weight (transaction, account));
					response_a.put ("weight", account_weight.convert_to<std::string> ());
				}
				if (pending)
				{
					auto account_pending (node.ledger.account_pending (transaction, account));
					response_a.put ("pending", account_pending.convert_to<std::string> ());
				}
				accounts.push_back (std::make_pair (account.to_account (), response_a));
			}
		}
		response_l.add_child ("accounts", accounts);
	}
	response_errors ();
}
// RPC unit helper: scale a raw amount down by "ratio" (integer division,
// remainder discarded).
void nano::rpc_handler::mrai_from_raw (nano::uint128_t ratio)
{
	auto amount (amount_impl ());
	if (!ec)
	{
		auto scaled (amount.number () / ratio);
		response_l.put ("amount", scaled.convert_to<std::string> ());
	}
	response_errors ();
}
// RPC unit helper: scale a unit amount up into raw by multiplying with
// "ratio" (a fixed non-zero power of ten supplied by the dispatcher).
// Rejects amounts whose product does not fit in 128 bits.
void nano::rpc_handler::mrai_to_raw (nano::uint128_t ratio)
{
	auto amount (amount_impl ());
	if (!ec)
	{
		auto result (amount.number () * ratio);
		// Round-trip overflow check: the unchecked 128-bit multiply wraps, and
		// a wrapped product can no longer reproduce the input when divided
		// back.  The previous "result > amount" test both missed wraps that
		// still landed above "amount" and spuriously rejected a zero amount
		// (0 * ratio == 0 is not greater than 0).
		if (result / ratio == amount.number ())
		{
			response_l.put ("amount", result.convert_to<std::string> ());
		}
		else
		{
			ec = nano::error_common::invalid_amount_big;
		}
	}
	response_errors ();
}
/*
 * @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
 */
// Control-only: expose this node's identity keypair.
void nano::rpc_handler::node_id ()
{
	rpc_control_impl ();
	if (!ec)
	{
		auto const & id (node.node_id);
		response_l.put ("private", id.prv.data.to_string ());
		response_l.put ("public", id.pub.to_string ());
		response_l.put ("as_account", id.pub.to_account ());
	}
	response_errors ();
}
/*
 * @warning This is an internal/diagnostic RPC, do not rely on its interface being stable
 */
// Control-only: delete the node's stored identity key.
void nano::rpc_handler::node_id_delete ()
{
	rpc_control_impl ();
	if (!ec)
	{
		auto write_transaction (node.store.tx_begin_write ());
		node.store.delete_node_id (write_transaction);
		response_l.put ("deleted", "1");
	}
	response_errors ();
}
// RPC "password_change" (control-only): rekey the wallet with the supplied
// password; "changed" is "1" on success.
void nano::rpc_handler::password_change ()
{
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_write ());
		std::string password_text (request.get<std::string> ("password"));
		auto failed (wallet->store.rekey (transaction, password_text));
		response_l.put ("changed", failed ? "0" : "1");
	}
	response_errors ();
}
// RPC "password_enter": attempt to unlock the wallet with the supplied
// password; "valid" is "1" when the password was accepted.
void nano::rpc_handler::password_enter ()
{
	auto wallet (wallet_impl ());
	if (!ec)
	{
		std::string password_text (request.get<std::string> ("password"));
		auto transaction (wallet->wallets.tx_begin_write ());
		auto failed (wallet->enter_password (transaction, password_text));
		response_l.put ("valid", failed ? "0" : "1");
	}
	response_errors ();
}
// Shared handler for "password_valid" and "wallet_locked": both report
// whether the wallet's password is currently valid, with inverted phrasing
// when invoked as the locked check.
void nano::rpc_handler::password_valid (bool wallet_locked)
{
	auto wallet (wallet_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_read ());
		auto valid (wallet->store.valid_password (transaction));
		if (wallet_locked)
		{
			// A valid password means the wallet is not locked.
			response_l.put ("locked", valid ? "0" : "1");
		}
		else
		{
			response_l.put ("valid", valid ? "1" : "0");
		}
	}
	response_errors ();
}
// RPC "peers": map each known peer endpoint to its advertised version.
void nano::rpc_handler::peers ()
{
	boost::property_tree::ptree peers_l;
	for (auto const & peer : node.peers.list_version ())
	{
		std::stringstream endpoint;
		endpoint << peer.first;
		peers_l.push_back (boost::property_tree::ptree::value_type (endpoint.str (), boost::property_tree::ptree (std::to_string (peer.second))));
	}
	response_l.add_child ("peers", peers_l);
	response_errors ();
}
// RPC "pending": list pending (unreceived) blocks destined for "account", up
// to "count", optionally filtered by a minimum "threshold" amount and
// augmented with "source"/"min_version" details.  Blocks with an active
// election are excluded unless "include_active" is set.
void nano::rpc_handler::pending ()
{
	auto account (account_impl ());
	auto count (count_optional_impl ());
	auto threshold (threshold_optional_impl ());
	const bool source = request.get<bool> ("source", false);
	const bool min_version = request.get<bool> ("min_version", false);
	const bool include_active = request.get<bool> ("include_active", false);
	if (!ec)
	{
		boost::property_tree::ptree peers_l;
		auto transaction (node.store.tx_begin_read ());
		// NOTE(review): the loop terminates only via the account-mismatch test;
		// this assumes the store iterator yields a key with a different account
		// once past this account's range (and at end) — confirm against the
		// store iterator's end semantics.
		for (auto i (node.store.pending_begin (transaction, nano::pending_key (account, 0))); nano::pending_key (i->first).account == account && peers_l.size () < count; ++i)
		{
			nano::pending_key key (i->first);
			// Only fetch the block when we need to test election activity.
			std::shared_ptr<nano::block> block (include_active ? nullptr : node.store.block_get (transaction, key.hash));
			if (include_active || (block && !node.active.active (*block)))
			{
				if (threshold.is_zero () && !source && !min_version)
				{
					// Cheapest form: bare list of pending block hashes.
					boost::property_tree::ptree entry;
					entry.put ("", key.hash.to_string ());
					peers_l.push_back (std::make_pair ("", entry));
				}
				else
				{
					nano::pending_info info (i->second);
					if (info.amount.number () >= threshold.number ())
					{
						if (source || min_version)
						{
							boost::property_tree::ptree pending_tree;
							pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
							if (source)
							{
								pending_tree.put ("source", info.source.to_account ());
							}
							if (min_version)
							{
								pending_tree.put ("min_version", info.epoch == nano::epoch::epoch_1 ? "1" : "0");
							}
							peers_l.add_child (key.hash.to_string (), pending_tree);
						}
						else
						{
							// Threshold only: map hash -> amount.
							peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
						}
					}
				}
			}
		}
		response_l.add_child ("blocks", peers_l);
	}
	response_errors ();
}
// RPC "pending_exists": report whether the given block hash is still pending
// (sent but unreceived).  Blocks with an active election count as not pending
// unless "include_active" is set.
void nano::rpc_handler::pending_exists ()
{
	auto hash (hash_impl ());
	const bool include_active = request.get<bool> ("include_active", false);
	if (!ec)
	{
		auto transaction (node.store.tx_begin_read ());
		auto block (node.store.block_get (transaction, hash));
		if (block == nullptr)
		{
			ec = nano::error_blocks::not_found;
		}
		else
		{
			bool exists (false);
			auto destination (node.ledger.block_destination (transaction, *block));
			if (!destination.is_zero ())
			{
				exists = node.store.pending_exists (transaction, nano::pending_key (destination, hash));
			}
			exists = exists && (include_active || !node.active.active (*block));
			response_l.put ("exists", exists ? "1" : "0");
		}
	}
	response_errors ();
}
// RPC "payment_begin": check out a transaction account from the wallet's free
// pool, discarding free-list entries that are stale or hold a balance, and
// creating a fresh deterministic account when the pool is exhausted.
void nano::rpc_handler::payment_begin ()
{
	std::string id_text (request.get<std::string> ("wallet"));
	nano::uint256_union id;
	if (!id.decode_hex (id_text))
	{
		auto existing (node.wallets.items.find (id));
		if (existing != node.wallets.items.end ())
		{
			auto transaction (node.wallets.tx_begin_write ());
			std::shared_ptr<nano::wallet> wallet (existing->second);
			if (wallet->store.valid_password (transaction))
			{
				nano::account account (0);
				do
				{
					// Inner "existing" (a free-accounts iterator) shadows the
					// wallet-map iterator above.
					auto existing (wallet->free_accounts.begin ());
					if (existing != wallet->free_accounts.end ())
					{
						account = *existing;
						wallet->free_accounts.erase (existing);
						if (wallet->store.find (transaction, account) == wallet->store.end ())
						{
							// Listed as free but no longer present in the wallet store.
							BOOST_LOG (node.log) << boost::str (boost::format ("Transaction wallet %1% externally modified listing account %2% as free but no longer exists") % id.to_string () % account.to_account ());
							account.clear ();
						}
						else
						{
							auto block_transaction (node.store.tx_begin_read ());
							if (!node.ledger.account_balance (block_transaction, account).is_zero ())
							{
								// Accounts holding funds cannot serve as payment accounts.
								BOOST_LOG (node.log) << boost::str (boost::format ("Skipping account %1% for use as a transaction account: non-zero balance") % account.to_account ());
								account.clear ();
							}
						}
					}
					else
					{
						// Pool exhausted: mint a new deterministic account.
						account = wallet->deterministic_insert (transaction);
						break;
					}
				} while (account.is_zero ());
				if (!account.is_zero ())
				{
					response_l.put ("account", account.to_account ());
				}
				else
				{
					// NOTE(review): reached when deterministic_insert returned a
					// zero account — confirm under what conditions that happens.
					ec = nano::error_rpc::payment_unable_create_account;
				}
			}
			else
			{
				ec = nano::error_common::wallet_locked;
			}
		}
		else
		{
			ec = nano::error_common::wallet_not_found;
		}
	}
	else
	{
		ec = nano::error_common::bad_wallet_number;
	}
	response_errors ();
}
// RPC "payment_init": prepare a wallet for payment use by initializing its
// free-accounts pool; requires the wallet to be unlocked.
void nano::rpc_handler::payment_init ()
{
	auto wallet (wallet_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_write ());
		if (!wallet->store.valid_password (transaction))
		{
			ec = nano::error_common::wallet_locked;
		}
		else
		{
			wallet->init_free_accounts (transaction);
			response_l.put ("status", "Ready");
		}
	}
	response_errors ();
}
// RPC "payment_end": return a payment account to the wallet's free pool.
// The account must belong to the wallet and must have a zero ledger balance.
void nano::rpc_handler::payment_end ()
{
	auto account (account_impl ());
	auto wallet (wallet_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_read ());
		auto block_transaction (node.store.tx_begin_read ());
		auto existing (wallet->store.find (transaction, account));
		if (existing == wallet->store.end ())
		{
			ec = nano::error_common::account_not_found_wallet;
		}
		else if (!node.ledger.account_balance (block_transaction, account).is_zero ())
		{
			ec = nano::error_rpc::payment_account_balance;
		}
		else
		{
			wallet->free_accounts.insert (account);
			response_l.put ("ended", "1");
		}
	}
	response_errors ();
}
// RPC "payment_wait": asynchronously wait up to "timeout" milliseconds for
// "account" to receive at least "amount".  The response is delivered later by
// the payment_observer, so response_errors() runs here only on failure.
void nano::rpc_handler::payment_wait ()
{
	std::string timeout_text (request.get<std::string> ("timeout"));
	auto account (account_impl ());
	auto amount (amount_impl ());
	if (!ec)
	{
		uint64_t timeout;
		if (!decode_unsigned (timeout_text, timeout))
		{
			{
				auto observer (std::make_shared<nano::payment_observer> (response, rpc, account, amount));
				observer->start (timeout);
				// Register under rpc.mutex; at most one waiter per account.
				std::lock_guard<std::mutex> lock (rpc.mutex);
				assert (rpc.payment_observers.find (account) == rpc.payment_observers.end ());
				rpc.payment_observers[account] = observer;
			}
			// Kick an immediate check in case the payment already arrived.
			rpc.observer_action (account);
		}
		else
		{
			ec = nano::error_rpc::bad_timeout;
		}
	}
	// Only report immediately on error; otherwise the observer responds.
	if (ec)
	{
		response_errors ();
	}
}
// RPC "process": parse the supplied JSON block, validate its proof of work,
// push it synchronously through the block processor, and map the ledger
// result code onto an RPC error (or the block hash on success).  "force"
// (control-only) resolves a fork in favor of the submitted block.
void nano::rpc_handler::process ()
{
	std::string block_text (request.get<std::string> ("block"));
	boost::property_tree::ptree block_l;
	std::stringstream block_stream (block_text);
	boost::property_tree::read_json (block_stream, block_l);
	std::shared_ptr<nano::block> block (nano::deserialize_block_json (block_l));
	if (block != nullptr)
	{
		if (!nano::work_validate (*block))
		{
			auto hash (block->hash ());
			// Record arrival so the node treats it as a locally-seen block.
			node.block_arrival.add (hash);
			nano::process_return result;
			{
				auto transaction (node.store.tx_begin_write ());
				result = node.block_processor.process_one (transaction, block, std::chrono::steady_clock::time_point ());
			}
			switch (result.code)
			{
				case nano::process_result::progress:
				{
					response_l.put ("hash", hash.to_string ());
					break;
				}
				case nano::process_result::gap_previous:
				{
					ec = nano::error_process::gap_previous;
					break;
				}
				case nano::process_result::gap_source:
				{
					ec = nano::error_process::gap_source;
					break;
				}
				case nano::process_result::old:
				{
					ec = nano::error_process::old;
					break;
				}
				case nano::process_result::bad_signature:
				{
					ec = nano::error_process::bad_signature;
					break;
				}
				case nano::process_result::negative_spend:
				{
					// TODO once we get RPC versioning, this should be changed to "negative spend"
					ec = nano::error_process::negative_spend;
					break;
				}
				case nano::process_result::balance_mismatch:
				{
					ec = nano::error_process::balance_mismatch;
					break;
				}
				case nano::process_result::unreceivable:
				{
					ec = nano::error_process::unreceivable;
					break;
				}
				case nano::process_result::block_position:
				{
					ec = nano::error_process::block_position;
					break;
				}
				case nano::process_result::fork:
				{
					// "force" requires enable_control; it drops the competing
					// election and forces this block through the processor.
					const bool force = request.get<bool> ("force", false);
					if (force && rpc.config.enable_control)
					{
						node.active.erase (*block);
						node.block_processor.force (block);
						response_l.put ("hash", hash.to_string ());
					}
					else
					{
						ec = nano::error_process::fork;
					}
					break;
				}
				default:
				{
					ec = nano::error_process::other;
					break;
				}
			}
		}
		else
		{
			ec = nano::error_blocks::work_low;
		}
	}
	else
	{
		ec = nano::error_blocks::invalid_block;
	}
	response_errors ();
}
// RPC "receive" (control-only): pocket a specific pending block into the
// given wallet account.  Optional "work" supplies precomputed work (validated
// against the account frontier, or the account itself for unopened accounts);
// otherwise work is generated.  The response is produced asynchronously by
// the receive_async callback, so response_errors() runs here only on failure.
void nano::rpc_handler::receive ()
{
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	auto account (account_impl ());
	auto hash (hash_impl ("block"));
	if (!ec)
	{
		auto transaction (node.store.tx_begin_read ());
		if (wallet->store.valid_password (transaction))
		{
			if (wallet->store.find (transaction, account) != wallet->store.end ())
			{
				auto block (node.store.block_get (transaction, hash));
				if (block != nullptr)
				{
					if (node.store.pending_exists (transaction, nano::pending_key (account, hash)))
					{
						auto work (work_optional_impl ());
						if (!ec && work)
						{
							nano::account_info info;
							nano::uint256_union head;
							// Work is attached to the frontier; unopened
							// accounts use the account as the work root.
							if (!node.store.account_get (transaction, account, info))
							{
								head = info.head;
							}
							else
							{
								head = account;
							}
							if (nano::work_validate (head, work))
							{
								ec = nano::error_common::invalid_work;
							}
						}
						if (!ec)
						{
							bool generate_work (work == 0); // Disable work generation if "work" option is provided
							auto response_a (response);
							// NOTE(review): nano::genesis_amount is passed as the
							// amount argument — presumably an upper bound rather
							// than the actual amount; confirm receive_async
							// semantics.
							wallet->receive_async (std::move (block), account, nano::genesis_amount, [response_a](std::shared_ptr<nano::block> block_a) {
								nano::uint256_union hash_a (0);
								if (block_a != nullptr)
								{
									hash_a = block_a->hash ();
								}
								boost::property_tree::ptree response_l;
								response_l.put ("block", hash_a.to_string ());
								response_a (response_l);
							},
							work, generate_work);
						}
					}
					else
					{
						ec = nano::error_process::unreceivable;
					}
				}
				else
				{
					ec = nano::error_blocks::not_found;
				}
			}
			else
			{
				ec = nano::error_common::account_not_found_wallet;
			}
		}
		else
		{
			ec = nano::error_common::wallet_locked;
		}
	}
	// Because of receive_async
	if (ec)
	{
		response_errors ();
	}
}
// RPC "receive_minimum" (control-only): report the configured minimum amount
// the node will automatically receive.
void nano::rpc_handler::receive_minimum ()
{
	rpc_control_impl ();
	if (!ec)
	{
		auto minimum (node.config.receive_minimum.to_string_dec ());
		response_l.put ("amount", minimum);
	}
	response_errors ();
}
// RPC "receive_minimum_set" (control-only): update the node's configured
// receive minimum.  Note: the change is applied in memory only here.
void nano::rpc_handler::receive_minimum_set ()
{
	rpc_control_impl ();
	auto amount (amount_impl ());
	if (!ec)
	{
		node.config.receive_minimum = amount;
		response_l.put ("success", "");
	}
	response_errors ();
}
// RPC "representatives": list representatives and their voting weight, up to
// "count".  With "sorting" the whole representation table is materialized and
// ordered by weight descending before the count cutoff is applied.
void nano::rpc_handler::representatives ()
{
	auto count (count_optional_impl ());
	if (!ec)
	{
		const bool sorting = request.get<bool> ("sorting", false);
		boost::property_tree::ptree representatives;
		auto transaction (node.store.tx_begin_read ());
		if (!sorting) // Simple
		{
			// Account order; stop once "count" entries are emitted.
			for (auto i (node.store.representation_begin (transaction)), n (node.store.representation_end ()); i != n && representatives.size () < count; ++i)
			{
				nano::account account (i->first);
				auto amount (node.store.representation_get (transaction, account));
				representatives.put (account.to_account (), amount.convert_to<std::string> ());
			}
		}
		else // Sorting
		{
			// Collect everything, then sort by weight descending.
			std::vector<std::pair<nano::uint128_union, std::string>> representation;
			for (auto i (node.store.representation_begin (transaction)), n (node.store.representation_end ()); i != n; ++i)
			{
				nano::account account (i->first);
				auto amount (node.store.representation_get (transaction, account));
				representation.push_back (std::make_pair (amount, account.to_account ()));
			}
			std::sort (representation.begin (), representation.end ());
			std::reverse (representation.begin (), representation.end ());
			for (auto i (representation.begin ()), n (representation.end ()); i != n && representatives.size () < count; ++i)
			{
				representatives.put (i->second, (i->first).number ().convert_to<std::string> ());
			}
		}
		response_l.add_child ("representatives", representatives);
	}
	response_errors ();
}
// RPC "representatives_online": list representatives currently considered
// online.  The optional "accounts" array restricts output to those accounts;
// "weight" adds each representative's voting weight.
void nano::rpc_handler::representatives_online ()
{
	const auto accounts_node = request.get_child_optional ("accounts");
	const bool weight = request.get<bool> ("weight", false);
	std::vector<nano::public_key> accounts_to_filter;
	if (accounts_node.is_initialized ())
	{
		// Parse the filter list up front; any bad account aborts the request.
		for (auto & a : (*accounts_node))
		{
			nano::public_key account;
			auto error (account.decode_account (a.second.get<std::string> ("")));
			if (!error)
			{
				accounts_to_filter.push_back (account);
			}
			else
			{
				ec = nano::error_common::bad_account_number;
				break;
			}
		}
	}
	if (!ec)
	{
		boost::property_tree::ptree representatives;
		auto transaction (node.store.tx_begin_read ());
		auto reps (node.online_reps.list ());
		for (auto & i : reps)
		{
			if (accounts_node.is_initialized ())
			{
				// Each requested account is removed once matched, so an empty
				// filter list means everything requested was found.
				if (accounts_to_filter.empty ())
				{
					break;
				}
				auto found_acc = std::find (accounts_to_filter.begin (), accounts_to_filter.end (), i);
				if (found_acc == accounts_to_filter.end ())
				{
					continue;
				}
				else
				{
					accounts_to_filter.erase (found_acc);
				}
			}
			if (weight)
			{
				boost::property_tree::ptree weight_node;
				auto account_weight (node.ledger.weight (transaction, i));
				weight_node.put ("weight", account_weight.convert_to<std::string> ());
				representatives.add_child (i.to_account (), weight_node);
			}
			else
			{
				boost::property_tree::ptree entry;
				entry.put ("", i.to_account ());
				representatives.push_back (std::make_pair ("", entry));
			}
		}
		response_l.add_child ("representatives", representatives);
	}
	response_errors ();
}
void nano::rpc_handler::republish ()
{
	// Rebroadcast a chain of up to "count" blocks starting at "hash", optionally
	// including each block's source chain ("sources") and destination chain
	// ("destinations") so peers receive the full context.
	auto count (count_optional_impl (1024U));
	uint64_t sources (0);
	uint64_t destinations (0);
	boost::optional<std::string> sources_text (request.get_optional<std::string> ("sources"));
	if (!ec && sources_text.is_initialized ())
	{
		if (decode_unsigned (sources_text.get (), sources))
		{
			ec = nano::error_rpc::invalid_sources;
		}
	}
	boost::optional<std::string> destinations_text (request.get_optional<std::string> ("destinations"));
	if (!ec && destinations_text.is_initialized ())
	{
		if (decode_unsigned (destinations_text.get (), destinations))
		{
			ec = nano::error_rpc::invalid_destinations;
		}
	}
	auto hash (hash_impl ());
	if (!ec)
	{
		boost::property_tree::ptree blocks;
		auto transaction (node.store.tx_begin_read ());
		auto block (node.store.block_get (transaction, hash));
		if (block != nullptr)
		{
			std::deque<std::shared_ptr<nano::block>> republish_bundle;
			// Walk the successor chain; loop ends at "count" blocks or chain end (hash == 0)
			for (auto i (0); !hash.is_zero () && i < count; ++i)
			{
				block = node.store.block_get (transaction, hash);
				if (sources != 0) // Republish source chain
				{
					nano::block_hash source (node.ledger.block_source (transaction, *block));
					auto block_a (node.store.block_get (transaction, source));
					std::vector<nano::block_hash> hashes;
					// Collect up to "sources" ancestors of the source block
					while (block_a != nullptr && hashes.size () < sources)
					{
						hashes.push_back (source);
						source = block_a->previous ();
						block_a = node.store.block_get (transaction, source);
					}
					// Oldest first so peers receive blocks in chain order
					std::reverse (hashes.begin (), hashes.end ());
					for (auto & hash_l : hashes)
					{
						block_a = node.store.block_get (transaction, hash_l);
						republish_bundle.push_back (std::move (block_a));
						boost::property_tree::ptree entry_l;
						entry_l.put ("", hash_l.to_string ());
						blocks.push_back (std::make_pair ("", entry_l));
					}
				}
				republish_bundle.push_back (std::move (block)); // Republish block
				boost::property_tree::ptree entry;
				entry.put ("", hash.to_string ());
				blocks.push_back (std::make_pair ("", entry));
				if (destinations != 0) // Republish destination chain
				{
					auto block_b (node.store.block_get (transaction, hash));
					auto destination (node.ledger.block_destination (transaction, *block_b));
					if (!destination.is_zero ())
					{
						// Only when the destination already received (entry no longer pending)
						if (!node.store.pending_exists (transaction, nano::pending_key (destination, hash)))
						{
							nano::block_hash previous (node.ledger.latest (transaction, destination));
							auto block_d (node.store.block_get (transaction, previous));
							nano::block_hash source;
							std::vector<nano::block_hash> hashes;
							// Walk the destination chain backwards until the block sourcing "hash" is found
							while (block_d != nullptr && hash != source)
							{
								hashes.push_back (previous);
								source = node.ledger.block_source (transaction, *block_d);
								previous = block_d->previous ();
								block_d = node.store.block_get (transaction, previous);
							}
							std::reverse (hashes.begin (), hashes.end ());
							if (hashes.size () > destinations)
							{
								hashes.resize (destinations);
							}
							for (auto & hash_l : hashes)
							{
								block_d = node.store.block_get (transaction, hash_l);
								republish_bundle.push_back (std::move (block_d));
								boost::property_tree::ptree entry_l;
								entry_l.put ("", hash_l.to_string ());
								blocks.push_back (std::make_pair ("", entry_l));
							}
						}
					}
				}
				hash = node.store.block_successor (transaction, hash);
			}
			// Flood collected blocks to peers, 25 per batch
			node.network.republish_block_batch (republish_bundle, 25);
			response_l.put ("success", ""); // obsolete
			response_l.add_child ("blocks", blocks);
		}
		else
		{
			ec = nano::error_blocks::not_found;
		}
	}
	response_errors ();
}
void nano::rpc_handler::search_pending ()
{
	// Control-only: trigger a search for receivable funds in one wallet.
	// "started" is "true" when the search was successfully kicked off.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	if (!ec)
	{
		response_l.put ("started", !wallet->search_pending ());
	}
	response_errors ();
}
void nano::rpc_handler::search_pending_all ()
{
	// Control-only: kick off a pending-funds search across every wallet.
	rpc_control_impl ();
	if (ec)
	{
		response_errors ();
		return;
	}
	node.wallets.search_pending_all ();
	response_l.put ("success", "");
	response_errors ();
}
void nano::rpc_handler::send ()
{
	// Control-only: send "amount" from "source" to "destination" out of "wallet".
	// On success the reply is produced asynchronously by the send_async callback,
	// so response_errors() is only called here on the error path.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	auto amount (amount_impl ());
	// Sending 0 amount is invalid with state blocks
	if (!ec && amount.is_zero ())
	{
		ec = nano::error_common::invalid_amount;
	}
	if (!ec)
	{
		std::string source_text (request.get<std::string> ("source"));
		nano::account source;
		if (!source.decode_account (source_text))
		{
			std::string destination_text (request.get<std::string> ("destination"));
			nano::account destination;
			if (!destination.decode_account (destination_text))
			{
				auto work (work_optional_impl ());
				nano::uint128_t balance (0);
				if (!ec)
				{
					auto transaction (node.wallets.tx_begin_read ());
					auto block_transaction (node.store.tx_begin_read ());
					if (wallet->store.valid_password (transaction))
					{
						if (wallet->store.find (transaction, source) != wallet->store.end ())
						{
							nano::account_info info;
							if (!node.store.account_get (block_transaction, source, info))
							{
								// Captured by the callback below to distinguish
								// "insufficient balance" from other block-generation failures
								balance = (info.balance).number ();
							}
							else
							{
								ec = nano::error_common::account_not_found;
							}
							// Validate caller-supplied work against the account head
							if (!ec && work)
							{
								if (nano::work_validate (info.head, work))
								{
									ec = nano::error_common::invalid_work;
								}
							}
						}
						else
						{
							ec = nano::error_common::account_not_found_wallet;
						}
					}
					else
					{
						ec = nano::error_common::wallet_locked;
					}
				}
				if (!ec)
				{
					bool generate_work (work == 0); // Disable work generation if "work" option is provided
					boost::optional<std::string> send_id (request.get_optional<std::string> ("id"));
					auto rpc_l (shared_from_this ());
					auto response_a (response);
					wallet->send_async (source, destination, amount.number (), [balance, amount, response_a](std::shared_ptr<nano::block> block_a) {
						if (block_a != nullptr)
						{
							nano::uint256_union hash (block_a->hash ());
							boost::property_tree::ptree response_l;
							response_l.put ("block", hash.to_string ());
							response_a (response_l);
						}
						else
						{
							// No block produced: report why, based on the balance snapshot
							if (balance >= amount.number ())
							{
								error_response (response_a, "Error generating block");
							}
							else
							{
								std::error_code ec (nano::error_common::insufficient_balance);
								error_response (response_a, ec.message ());
							}
						}
					},
					work, generate_work, send_id);
				}
			}
			else
			{
				ec = nano::error_rpc::bad_destination;
			}
		}
		else
		{
			ec = nano::error_rpc::bad_source;
		}
	}
	// Because of send_async
	if (ec)
	{
		response_errors ();
	}
}
void nano::rpc_handler::stats ()
{
	// Dump node statistics ("counters" or "samples") via the JSON stat sink.
	auto sink = node.stats.log_sink_json ();
	auto type (request.get<std::string> ("type", ""));
	if (type == "counters")
	{
		node.stats.log_counters (*sink);
	}
	else if (type == "samples")
	{
		node.stats.log_samples (*sink);
	}
	else
	{
		ec = nano::error_rpc::invalid_missing_type;
	}
	if (ec)
	{
		response_errors ();
	}
	else
	{
		// Respond with the sink's tree directly, bypassing response_l
		response (*static_cast<boost::property_tree::ptree *> (sink->to_object ()));
	}
}
void nano::rpc_handler::stop ()
{
	// Control-only: shut down the RPC server and the node.
	rpc_control_impl ();
	if (!ec)
	{
		response_l.put ("success", "");
	}
	// Respond first so the reply can still be delivered before shutdown begins
	response_errors ();
	if (!ec)
	{
		rpc.stop ();
		node.stop ();
	}
}
void nano::rpc_handler::unchecked ()
{
	// List up to "count" blocks currently sitting in the unchecked table,
	// keyed by block hash with JSON contents as the value.
	auto count (count_optional_impl ());
	if (!ec)
	{
		boost::property_tree::ptree unchecked;
		auto transaction (node.store.tx_begin_read ());
		auto end (node.store.unchecked_end ());
		for (auto it (node.store.unchecked_begin (transaction)); it != end && unchecked.size () < count; ++it)
		{
			auto const & block_l (it->second);
			std::string json;
			block_l->serialize_json (json);
			unchecked.put (block_l->hash ().to_string (), json);
		}
		response_l.add_child ("blocks", unchecked);
	}
	response_errors ();
}
void nano::rpc_handler::unchecked_clear ()
{
	// Control-only: wipe the entire unchecked block table.
	rpc_control_impl ();
	if (!ec)
	{
		{
			// Write transaction scoped tightly around the clear
			auto transaction (node.store.tx_begin_write ());
			node.store.unchecked_clear (transaction);
		}
		response_l.put ("success", "");
	}
	response_errors ();
}
void nano::rpc_handler::unchecked_get ()
{
	// Fetch the JSON contents of a single unchecked block by hash.
	// The table is keyed by dependency, so this is a linear scan.
	auto hash (hash_impl ());
	if (!ec)
	{
		auto transaction (node.store.tx_begin_read ());
		auto end (node.store.unchecked_end ());
		for (auto it (node.store.unchecked_begin (transaction)); it != end; ++it)
		{
			std::shared_ptr<nano::block> block_l (it->second);
			if (block_l->hash () != hash)
			{
				continue;
			}
			std::string json;
			block_l->serialize_json (json);
			response_l.put ("contents", json);
			break;
		}
		// An empty response tree means the scan found nothing
		if (response_l.empty ())
		{
			ec = nano::error_blocks::not_found;
		}
	}
	response_errors ();
}
void nano::rpc_handler::unchecked_keys ()
{
	// List up to "count" unchecked blocks with their dependency key, hash and
	// JSON contents, starting from the optional dependency "key".
	auto count (count_optional_impl ());
	nano::uint256_union key (0);
	boost::optional<std::string> hash_text (request.get_optional<std::string> ("key"));
	if (!ec && hash_text.is_initialized ())
	{
		if (key.decode_hex (hash_text.get ()))
		{
			ec = nano::error_rpc::bad_key;
		}
	}
	if (!ec)
	{
		boost::property_tree::ptree unchecked;
		auto transaction (node.store.tx_begin_read ());
		// Iteration starts at the first entry at or after dependency "key"
		for (auto i (node.store.unchecked_begin (transaction, nano::unchecked_key (key, 0))), n (node.store.unchecked_end ()); i != n && unchecked.size () < count; ++i)
		{
			boost::property_tree::ptree entry;
			auto block (i->second);
			std::string contents;
			block->serialize_json (contents);
			entry.put ("key", nano::block_hash (i->first.key ()).to_string ());
			entry.put ("hash", block->hash ().to_string ());
			entry.put ("contents", contents);
			unchecked.push_back (std::make_pair ("", entry));
		}
		response_l.add_child ("unchecked", unchecked);
	}
	response_errors ();
}
void nano::rpc_handler::version ()
{
	// Static version and vendor information; cannot fail.
	response_l.put ("rpc_version", "1");
	response_l.put ("store_version", std::to_string (node.store_version ()));
	response_l.put ("protocol_version", std::to_string (nano::protocol_version));
	auto vendor (boost::str (boost::format ("Nano %1%.%2%") % NANO_VERSION_MAJOR % NANO_VERSION_MINOR));
	response_l.put ("node_vendor", vendor);
	response_errors ();
}
void nano::rpc_handler::validate_account_number ()
{
	// Report whether "account" parses as a valid account string.
	nano::uint256_union account;
	// decode_account returns true on failure
	auto invalid (account.decode_account (request.get<std::string> ("account")));
	response_l.put ("valid", invalid ? "0" : "1");
	response_errors ();
}
void nano::rpc_handler::wallet_add ()
{
	// Control-only: insert an ad-hoc private key into the wallet.
	// "work" (default true) controls precomputation for the new account.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	if (!ec)
	{
		nano::raw_key key;
		if (key.data.decode_hex (request.get<std::string> ("key")))
		{
			ec = nano::error_common::bad_private_key;
		}
		else
		{
			const bool generate_work = request.get<bool> ("work", true);
			auto pub (wallet->insert_adhoc (key, generate_work));
			if (pub.is_zero ())
			{
				// Insertion yields a zero key when the wallet is locked
				ec = nano::error_common::wallet_locked;
			}
			else
			{
				response_l.put ("account", pub.to_account ());
			}
		}
	}
	response_errors ();
}
void nano::rpc_handler::wallet_add_watch ()
{
	// Control-only: add watch-only accounts (no private keys) to the wallet.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_write ());
		if (wallet->store.valid_password (transaction))
		{
			for (auto & accounts : request.get_child ("accounts"))
			{
				auto account (account_impl (accounts.second.data ()));
				// account_impl sets ec on a bad account; later entries are then skipped
				if (!ec)
				{
					wallet->insert_watch (transaction, account);
				}
			}
			// NOTE(review): "success" is put unconditionally here even if ec was set
			// by a bad account above; response_errors() decides the final reply
			response_l.put ("success", "");
		}
		else
		{
			ec = nano::error_common::wallet_locked;
		}
	}
	response_errors ();
}
void nano::rpc_handler::wallet_info ()
{
	// Aggregate wallet totals: balance, pending, account count,
	// key counts by type, and the next deterministic key index.
	auto wallet (wallet_impl ());
	if (!ec)
	{
		nano::uint128_t balance (0);
		nano::uint128_t pending (0);
		uint64_t count (0);
		uint64_t deterministic_count (0);
		uint64_t adhoc_count (0);
		auto transaction (node.wallets.tx_begin_read ());
		auto block_transaction (node.store.tx_begin_read ());
		for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
		{
			nano::account account (i->first);
			balance = balance + node.ledger.account_balance (block_transaction, account);
			pending = pending + node.ledger.account_pending (block_transaction, account);
			// Classify the key as seed-derived (deterministic) or imported (adhoc)
			nano::key_type key_type (wallet->store.key_type (i->second));
			if (key_type == nano::key_type::deterministic)
			{
				deterministic_count++;
			}
			else if (key_type == nano::key_type::adhoc)
			{
				adhoc_count++;
			}
			count++;
		}
		uint32_t deterministic_index (wallet->store.deterministic_index_get (transaction));
		response_l.put ("balance", balance.convert_to<std::string> ());
		response_l.put ("pending", pending.convert_to<std::string> ());
		response_l.put ("accounts_count", std::to_string (count));
		response_l.put ("deterministic_count", std::to_string (deterministic_count));
		response_l.put ("adhoc_count", std::to_string (adhoc_count));
		response_l.put ("deterministic_index", std::to_string (deterministic_index));
	}
	response_errors ();
}
void nano::rpc_handler::wallet_balances ()
{
	// Balance and pending for every wallet account whose balance is
	// at or above the optional "threshold".
	auto wallet (wallet_impl ());
	auto threshold (threshold_optional_impl ());
	if (!ec)
	{
		boost::property_tree::ptree balances;
		auto transaction (node.wallets.tx_begin_read ());
		auto block_transaction (node.store.tx_begin_read ());
		for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
		{
			nano::account account (i->first);
			nano::uint128_t balance = node.ledger.account_balance (block_transaction, account);
			// Threshold filters on balance only; pending is reported but not filtered
			if (balance >= threshold.number ())
			{
				boost::property_tree::ptree entry;
				nano::uint128_t pending = node.ledger.account_pending (block_transaction, account);
				entry.put ("balance", balance.convert_to<std::string> ());
				entry.put ("pending", pending.convert_to<std::string> ());
				balances.push_back (std::make_pair (account.to_account (), entry));
			}
		}
		response_l.add_child ("balances", balances);
	}
	response_errors ();
}
void nano::rpc_handler::wallet_change_seed ()
{
	// Control-only: replace the wallet seed; the optional "count"
	// restores that many deterministic accounts after the change.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	if (!ec)
	{
		std::string seed_text (request.get<std::string> ("seed"));
		nano::raw_key seed;
		if (!seed.data.decode_hex (seed_text))
		{
			auto count (count_optional_impl (0));
			auto transaction (node.wallets.tx_begin_write ());
			if (wallet->store.valid_password (transaction))
			{
				wallet->change_seed (transaction, seed, count);
				response_l.put ("success", "");
			}
			else
			{
				ec = nano::error_common::wallet_locked;
			}
		}
		else
		{
			ec = nano::error_common::bad_seed;
		}
	}
	response_errors ();
}
void nano::rpc_handler::wallet_contains ()
{
	// Report whether the wallet holds the given account.
	auto account (account_impl ());
	auto wallet (wallet_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_read ());
		bool found (wallet->store.find (transaction, account) != wallet->store.end ());
		const char * flag = found ? "1" : "0";
		response_l.put ("exists", flag);
	}
	response_errors ();
}
void nano::rpc_handler::wallet_create ()
{
	// Control-only: create a new wallet under a random id, optionally
	// initializing it from a caller-supplied "seed".
	rpc_control_impl ();
	if (!ec)
	{
		nano::raw_key seed;
		auto seed_text (request.get_optional<std::string> ("seed"));
		// Validate the seed before creating anything
		if (seed_text.is_initialized () && seed.data.decode_hex (seed_text.get ()))
		{
			ec = nano::error_common::bad_seed;
		}
		if (!ec)
		{
			nano::keypair wallet_id;
			auto wallet (node.wallets.create (wallet_id.pub));
			auto existing (node.wallets.items.find (wallet_id.pub));
			if (existing != node.wallets.items.end ())
			{
				response_l.put ("wallet", wallet_id.pub.to_string ());
			}
			else
			{
				// Wallet did not register, attributed to the LMDB database cap
				ec = nano::error_common::wallet_lmdb_max_dbs;
			}
			if (!ec && seed_text.is_initialized ())
			{
				// Apply the seed and report the first derived account
				auto transaction (node.wallets.tx_begin_write ());
				nano::public_key account (wallet->change_seed (transaction, seed));
				response_l.put ("account", account.to_account ());
			}
		}
	}
	response_errors ();
}
void nano::rpc_handler::wallet_destroy ()
{
	// Control-only: delete a wallet by id and confirm its removal.
	rpc_control_impl ();
	if (!ec)
	{
		std::string wallet_text (request.get<std::string> ("wallet"));
		nano::uint256_union wallet;
		if (!wallet.decode_hex (wallet_text))
		{
			auto existing (node.wallets.items.find (wallet));
			if (existing != node.wallets.items.end ())
			{
				node.wallets.destroy (wallet);
				// Re-check the map so the reply reflects the actual outcome
				bool destroyed (node.wallets.items.find (wallet) == node.wallets.items.end ());
				response_l.put ("destroyed", destroyed ? "1" : "0");
			}
			else
			{
				ec = nano::error_common::wallet_not_found;
			}
		}
		else
		{
			ec = nano::error_common::bad_wallet_number;
		}
	}
	response_errors ();
}
void nano::rpc_handler::wallet_export ()
{
	// Serialize the whole wallet store into a JSON string.
	auto wallet (wallet_impl ());
	if (!ec)
	{
		std::string json;
		{
			// Read transaction only needed for serialization itself
			auto transaction (node.wallets.tx_begin_read ());
			wallet->store.serialize_json (transaction, json);
		}
		response_l.put ("json", json);
	}
	response_errors ();
}
void nano::rpc_handler::wallet_frontiers ()
{
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree frontiers;
auto transaction (node.wallets.tx_begin_read ());
auto block_transaction (node.store.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account account (i->first);
auto latest (node.ledger.latest (block_transaction, account));
if (!latest.is_zero ())
{
frontiers.put (account.to_account (), latest.to_string ());
}
}
response_l.add_child ("frontiers", frontiers);
}
response_errors ();
}
void nano::rpc_handler::wallet_key_valid ()
{
	// "valid" is "1" when the wallet's cached password decrypts the store.
	auto wallet (wallet_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_read ());
		response_l.put ("valid", wallet->store.valid_password (transaction) ? "1" : "0");
	}
	response_errors ();
}
void nano::rpc_handler::wallet_ledger ()
{
	// Ledger info for every opened wallet account modified at or after
	// "modified_since"; optional flags add representative, weight and pending.
	const bool representative = request.get<bool> ("representative", false);
	const bool weight = request.get<bool> ("weight", false);
	const bool pending = request.get<bool> ("pending", false);
	uint64_t modified_since (0);
	boost::optional<std::string> modified_since_text (request.get_optional<std::string> ("modified_since"));
	if (modified_since_text.is_initialized ())
	{
		modified_since = strtoul (modified_since_text.get ().c_str (), NULL, 10);
	}
	auto wallet (wallet_impl ());
	if (!ec)
	{
		boost::property_tree::ptree accounts;
		auto transaction (node.wallets.tx_begin_read ());
		auto block_transaction (node.store.tx_begin_read ());
		for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
		{
			nano::account account (i->first);
			nano::account_info info;
			// Unopened accounts (no ledger entry) are skipped
			if (!node.store.account_get (block_transaction, account, info))
			{
				if (info.modified >= modified_since)
				{
					boost::property_tree::ptree entry;
					entry.put ("frontier", info.head.to_string ());
					entry.put ("open_block", info.open_block.to_string ());
					entry.put ("representative_block", info.rep_block.to_string ());
					std::string balance;
					nano::uint128_union (info.balance).encode_dec (balance);
					entry.put ("balance", balance);
					entry.put ("modified_timestamp", std::to_string (info.modified));
					entry.put ("block_count", std::to_string (info.block_count));
					if (representative)
					{
						// The representative is read off the block named by rep_block
						auto block (node.store.block_get (block_transaction, info.rep_block));
						assert (block != nullptr);
						entry.put ("representative", block->representative ().to_account ());
					}
					if (weight)
					{
						auto account_weight (node.ledger.weight (block_transaction, account));
						entry.put ("weight", account_weight.convert_to<std::string> ());
					}
					if (pending)
					{
						auto account_pending (node.ledger.account_pending (block_transaction, account));
						entry.put ("pending", account_pending.convert_to<std::string> ());
					}
					accounts.push_back (std::make_pair (account.to_account (), entry));
				}
			}
		}
		response_l.add_child ("accounts", accounts);
	}
	response_errors ();
}
void nano::rpc_handler::wallet_lock ()
{
rpc_control_impl ();
auto wallet (wallet_impl ());
if (!ec)
{
nano::raw_key empty;
empty.data.clear ();
wallet->store.password.value_set (empty);
response_l.put ("locked", "1");
}
response_errors ();
}
void nano::rpc_handler::wallet_pending ()
{
	// Pending (receivable) blocks for every wallet account, up to "count" per
	// account, with optional threshold filtering, source/min_version detail,
	// and inclusion of blocks still under active election.
	auto wallet (wallet_impl ());
	auto count (count_optional_impl ());
	auto threshold (threshold_optional_impl ());
	const bool source = request.get<bool> ("source", false);
	const bool min_version = request.get<bool> ("min_version", false);
	const bool include_active = request.get<bool> ("include_active", false);
	if (!ec)
	{
		boost::property_tree::ptree pending;
		auto transaction (node.wallets.tx_begin_read ());
		auto block_transaction (node.store.tx_begin_read ());
		for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
		{
			nano::account account (i->first);
			boost::property_tree::ptree peers_l;
			// Pending entries for this account start at key (account, 0); iteration
			// stops when the key's account changes or "count" entries are collected
			for (auto ii (node.store.pending_begin (block_transaction, nano::pending_key (account, 0))); nano::pending_key (ii->first).account == account && peers_l.size () < count; ++ii)
			{
				nano::pending_key key (ii->first);
				// Block lookup only needed to test election activity
				std::shared_ptr<nano::block> block (include_active ? nullptr : node.store.block_get (block_transaction, key.hash));
				if (include_active || (block && !node.active.active (*block)))
				{
					if (threshold.is_zero () && !source)
					{
						// Simple form: bare list of block hashes
						boost::property_tree::ptree entry;
						entry.put ("", key.hash.to_string ());
						peers_l.push_back (std::make_pair ("", entry));
					}
					else
					{
						nano::pending_info info (ii->second);
						if (info.amount.number () >= threshold.number ())
						{
							if (source || min_version)
							{
								// Detailed form: subtree with amount plus requested fields
								boost::property_tree::ptree pending_tree;
								pending_tree.put ("amount", info.amount.number ().convert_to<std::string> ());
								if (source)
								{
									pending_tree.put ("source", info.source.to_account ());
								}
								if (min_version)
								{
									pending_tree.put ("min_version", info.epoch == nano::epoch::epoch_1 ? "1" : "0");
								}
								peers_l.add_child (key.hash.to_string (), pending_tree);
							}
							else
							{
								// Threshold-only form: hash -> amount
								peers_l.put (key.hash.to_string (), info.amount.number ().convert_to<std::string> ());
							}
						}
					}
				}
			}
			// Accounts with nothing pending are omitted from the reply
			if (!peers_l.empty ())
			{
				pending.add_child (account.to_account (), peers_l);
			}
		}
		response_l.add_child ("blocks", pending);
	}
	response_errors ();
}
void nano::rpc_handler::wallet_representative ()
{
	// Return the wallet's default representative account.
	auto wallet (wallet_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_read ());
		auto representative (wallet->store.representative (transaction));
		response_l.put ("representative", representative.to_account ());
	}
	response_errors ();
}
void nano::rpc_handler::wallet_representative_set ()
{
	// Control-only: set the wallet's default representative and, when
	// "update_existing_accounts" is set, issue change blocks for accounts
	// still voting through a different representative.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	if (!ec)
	{
		std::string representative_text (request.get<std::string> ("representative"));
		nano::account representative;
		if (!representative.decode_account (representative_text))
		{
			{
				// Scoped so the wallet write transaction is released before the scan below
				auto transaction (node.wallets.tx_begin_write ());
				wallet->store.representative_set (transaction, representative);
			}
			// Change representative for all wallet accounts
			if (request.get<bool> ("update_existing_accounts", false))
			{
				std::vector<nano::account> accounts;
				{
					auto transaction (node.wallets.tx_begin_read ());
					auto block_transaction (node.store.tx_begin_write ());
					for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
					{
						nano::account account (i->first);
						nano::account_info info;
						if (!node.store.account_get (block_transaction, account, info))
						{
							auto block (node.store.block_get (block_transaction, info.rep_block));
							assert (block != nullptr);
							// Only accounts not already on the new representative
							if (block->representative () != representative)
							{
								accounts.push_back (account);
							}
						}
					}
				}
				// Fire-and-forget change blocks; completion is not reported in the reply
				for (auto & account : accounts)
				{
					wallet->change_async (account, representative, [](std::shared_ptr<nano::block>) {}, 0, false);
				}
			}
			response_l.put ("set", "1");
		}
		else
		{
			ec = nano::error_rpc::bad_representative_number;
		}
	}
	response_errors ();
}
void nano::rpc_handler::wallet_republish ()
{
	// Control-only: rebroadcast the most recent "count" blocks of every
	// wallet account to peers.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	auto count (count_impl ());
	if (!ec)
	{
		boost::property_tree::ptree blocks;
		std::deque<std::shared_ptr<nano::block>> republish_bundle;
		auto transaction (node.wallets.tx_begin_read ());
		auto block_transaction (node.store.tx_begin_read ());
		for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
		{
			nano::account account (i->first);
			auto latest (node.ledger.latest (block_transaction, account));
			std::shared_ptr<nano::block> block;
			std::vector<nano::block_hash> hashes;
			// Walk back from the frontier collecting up to "count" hashes
			while (!latest.is_zero () && hashes.size () < count)
			{
				hashes.push_back (latest);
				block = node.store.block_get (block_transaction, latest);
				latest = block->previous ();
			}
			// Oldest first so peers receive blocks in chain order
			std::reverse (hashes.begin (), hashes.end ());
			for (auto & hash : hashes)
			{
				block = node.store.block_get (block_transaction, hash);
				republish_bundle.push_back (std::move (block));
				boost::property_tree::ptree entry;
				entry.put ("", hash.to_string ());
				blocks.push_back (std::make_pair ("", entry));
			}
		}
		// Flood collected blocks to peers, 25 per batch
		node.network.republish_block_batch (republish_bundle, 25);
		response_l.add_child ("blocks", blocks);
	}
	response_errors ();
}
void nano::rpc_handler::wallet_work_get ()
{
rpc_control_impl ();
auto wallet (wallet_impl ());
if (!ec)
{
boost::property_tree::ptree works;
auto transaction (node.wallets.tx_begin_read ());
for (auto i (wallet->store.begin (transaction)), n (wallet->store.end ()); i != n; ++i)
{
nano::account account (i->first);
uint64_t work (0);
auto error_work (wallet->store.work_get (transaction, account, work));
(void)error_work;
works.put (account.to_account (), nano::to_string_hex (work));
}
response_l.add_child ("works", works);
}
response_errors ();
}
void nano::rpc_handler::work_generate ()
{
	// Control-only: generate proof-of-work for "hash", either locally or
	// distributed across work peers ("use_peers"). The reply is produced
	// asynchronously from the callback, so response_errors() runs only on error.
	rpc_control_impl ();
	auto hash (hash_impl ());
	if (!ec)
	{
		bool use_peers (request.get_optional<bool> ("use_peers") == true);
		auto rpc_l (shared_from_this ());
		// Capturing shared_from_this keeps the handler alive until work completes
		auto callback = [rpc_l](boost::optional<uint64_t> const & work_a) {
			if (work_a)
			{
				boost::property_tree::ptree response_l;
				response_l.put ("work", nano::to_string_hex (work_a.value ()));
				rpc_l->response (response_l);
			}
			else
			{
				// Empty optional means the generation was cancelled
				error_response (rpc_l->response, "Cancelled");
			}
		};
		if (!use_peers)
		{
			node.work.generate (hash, callback);
		}
		else
		{
			node.work_generate (hash, callback);
		}
	}
	// Because of callback
	if (ec)
	{
		response_errors ();
	}
}
void nano::rpc_handler::work_cancel ()
{
	// Control-only: cancel any in-progress local work generation for "hash".
	rpc_control_impl ();
	auto hash (hash_impl ());
	if (!ec)
	{
		node.work.cancel (hash);
	}
	response_errors ();
}
void nano::rpc_handler::work_get ()
{
	// Control-only: fetch the cached work value for one wallet account.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	auto account (account_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_read ());
		if (wallet->store.find (transaction, account) == wallet->store.end ())
		{
			ec = nano::error_common::account_not_found_wallet;
		}
		else
		{
			uint64_t work (0);
			// Missing work entries fall back to the zero default
			(void)wallet->store.work_get (transaction, account, work);
			response_l.put ("work", nano::to_string_hex (work));
		}
	}
	response_errors ();
}
void nano::rpc_handler::work_set ()
{
	// Control-only: store a caller-supplied work value for a wallet account.
	rpc_control_impl ();
	auto wallet (wallet_impl ());
	auto account (account_impl ());
	auto work (work_optional_impl ());
	if (!ec)
	{
		auto transaction (node.wallets.tx_begin_write ());
		if (wallet->store.find (transaction, account) == wallet->store.end ())
		{
			ec = nano::error_common::account_not_found_wallet;
		}
		else
		{
			wallet->store.work_put (transaction, account, work);
			response_l.put ("success", "");
		}
	}
	response_errors ();
}
void nano::rpc_handler::work_validate ()
{
	// Check a work value against a root hash; "valid" is "1" when it passes.
	auto hash (hash_impl ());
	auto work (work_optional_impl ());
	if (!ec)
	{
		// work_validate returns true on failure, hence the inversion
		response_l.put ("valid", nano::work_validate (hash, work) ? "0" : "1");
	}
	response_errors ();
}
void nano::rpc_handler::work_peer_add ()
{
	// Control-only: register an external work peer (address + port).
	rpc_control_impl ();
	if (!ec)
	{
		auto address_text (request.get<std::string> ("address"));
		auto port_text (request.get<std::string> ("port"));
		uint16_t port;
		// parse_port returns true on failure
		if (nano::parse_port (port_text, port))
		{
			ec = nano::error_common::invalid_port;
		}
		else
		{
			node.config.work_peers.push_back (std::make_pair (address_text, port));
			response_l.put ("success", "");
		}
	}
	response_errors ();
}
void nano::rpc_handler::work_peers ()
{
	// Control-only: list configured work peers as "address:port" strings.
	rpc_control_impl ();
	if (!ec)
	{
		boost::property_tree::ptree work_peers_l;
		for (auto & peer : node.config.work_peers)
		{
			boost::property_tree::ptree entry;
			entry.put ("", boost::str (boost::format ("%1%:%2%") % peer.first % peer.second));
			work_peers_l.push_back (std::make_pair ("", entry));
		}
		response_l.add_child ("work_peers", work_peers_l);
	}
	response_errors ();
}
void nano::rpc_handler::work_peers_clear ()
{
	// Control-only: drop every configured work peer.
	rpc_control_impl ();
	if (ec)
	{
		response_errors ();
		return;
	}
	node.config.work_peers.clear ();
	response_l.put ("success", "");
	response_errors ();
}
nano::rpc_connection::rpc_connection (nano::node & node_a, nano::rpc & rpc_a) :
node (node_a.shared ()),
rpc (rpc_a),
socket (node_a.io_ctx)
{
	// Start with the single-response guard cleared (see write_result)
	responded.clear ();
}
void nano::rpc_connection::parse_connection ()
{
	// Begin servicing the connection by asynchronously reading the HTTP request
	read ();
}
void nano::rpc_connection::write_result (std::string body, unsigned version)
{
	// Populate the HTTP response exactly once per connection; the atomic flag
	// guards against a handler responding twice.
	if (!responded.test_and_set ())
	{
		res.set ("Content-Type", "application/json");
		res.set ("Access-Control-Allow-Origin", "*");
		res.set ("Access-Control-Allow-Headers", "Accept, Accept-Language, Content-Language, Content-Type");
		res.set ("Connection", "close");
		res.result (boost::beast::http::status::ok);
		res.body () = body;
		res.version (version);
		res.prepare_payload ();
	}
	else
	{
		assert (false && "RPC already responded and should only respond once");
		// Guards `res' from being clobbered while async_write is being serviced
	}
}
void nano::rpc_connection::read ()
{
	// Asynchronously read one HTTP request, dispatch it to an rpc_handler on
	// the node's event loop, and write the JSON reply when it completes.
	auto this_l (shared_from_this ());
	boost::beast::http::async_read (socket, buffer, request, [this_l](boost::system::error_code const & ec, size_t bytes_transferred) {
		if (!ec)
		{
			this_l->node->background ([this_l]() {
				auto start (std::chrono::steady_clock::now ());
				auto version (this_l->request.version ());
				// The connection pointer doubles as a request id for log correlation
				std::string request_id (boost::str (boost::format ("%1%") % boost::io::group (std::hex, std::showbase, reinterpret_cast<uintptr_t> (this_l.get ()))));
				auto response_handler ([this_l, version, start, request_id](boost::property_tree::ptree const & tree_a) {
					std::stringstream ostream;
					boost::property_tree::write_json (ostream, tree_a);
					ostream.flush ();
					auto body (ostream.str ());
					this_l->write_result (body, version);
					// this_l kept alive by the completion handler until the write finishes
					boost::beast::http::async_write (this_l->socket, this_l->res, [this_l](boost::system::error_code const & ec, size_t bytes_transferred) {
					});
					if (this_l->node->config.logging.log_rpc ())
					{
						BOOST_LOG (this_l->node->log) << boost::str (boost::format ("RPC request %2% completed in: %1% microseconds") % std::chrono::duration_cast<std::chrono::microseconds> (std::chrono::steady_clock::now () - start).count () % request_id);
					}
				});
				if (this_l->request.method () == boost::beast::http::verb::post)
				{
					auto handler (std::make_shared<nano::rpc_handler> (*this_l->node, this_l->rpc, this_l->request.body (), request_id, response_handler));
					handler->process_request ();
				}
				else
				{
					error_response (response_handler, "Can only POST requests");
				}
			});
		}
		else
		{
			BOOST_LOG (this_l->node->log) << "RPC read error: " << ec.message ();
		}
	});
}
namespace
{
// Sanitize a request tree for logging: the password is masked entirely and
// wallet/key/seed values keep only their first two characters.
// Takes the tree by value so the caller's copy is never modified.
std::string filter_request (boost::property_tree::ptree tree_a)
{
	// Replace password
	boost::optional<std::string> password_text (tree_a.get_optional<std::string> ("password"));
	if (password_text.is_initialized ())
	{
		tree_a.put ("password", "password");
	}
	// Save first 2 symbols of wallet, key, seed
	boost::optional<std::string> wallet_text (tree_a.get_optional<std::string> ("wallet"));
	if (wallet_text.is_initialized () && wallet_text.get ().length () > 2)
	{
		tree_a.put ("wallet", wallet_text.get ().replace (wallet_text.get ().begin () + 2, wallet_text.get ().end (), wallet_text.get ().length () - 2, 'X'));
	}
	boost::optional<std::string> key_text (tree_a.get_optional<std::string> ("key"));
	if (key_text.is_initialized () && key_text.get ().length () > 2)
	{
		tree_a.put ("key", key_text.get ().replace (key_text.get ().begin () + 2, key_text.get ().end (), key_text.get ().length () - 2, 'X'));
	}
	boost::optional<std::string> seed_text (tree_a.get_optional<std::string> ("seed"));
	if (seed_text.is_initialized () && seed_text.get ().length () > 2)
	{
		tree_a.put ("seed", seed_text.get ().replace (seed_text.get ().begin () + 2, seed_text.get ().end (), seed_text.get ().length () - 2, 'X'));
	}
	std::string result;
	std::stringstream stream;
	// Single-line JSON (pretty=false) for compact log output
	boost::property_tree::write_json (stream, tree_a, false);
	result = stream.str ();
	// removing std::endl
	if (result.length () > 1)
	{
		result.pop_back ();
	}
	return result;
}
}
// Parse the JSON request body, guard against excessive nesting, and dispatch
// on the "action" field to the matching RPC handler method. Any parse failure
// is reported as "Unable to parse JSON"; unexpected exceptions as an internal
// server error. Several actions are aliases that delegate to a shared handler
// (e.g. "successors" -> chain (true), "frontier_count" -> account_count).
void nano::rpc_handler::process_request ()
{
try
{
// Count opening brackets/braces as an upper bound on JSON nesting depth;
// reject before parsing to avoid deep-recursion in the property_tree parser.
auto max_depth_exceeded (false);
auto max_depth_possible (0);
for (auto ch : body)
{
if (ch == '[' || ch == '{')
{
if (max_depth_possible >= rpc.config.max_json_depth)
{
max_depth_exceeded = true;
break;
}
++max_depth_possible;
}
}
if (max_depth_exceeded)
{
error_response (response, "Max JSON depth exceeded");
}
else
{
std::stringstream istream (body);
boost::property_tree::read_json (istream, request);
std::string action (request.get<std::string> ("action"));
if (node.config.logging.log_rpc ())
{
// Log a sanitized copy of the request (passwords/keys masked)
BOOST_LOG (node.log) << boost::str (boost::format ("%1% ") % request_id) << filter_request (request);
}
if (action == "account_balance")
{
account_balance ();
}
else if (action == "account_block_count")
{
account_block_count ();
}
else if (action == "account_count")
{
account_count ();
}
else if (action == "account_create")
{
account_create ();
}
else if (action == "account_get")
{
account_get ();
}
else if (action == "account_history")
{
account_history ();
}
else if (action == "account_info")
{
account_info ();
}
else if (action == "account_key")
{
account_key ();
}
else if (action == "account_list")
{
account_list ();
}
else if (action == "account_move")
{
account_move ();
}
else if (action == "account_remove")
{
account_remove ();
}
else if (action == "account_representative")
{
account_representative ();
}
else if (action == "account_representative_set")
{
account_representative_set ();
}
else if (action == "account_weight")
{
account_weight ();
}
else if (action == "accounts_balances")
{
accounts_balances ();
}
else if (action == "accounts_create")
{
accounts_create ();
}
else if (action == "accounts_frontiers")
{
accounts_frontiers ();
}
else if (action == "accounts_pending")
{
accounts_pending ();
}
else if (action == "available_supply")
{
available_supply ();
}
else if (action == "block")
{
block ();
}
else if (action == "block_confirm")
{
block_confirm ();
}
else if (action == "blocks")
{
blocks ();
}
else if (action == "blocks_info")
{
blocks_info ();
}
else if (action == "block_account")
{
block_account ();
}
else if (action == "block_count")
{
block_count ();
}
else if (action == "block_count_type")
{
block_count_type ();
}
else if (action == "block_create")
{
block_create ();
}
else if (action == "block_hash")
{
block_hash ();
}
else if (action == "successors")
{
// Alias: same traversal as "chain" but walking forward
chain (true);
}
else if (action == "bootstrap")
{
bootstrap ();
}
else if (action == "bootstrap_any")
{
bootstrap_any ();
}
else if (action == "bootstrap_lazy")
{
bootstrap_lazy ();
}
else if (action == "bootstrap_status")
{
bootstrap_status ();
}
else if (action == "chain")
{
chain ();
}
else if (action == "delegators")
{
delegators ();
}
else if (action == "delegators_count")
{
delegators_count ();
}
else if (action == "deterministic_key")
{
deterministic_key ();
}
else if (action == "confirmation_active")
{
confirmation_active ();
}
else if (action == "confirmation_history")
{
confirmation_history ();
}
else if (action == "confirmation_info")
{
confirmation_info ();
}
else if (action == "confirmation_quorum")
{
confirmation_quorum ();
}
else if (action == "frontiers")
{
frontiers ();
}
else if (action == "frontier_count")
{
// Alias for account_count
account_count ();
}
else if (action == "history")
{
// Legacy alias: map "hash" onto account_history's "head" parameter
request.put ("head", request.get<std::string> ("hash"));
account_history ();
}
else if (action == "keepalive")
{
keepalive ();
}
else if (action == "key_create")
{
key_create ();
}
else if (action == "key_expand")
{
key_expand ();
}
else if (action == "krai_from_raw")
{
// Unit-conversion aliases share one handler parameterized by ratio
mrai_from_raw (nano::kxrb_ratio);
}
else if (action == "krai_to_raw")
{
mrai_to_raw (nano::kxrb_ratio);
}
else if (action == "ledger")
{
ledger ();
}
else if (action == "mrai_from_raw")
{
mrai_from_raw ();
}
else if (action == "mrai_to_raw")
{
mrai_to_raw ();
}
else if (action == "node_id")
{
node_id ();
}
else if (action == "node_id_delete")
{
node_id_delete ();
}
else if (action == "password_change")
{
password_change ();
}
else if (action == "password_enter")
{
password_enter ();
}
else if (action == "password_valid")
{
password_valid ();
}
else if (action == "payment_begin")
{
payment_begin ();
}
else if (action == "payment_init")
{
payment_init ();
}
else if (action == "payment_end")
{
payment_end ();
}
else if (action == "payment_wait")
{
payment_wait ();
}
else if (action == "peers")
{
peers ();
}
else if (action == "pending")
{
pending ();
}
else if (action == "pending_exists")
{
pending_exists ();
}
else if (action == "process")
{
process ();
}
else if (action == "nano_from_raw")
{
mrai_from_raw (nano::xrb_ratio);
}
else if (action == "nano_to_raw")
{
mrai_to_raw (nano::xrb_ratio);
}
else if (action == "receive")
{
receive ();
}
else if (action == "receive_minimum")
{
receive_minimum ();
}
else if (action == "receive_minimum_set")
{
receive_minimum_set ();
}
else if (action == "representatives")
{
representatives ();
}
else if (action == "representatives_online")
{
representatives_online ();
}
else if (action == "republish")
{
republish ();
}
else if (action == "search_pending")
{
search_pending ();
}
else if (action == "search_pending_all")
{
search_pending_all ();
}
else if (action == "send")
{
send ();
}
else if (action == "stats")
{
stats ();
}
else if (action == "stop")
{
stop ();
}
else if (action == "unchecked")
{
unchecked ();
}
else if (action == "unchecked_clear")
{
unchecked_clear ();
}
else if (action == "unchecked_get")
{
unchecked_get ();
}
else if (action == "unchecked_keys")
{
unchecked_keys ();
}
else if (action == "validate_account_number")
{
validate_account_number ();
}
else if (action == "version")
{
version ();
}
else if (action == "wallet_add")
{
wallet_add ();
}
else if (action == "wallet_add_watch")
{
wallet_add_watch ();
}
// Obsolete
else if (action == "wallet_balance_total")
{
wallet_info ();
}
else if (action == "wallet_balances")
{
wallet_balances ();
}
else if (action == "wallet_change_seed")
{
wallet_change_seed ();
}
else if (action == "wallet_contains")
{
wallet_contains ();
}
else if (action == "wallet_create")
{
wallet_create ();
}
else if (action == "wallet_destroy")
{
wallet_destroy ();
}
else if (action == "wallet_export")
{
wallet_export ();
}
else if (action == "wallet_frontiers")
{
wallet_frontiers ();
}
else if (action == "wallet_info")
{
wallet_info ();
}
else if (action == "wallet_key_valid")
{
wallet_key_valid ();
}
else if (action == "wallet_ledger")
{
wallet_ledger ();
}
else if (action == "wallet_lock")
{
wallet_lock ();
}
else if (action == "wallet_locked")
{
// Alias: report lock state via password_valid's "locked" mode
password_valid (true);
}
else if (action == "wallet_pending")
{
wallet_pending ();
}
else if (action == "wallet_representative")
{
wallet_representative ();
}
else if (action == "wallet_representative_set")
{
wallet_representative_set ();
}
else if (action == "wallet_republish")
{
wallet_republish ();
}
else if (action == "wallet_unlock")
{
// Alias for password_enter
password_enter ();
}
else if (action == "wallet_work_get")
{
wallet_work_get ();
}
else if (action == "work_generate")
{
work_generate ();
}
else if (action == "work_cancel")
{
work_cancel ();
}
else if (action == "work_get")
{
work_get ();
}
else if (action == "work_set")
{
work_set ();
}
else if (action == "work_validate")
{
work_validate ();
}
else if (action == "work_peer_add")
{
work_peer_add ();
}
else if (action == "work_peers")
{
work_peers ();
}
else if (action == "work_peers_clear")
{
work_peers_clear ();
}
else
{
error_response (response, "Unknown command");
}
}
}
catch (std::runtime_error const & err)
{
// read_json and request.get<> throw runtime_error on malformed input
error_response (response, "Unable to parse JSON");
}
catch (...)
{
error_response (response, "Internal server error in RPC");
}
}
// Construct an observer that watches `account_a` for an incoming payment of
// at least `amount_a`, invoking `response_a` exactly once when observation
// finishes (success or timeout).
nano::payment_observer::payment_observer (std::function<void(boost::property_tree::ptree const &)> const & response_a, nano::rpc & rpc_a, nano::account const & account_a, nano::amount const & amount_a) :
rpc (rpc_a),
account (account_a),
amount (amount_a),
response (response_a)
{
// Start with the completion flag unset so complete () fires only once
completed.clear ();
}
void nano::payment_observer::start (uint64_t timeout)
{
auto this_l (shared_from_this ());
rpc.node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (timeout), [this_l]() {
this_l->complete (nano::payment_status::nothing);
});
}
// Out-of-line destructor; nothing to release explicitly.
nano::payment_observer::~payment_observer ()
{
}
// Poll the watched account: once its balance reaches the expected amount,
// complete the observation successfully.
void nano::payment_observer::observe ()
{
	auto current_balance (rpc.node.balance (account));
	if (current_balance >= amount.number ())
	{
		complete (nano::payment_status::success);
	}
}
// Finish the observation with the given status, exactly once. The atomic
// test_and_set guards against the success path and the timeout path racing;
// whichever arrives second is a no-op.
void nano::payment_observer::complete (nano::payment_status status)
{
// test_and_set returns the previous value: true means someone already completed
auto already (completed.test_and_set ());
if (!already)
{
if (rpc.node.config.logging.log_rpc ())
{
BOOST_LOG (rpc.node.log) << boost::str (boost::format ("Exiting payment_observer for account %1% status %2%") % account.to_account () % static_cast<unsigned> (status));
}
switch (status)
{
case nano::payment_status::nothing:
{
boost::property_tree::ptree response_l;
response_l.put ("status", "nothing");
response (response_l);
break;
}
case nano::payment_status::success:
{
boost::property_tree::ptree response_l;
response_l.put ("status", "success");
response (response_l);
break;
}
default:
{
error_response (response, "Internal payment error");
break;
}
}
// Deregister ourselves; the lock protects the shared observer map
std::lock_guard<std::mutex> lock (rpc.mutex);
assert (rpc.payment_observers.find (account) != rpc.payment_observers.end ());
rpc.payment_observers.erase (account);
}
}
// Factory for the RPC server: build a TLS-capable instance when secure RPC is
// both configured and compiled in, otherwise a plain one. Returns a null
// pointer if TLS was requested but this build lacks TLS support.
std::unique_ptr<nano::rpc> nano::get_rpc (boost::asio::io_context & io_ctx_a, nano::node & node_a, nano::rpc_config const & config_a)
{
	std::unique_ptr<rpc> result;
	if (!config_a.secure.enable)
	{
		result.reset (new rpc (io_ctx_a, node_a, config_a));
	}
	else
	{
#ifdef NANO_SECURE_RPC
		result.reset (new rpc_secure (io_ctx_a, node_a, config_a));
#else
		std::cerr << "RPC configured for TLS, but the node is not compiled with TLS support" << std::endl;
#endif
	}
	return result;
}
| 1 | 14,801 | Very minor: this line could be moved to a more narrow scope, inside `if (!representative.decode_account (representative_text))` Other than that, LGTM | nanocurrency-nano-node | cpp |
@@ -40,10 +40,8 @@ type Connection struct {
// For invalid and closed connections: StopTime is the time when connection was updated last.
// For established connections: StopTime is latest time when it was polled.
StopTime time.Time
- // IsActive flag helps in cleaning up connections when they are not in conntrack any module more.
- IsActive bool
- // DoExport flag helps in tagging connections that can be exported by Flow Exporter
- DoExport bool
+ // IsPresent flag helps in cleaning up connections when they are not in conntrack table anymore.
+ IsPresent bool
Zone uint16
Mark uint32
StatusFlag uint32 | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package flowexporter
import (
"net"
"time"
)
// ConnectionKey identifies a connection as a 5-tuple of strings.
type ConnectionKey [5]string
// ConnectionMapCallBack is invoked for each entry of a connection map.
type ConnectionMapCallBack func(key ConnectionKey, conn Connection) error
// FlowRecordCallBack is invoked for each entry of a flow record map.
type FlowRecordCallBack func(key ConnectionKey, record FlowRecord) error
// Tuple describes one direction of a connection.
type Tuple struct {
SourceAddress net.IP
DestinationAddress net.IP
Protocol uint8
SourcePort uint16
DestinationPort uint16
}
// Connection carries the conntrack-derived state of a single flow plus the
// Kubernetes metadata (Pods, Service, NetworkPolicies) resolved by Antrea.
type Connection struct {
// Fields from conntrack flows
ID uint32
Timeout uint32
StartTime time.Time
// For invalid and closed connections: StopTime is the time when connection was updated last.
// For established connections: StopTime is latest time when it was polled.
StopTime time.Time
// IsActive flag helps in cleaning up connections when they are no longer present in the conntrack table.
IsActive bool
// DoExport flag helps in tagging connections that can be exported by Flow Exporter
DoExport bool
Zone uint16
Mark uint32
StatusFlag uint32
Labels, LabelsMask []byte
// TODO: Have a separate field for protocol. No need to keep it in Tuple.
TupleOrig, TupleReply Tuple
OriginalPackets, OriginalBytes uint64
ReversePackets, ReverseBytes uint64
// Fields specific to Antrea
SourcePodNamespace string
SourcePodName string
DestinationPodNamespace string
DestinationPodName string
DestinationServicePortName string
IngressNetworkPolicyName string
IngressNetworkPolicyNamespace string
EgressNetworkPolicyName string
EgressNetworkPolicyNamespace string
}
// FlowRecord pairs a connection with the counter values seen at the previous
// export, so per-interval deltas can be computed at export time.
type FlowRecord struct {
Conn *Connection
PrevPackets uint64
PrevBytes uint64
PrevReversePackets uint64
PrevReverseBytes uint64
IsIPv6 bool
}
| 1 | 30,804 | when is this consumed? I'm probably missing it but I can't find it right now | antrea-io-antrea | go |
@@ -418,6 +418,10 @@ void GroupByAgg::generateCacheKey(CacheWA &cwa) const
groupExpr_.rebuildExprTree(ITM_ITEM_LIST);
if (grpExpr) {
cwa += " gBy:";
+
+ if (isRollup())
+ cwa += " roll:";
+
grpExpr->generateCacheKey(cwa);
}
} | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
******************************************************************************
*
* File: RelCache.cpp
* Description: All the RelExpr methods introduced by query caching
* Created: 2/23/2001
* Language: C++
*
*
******************************************************************************
*/
#include "AllRelExpr.h"
#include "CacheWA.h"
#include "CmpMain.h"
#include "GroupAttr.h"
#include "OptHints.h"
#include "QRDescGenerator.h"
#include "HDFSHook.h"
// append an ascii-version of GenericUpdate into cachewa.qryText_
void GenericUpdate::generateCacheKey(CacheWA& cwa) const
// NB: This comment applies to all generateCacheKey methods.
// generateCacheKey is used to generate a string representation s of the
// "parameterized" query. Since this string s is used by QCache::lookUp
// to determine if a query is in the cache, it is essential that:
// (1) two different queries have different string representations
// (2) two queries that differ only in their query literals should
// have the same string representations
// One possible implementation of generateCacheKey is to use the query's
// original query text. But, original query text does not satisfy (2).
// To get (2), we call generateCacheKey() from RelRoot::normalizeForCache
// which, by definition, replaced query literals with constant parameters.
// However, generateCacheKey must also satisfy (1). generateCacheKey must
// generate two different strings for two logically different queries.
//
// To satisfy requirements (1) and (2), generateCacheKey and
// normalizeForCache must be in sync -- every user-specified expr that
// generateCacheKey emits into cwa.qryText_ must be examined by
// normalizeForCache for possible replacement of any literal there into
// a constant parameter.
//
// In order for the literal-into-constantparameter replacement to be safe,
// isCacheableExpr must visit all user-specified exprs to make sure that
// only constants that can be safely cast into the query's target types
// are considered cacheable. For example, given this update query
// update t set a = 'xyz' where pk = 1;
// isCacheableeExpr, normalizeForCache, and generateCacheKey must cooperate
// so that:
// 1) isCacheableExpr rejects the query as noncacheble if 'xyz' cannot be
// safely cast into a's target type, eg, 'xyz' may be too long if a's
// type is char(1).
// 2) normalizeForCache must visit and replace both 'xyz' and 1 with
// appropriate constant parameters.
// 3) generateCacheKey must emit some string representation of the
// parameterized query, eg, "update t set a = % where pk = %".
// generateCacheKey can emit more stuff, eg, internally specified
// begin/end-key predicates, but it must emit a string representation
// of all user-specified parts of the query.
{
// append to cwa.qryText_ GenericUpdate's "essential" data members
RelExpr::generateCacheKey(cwa);
// An extension of the fix to 10-010618-3505, 10-010619-3515:
// for "after bind" Insert/Update/Delete queries, include table's
// RedefTime into cwa.qryText_ to make sure we get a cache hit only on
// query that reference table(s) that have not changed since the query's
// addition to the cache. The queries that reference altered table(s)
// will never be hit again and will eventually age out of the cache.
// This is not strictly necessary, but it speeds up the processing
// of insert/update/delete queries on altered tables.
const NATable *tbl;
if (cwa.getPhase() >= CmpMain::BIND &&
getTableDesc() && (tbl=getTableDesc()->getNATable()) != NULL) {
char redefTime[40];
convertInt64ToAscii(tbl->getRedefTime(), redefTime);
cwa += " redef:";
cwa += redefTime;
}
// SET clause (new record expression) is part of the key
ItemExpr *newExpr = newRecExprTree_ ? newRecExprTree_ :
newRecExpr_.rebuildExprTree(ITM_ITEM_LIST);
if (newExpr) {
cwa += " newRecExpr:";
newExpr->generateCacheKey(cwa);
}
// make sure cache key can distinguish these 2 queries:
// prepare s from select * from (update t042qT8 set b=7 where a=2) as t;
// prepare s from select * from (update t042qT8 set b=7 set on rollback c=2
// where a=2) as t;
ItemExpr *setOnRollback;
if (newRecBeforeExpr_.entries() > 0 &&
(setOnRollback=newRecBeforeExpr_.rebuildExprTree(ITM_ITEM_LIST))) {
cwa += " setOnRollback:";
setOnRollback->generateCacheKey(cwa);
}
ItemExpr *execPred = executorPredTree_ ? executorPredTree_ :
executorPred_.rebuildExprTree();
if (execPred) {
cwa += " execPred:";
execPred->generateCacheKey(cwa);
}
// MVs --
// The NOLOG parameter is essential.
if (isNoLogOperation()) {
cwa += " NOLOG";
}
// "current of cursor/hostvar" is essential
if (currOfCursorName_) {
currOfCursorName_->generateCacheKey(cwa);
}
// not sure if the following are essential, but better to be safe &
// slightly inefficient than to deliver a false hit (ie, wrong plan)
cwa += mtsStatement_ ? "m1" : "m0";
cwa += noFlow_ ? "n1" : "n0";
cwa += noRollback_ ? "o1" : "o0";
cwa += noCheck_ ? "nc" : "dc";
// not sure if the following are essential, but we don't know how
// to quickly & cheaply include them into our cachekey:
// updatedTableName_, tabId_, updateToSelectMap_, indexDesc_,
// newRecExprArray_, usedColumns_, newRecBeforeExpr_,
// newRecBeforeExprArray_, usedBeforeColumns_, potentialOutputs_
// indexNumberArray_, scanIndexDesc_, rowsAffected_, stoi_,
// oldToNewMap_
// The following data members are not "essential" to generateCacheKey
// (at least "after bind") because they are either covered by other
// data members (eg, beginKeyPred and endKeyPred_ are covered by the
// selection pred in RelExpr) or they are not yet defined until later
// (eg, after the optimize phase):
// indexNewRecExprArrays_, beginKeyPred_, endKeyPred_,
// pathKeys_, partKeys_, indexBeginKeyPredArray_,
// indexEndKeyPredArray_, checkConstraints_
}
// is this entire expression cacheable after this phase?
// Returns FALSE for positioned updates, uncoercible literals, non-cacheable
// executor predicates, subqueries in SET, and partition-qualified names
// before bind; TRUE otherwise (still only "may be" cacheable).
NABoolean GenericUpdate::isCacheableExpr(CacheWA& cwa)
{
// descend to scans early to get cwa.numberOfScans_
if (!RelExpr::isCacheableExpr(cwa)) {
return FALSE;
}
// Make "{update|delete} ... where current of cursor" non-cacheable
// so that stale cache will not lead to timestamp mismatch error at
// runtime. AQR attempts to handle this error, but only after the
// referenced cursor is closed due to transaction rollback. This is
// Solution 10-100425-9755.
if (currOfCursorName()) {
return FALSE;
}
if (cwa.getPhase() >= CmpMain::BIND) {
// make sure any literals in the assignment clause can be safely
// cast and assigned to their target types at plan-backpatch-time
ItemExpr *newExpr = newRecExprTree_ ? newRecExprTree_ :
newRecExpr_.rebuildExprTree(ITM_ITEM_LIST);
if (newExpr && !newExpr->isSafelyCoercible(cwa)) {
return FALSE;
}
// reject as non-cacheable queries such as
// prepare s from select * from (update t042qT8 set b=7
// set on rollback c=12345678901234567890 where a=2) as t;
ItemExpr *setOnRollback;
if (newRecBeforeExpr_.entries() > 0 &&
(setOnRollback=newRecBeforeExpr_.rebuildExprTree(ITM_ITEM_LIST))
&& !setOnRollback->isSafelyCoercible(cwa)) {
return FALSE;
}
// make sure any executor predicate is cacheable
ItemExpr *execPred = executorPredTree_ ? executorPredTree_ :
executorPred_.rebuildExprTree();
if (execPred) {
cwa.setHasPredicate();
if (execPred->hasNoLiterals(cwa)) {
// predicate with no literals is cacheable
}
else {
cwa.setPredHasNoLit(FALSE);
return execPred->isCacheableExpr(cwa);
}
}
// at this time, not cacheable if subquery is specified in
// UPDATE SET clause.
// This could be enabled later.
if (subqInUpdateAssign()) {
return FALSE;
}
}
else {
if ((getTableName().isPartitionNameSpecified()) ||
(getTableName().isLocationNameSpecified()) ||
(getTableName().isPartitionRangeSpecified()))
return FALSE; // If PartnClause is used no cache hit before bind stage.
}
return TRUE; // may be cacheable
}
// Replace this scan's table name with a prototyped host variable (when
// QUERY_CACHE_TABLENAME is enabled) so logically identical queries over
// different tables can share a cached plan, then parameterize descendants.
RelExpr* Scan::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
if (nodeIsNormalizedForCache()) {
return this;
}
if (CmpCommon::getDefault(QUERY_CACHE_TABLENAME) == DF_OFF) {
// replace descendants' literals into ConstantParameters
return RelExpr::normalizeForCache(cwa, bindWA);
}
// replace tablename with a prototyped tablename.
TableDesc * td = getTableDesc();
CorrName &origName = td->getCorrNameObj();
if (origName.getPrototype() == NULL)
{
Lng32 CACHED_MAX_ANSI_NAME_EXTERNAL_LEN = 128;
NAString hvName("dummy_name");
// build a host variable whose prototype value is the original table name
HostVar * hv =
new(bindWA.wHeap())
HostVar(hvName,
new(bindWA.wHeap()) SQLChar(CACHED_MAX_ANSI_NAME_EXTERNAL_LEN));
hv->setPrototypeValue(origName.getQualifiedNameAsString());
hv->synthTypeAndValueId();
hv->setIsCachedParam(TRUE);
CorrName cn("HostVar$",
bindWA.wHeap(),
hv->getName(), // debugging ease
"$bogus");
cn.setPrototype(hv);
NAString *tmpName =
new (bindWA.wHeap())
NAString(hv->getPrototypeValue(), bindWA.wHeap());
cn.setUgivenName(*tmpName);
cn.applyDefaults(&bindWA, bindWA.getDefaultSchema());
td->setCorrName(cn);
setTableName(cn);
// register the original name as a constant parameter so backpatch
// can supply the real table name at plan-reuse time
char * strval =
new(bindWA.wHeap()) char[CACHED_MAX_ANSI_NAME_EXTERNAL_LEN];
strcpy(strval, origName.getQualifiedNameAsString().data());
CharType * typ =
new(bindWA.wHeap()) SQLChar(CACHED_MAX_ANSI_NAME_EXTERNAL_LEN, FALSE);
ConstValue * cv =
new(bindWA.wHeap()) ConstValue(typ, strval, CACHED_MAX_ANSI_NAME_EXTERNAL_LEN);
ConstantParameter* result = new(bindWA.wHeap()) ConstantParameter
(*cv, bindWA.wHeap(), cwa.getPhase() == CmpMain::PARSE);
result->synthTypeAndValueId();
cwa.addConstParam(result, bindWA);
hv->setPMOrdPosAndIndex(COM_UNKNOWN_DIRECTION,
-1,
(Int32)cwa.getConstParams().entries());
}
// replace descendants' literals into ConstantParameters
return RelExpr::normalizeForCache(cwa, bindWA);
}
// change literals of a cacheable query into ConstantParameters
// Also records the context-wide isolation level for updates into cwa so the
// cache key reflects it.
RelExpr* GenericUpdate::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
if (nodeIsNormalizedForCache()) {
return this;
}
if (cwa.getPhase() >= CmpMain::BIND) {
if (currOfCursorName_) {
// do NOT parameterize the assignment clause(s) of positioned updates
// because "update t051t22 set b = -1 where current of c1" in esqlc
// program such as core/etest051.sql generates an assertion clause:
// "...if_then_else(b= :hv0),return_true,return_true(raiserror())"
// (see BindItemExpr.cpp Scan::bindUpdateCurrentOf) as part of
// GenericUpdate::bindNode. Otherwise, the result is an error 8106.
// The root cause is incomplete parameterization -- the update
// becomes "update t051t22 set b = 0-? where current of c1" but
// "...if_then_else(b= :hv0),return_true,return_true(raiserror())"
// is untouched causing an error 8106 at runtime.
}
else {
if (newRecExprTree_) {
newRecExprTree_->normalizeForCache(cwa, bindWA);
}
else {
newRecExpr_.normalizeForCache(cwa, bindWA);
}
// parameterize "set on rollback" clause for queries such as
// prepare s from select * from (update t042qT8 set b=7
// set on rollback c=2 where a=2) as t;
newRecBeforeExpr_.normalizeForCache(cwa, bindWA);
}
if (executorPredTree_) {
executorPredTree_->normalizeForCache(cwa, bindWA);
}
else {
executorPred_.normalizeForCache(cwa, bindWA);
}
}
// Solution: 10-060327-5370 and 10-060418-5903
// Record the context-wide isolation_level_for_updates value in
// CacheWA when procssing an IDU stmt. Use ISOLATION_LEVEL_FOR_UPDATES
// if it is specified. Otherwise, use ISOLATION_LEVEL. The initial value
// in cwa is IL_NOT_SPECIFIED_.
if ( cwa.getIsoLvlForUpdates() == TransMode::IL_NOT_SPECIFIED_ ) {
TransMode::IsolationLevel il;
ActiveSchemaDB()->getDefaults().getIsolationLevel (il,
CmpCommon::getDefault(ISOLATION_LEVEL_FOR_UPDATES));
if ( il == TransMode::IL_NOT_SPECIFIED_ ) {
ActiveSchemaDB()->getDefaults().getIsolationLevel (il,
CmpCommon::getDefault(ISOLATION_LEVEL));
}
cwa.setIsoLvlForUpdates(il);
}
// replace descendants' literals into ConstantParameters
return RelExpr::normalizeForCache(cwa, bindWA);
}
// append an ascii-version of IsolatedScalarUDF into cachewa.qryText_
// Emits the routine name, its redefinition time (after bind), the UUDF
// action (if any) with its own redefinition time, and the argument list.
void IsolatedScalarUDF::generateCacheKey(CacheWA &cwa) const
{
  RelExpr::generateCacheKey(cwa);
  cwa += " UDFname:";
  cwa += getRoutineName().getQualifiedNameAsAnsiString().data();
  // after bind, include the routine's redef time so a cached plan is never
  // reused for a routine that has since been altered
  NARoutine *udfRoutine = NULL;
  if (cwa.getPhase() >= CmpMain::BIND &&
      getRoutineDesc() &&
      (udfRoutine = getRoutineDesc()->getNARoutine()) != NULL)
  {
    char redefTime[40];
    convertInt64ToAscii(udfRoutine->getRedefTime(), redefTime);
    cwa += " redef:";
    cwa += redefTime;
  }
  // universal UDFs also carry an action, with its own redef time
  if (getRoutineDesc() != NULL && getRoutineDesc()->isUUDFRoutine())
  {
    cwa += " action:";
    cwa += getRoutineDesc()->getActionNameAsGiven();
    NARoutine *actionRoutine = NULL;
    if (cwa.getPhase() >= CmpMain::BIND &&
        getRoutineDesc() &&
        (actionRoutine = getRoutineDesc()->getActionNARoutine()) != NULL)
    {
      char redefTime[40];
      convertInt64ToAscii(actionRoutine->getRedefTime(), redefTime);
      cwa += " actredef:";
      cwa += redefTime;
    }
  }
  // finally, the actual arguments; prefer the parse tree when it exists
  const ItemExpr *argExpr = getProcAllParamsTree();
  if (argExpr == NULL)
  {
    argExpr = getProcInputParamsVids().rebuildExprTree(ITM_ITEM_LIST);
  }
  if (argExpr)
  {
    cwa += " arg:(";
    argExpr->generateCacheKey(cwa);
    cwa += ")";
  }
}
// append an ascii-version of CallSP into cachewa.qryText_
// Emits the stored procedure's name followed by its argument list.
void CallSP::generateCacheKey(CacheWA &cwa) const
{
  RelExpr::generateCacheKey(cwa);
  cwa += " CallSPname:";
  cwa += getRoutineName().getQualifiedNameAsAnsiString().data();
  // prefer the original parse tree of the arguments when it still exists
  const ItemExpr *argExpr = getProcAllParamsTree();
  if (argExpr == NULL)
  {
    argExpr = getProcInputParamsVids().rebuildExprTree(ITM_ITEM_LIST);
  }
  if (argExpr)
  {
    cwa += " arg:";
    argExpr->generateCacheKey(cwa);
  }
}
// append an ascii-version of GroupByAgg into cachewa.qryText_
void GroupByAgg::generateCacheKey(CacheWA &cwa) const
{
  RelExpr::generateCacheKey(cwa);
  // group by col/expr is an important part of the key
  ItemExpr *grpExpr = groupExprTree_ ? groupExprTree_ :
    groupExpr_.rebuildExprTree(ITM_ITEM_LIST);
  if (grpExpr) {
    cwa += " gBy:";

    // GROUP BY ROLLUP produces different result rows than a plain GROUP BY
    // on the same columns, so the rollup attribute must be part of the
    // cache key to prevent false cache hits between the two forms.
    if (isRollup())
      cwa += " roll:";

    grpExpr->generateCacheKey(cwa);
  }
}
// is this entire expression cacheable after this phase?
NABoolean GroupByAgg::isCacheableExpr(CacheWA& cwa)
{
  // visit children first so scans can populate cwa.numberOfScans_
  if (!RelExpr::isCacheableExpr(cwa))
    return FALSE;
  // then verify the group by col/expr itself is cacheable
  ItemExpr *groupByExpr = groupExprTree_
    ? groupExprTree_
    : groupExpr_.rebuildExprTree(ITM_ITEM_LIST);
  if (groupByExpr && !groupByExpr->isCacheableExpr(cwa))
    return FALSE;
  return TRUE; // may be cacheable
}
// append an ascii-version of Insert into cachewa.qryText_
// Beyond the GenericUpdate key, emits the insert column list, ORDER BY,
// any PARTITION clause, and whether this is an upsert.
void Insert::generateCacheKey(CacheWA &cwa) const
{
GenericUpdate::generateCacheKey(cwa);
if (insertColTree_) {
cwa += " insCol:";
insertColTree_->generateCacheKey(cwa);
}
// order by clause is important
ItemExpr *orderBy = orderByTree_ ? orderByTree_ :
reqdOrder_.rebuildExprTree();
if (orderBy) {
cwa += " order:";
orderBy->generateCacheKey(cwa);
}
const NATable *tbl;
if (cwa.getPhase() >= CmpMain::BIND &&
getTableDesc() && (tbl=getTableDesc()->getNATable()) != NULL) {
// If PARTITION clause has been used we must reflect that in the key.
if (tbl->isPartitionNameSpecified()) {
cwa += " partition:";
cwa += tbl->getClusteringIndex()->getFileSetName().getQualifiedNameAsString().data();
}
// If PARTITION range has been used we must reflect that in the key.
else if (tbl->isPartitionRangeSpecified()) {
cwa += " partition:";
char str[100];
sprintf(str, " from %d to %d",
tbl->getExtendedQualName().getPartnClause().getBeginPartitionNumber() ,
tbl->getExtendedQualName().getPartnClause().getEndPartitionNumber());
cwa += str;
}
}
// upsert and plain insert must not share a cached plan
if (isUpsert())
{
cwa += " upsert:";
}
}
// is this entire expression cacheable after this phase?
NABoolean Insert::isCacheableExpr(CacheWA& cwa)
{
  // only single-row inserts are candidates; all other insert types are
  // rejected outright
  return (insertType_ == SIMPLE_INSERT)
    ? GenericUpdate::isCacheableExpr(cwa)
    : FALSE;
}
// change literals of a cacheable query into ConstantParameters
RelExpr* Insert::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
  if (nodeIsNormalizedForCache())
    return this;
  // delegate literal replacement to the generic update logic
  return GenericUpdate::normalizeForCache(cwa, bindWA);
}
// is this entire expression cacheable after this phase?
NABoolean Delete::isCacheableExpr(CacheWA& cwa)
{
  // fastdelete (purgedata) is never a cacheable expression
  return isFastDelete() ? FALSE : GenericUpdate::isCacheableExpr(cwa);
}
// append an ascii-version of Merge into cachewa.qryText_
// The WHEN NOT MATCHED insert column/value lists distinguish merge variants.
void MergeUpdate::generateCacheKey(CacheWA &cwa) const
{
  Update::generateCacheKey(cwa);
  if (insertCols_ != NULL)
  {
    cwa += " insertCols:";
    insertCols_->generateCacheKey(cwa);
  }
  if (insertValues_ != NULL)
  {
    cwa += " insertValues:";
    insertValues_->generateCacheKey(cwa);
  }
}
// is this entire expression cacheable after this phase?
NABoolean MergeUpdate::isCacheableExpr(CacheWA& cwa)
{
// NOTE(review): this rejects the merge when its insert values ARE
// cacheable, which reads inverted; confirm whether the intent was
// !insertValues_->isCacheableExpr(cwa), or simply to reject any merge
// that has a WHEN NOT MATCHED insert clause at all.
if ((insertValues_) &&
(insertValues_->isCacheableExpr(cwa))) {
setNonCacheable();
return FALSE;
}
return Update::isCacheableExpr(cwa);
}
// change literals of a cacheable query into ConstantParameters
RelExpr* MergeUpdate::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
  if (nodeIsNormalizedForCache())
    return this;
  // parameterize the WHEN NOT MATCHED insert values, if any
  if (insertValues_ != NULL)
  {
    insertValues_ = insertValues_->normalizeForCache(cwa, bindWA);
  }
  // replace descendants' literals into ConstantParameters
  return Update::normalizeForCache(cwa, bindWA);
}
// append an ascii-version of Merge into cachewa.qryText_
// The WHEN NOT MATCHED insert column/value lists distinguish merge variants.
void MergeDelete::generateCacheKey(CacheWA &cwa) const
{
  Delete::generateCacheKey(cwa);
  if (insertCols_ != NULL)
  {
    cwa += " insertCols:";
    insertCols_->generateCacheKey(cwa);
  }
  if (insertValues_ != NULL)
  {
    cwa += " insertValues:";
    insertValues_->generateCacheKey(cwa);
  }
}
// is this entire expression cacheable after this phase?
NABoolean MergeDelete::isCacheableExpr(CacheWA& cwa)
{
// NOTE(review): as in MergeUpdate::isCacheableExpr, this rejects the merge
// when its insert values ARE cacheable, which reads inverted; confirm the
// intended condition.
if ((insertValues_) &&
(insertValues_->isCacheableExpr(cwa))) {
setNonCacheable();
return FALSE;
}
return Delete::isCacheableExpr(cwa);
}
// change literals of a cacheable query into ConstantParameters
RelExpr* MergeDelete::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
  if (nodeIsNormalizedForCache())
    return this;
  // parameterize the WHEN NOT MATCHED insert values, if any
  if (insertValues_ != NULL)
  {
    insertValues_ = insertValues_->normalizeForCache(cwa, bindWA);
  }
  // replace descendants' literals into ConstantParameters
  return Delete::normalizeForCache(cwa, bindWA);
}
// append an ascii-version of Join into cachewa.qryText_
// Emits a natural-join marker, the join predicate, then both children.
void Join::generateCacheKey(CacheWA &cwa) const
{
  RelExpr::generateCacheKeyNode(cwa);
  if (isNaturalJoin_)
  {
    cwa += " natj ";
  }
  // prefer the original predicate parse tree when it still exists
  ItemExpr *joinPred = joinPredTree_;
  if (joinPred == NULL)
  {
    joinPred = joinPred_.rebuildExprTree();
  }
  if (joinPred != NULL)
  {
    cwa += " joinPred:";
    joinPred->generateCacheKey(cwa);
  }
  generateCacheKeyForKids(cwa);
}
// is this entire expression cacheable after this phase?
NABoolean Join::isCacheableExpr(CacheWA& cwa)
{
if (cwa.getPhase() >= CmpMain::BIND) {
// must first descend to scans to get cwa.numberOfScans_
if (!RelExpr::isCacheableExpr(cwa)) {
return FALSE;
}
if (isCacheableNode(cwa.getPhase())) {
cwa.setConditionallyCacheable();
}
// if we allow joins of views to be cached, query caching cannot
// distinguish between (see note at bottom of cachewa.h)
// select avg(f.a) from v f, v s group by f.b;
// select avg(s.a) from v f, v s group by f.b;
// select avg(t.a) from v f, t group by f.b;
// assuming v is "create view v from select * from t". We avoid
// false cache hits by detecting the possible occurrence of such
// view joins here and later using cwa.isViewJoin_ to include
// their query texts into their cache keys.
//
// A view is repsented by a renamed table with isView() returnning
// TRUE.
RelExpr *c0 = child(0);
RelExpr *c1 = child(1);
if ((c0->getOperatorType() == REL_RENAME_TABLE &&
((RenameTable *)c0)->isView() == TRUE)
||
(c1->getOperatorType() == REL_RENAME_TABLE &&
((RenameTable *)c1)->isView() == TRUE)) {
cwa.foundViewJoin();
}
// check its join predicate
ItemExpr *pred = joinPredTree_ ? joinPredTree_ :
joinPred_.rebuildExprTree();
if (pred) {
cwa.setHasPredicate();
// is join predicate cacheable?
if (pred->hasNoLiterals(cwa)) {
// predicate with no literals is cacheable
}
else {
cwa.setPredHasNoLit(FALSE);
if (!pred->isCacheableExpr(cwa)) {
// a non-cacheable predicate renders Join non-cacheable.
setNonCacheable();
return FALSE;
}
}
}
return TRUE; // join may be cacheable
}
return FALSE;
}
// change literals of a cacheable query into ConstantParameters
RelExpr* Join::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
return RelExpr::normalizeForCache(cwa, bindWA);
}
// are RelExpr's kids cacheable after this phase?
NABoolean RelExpr::cacheableKids(CacheWA& cwa)
{
switch (cwa.getPhase()) {
case CmpMain::PARSE:
case CmpMain::BIND: {
Int32 arity = getArity();
if (arity <= 0) { // we have no kids
if (cwa.isConditionallyCacheable()) {
// we're conditionally cacheable and have no kids
setCacheableNode(cwa.getPhase());
return TRUE; // so, we're cachable
}
else {
return FALSE; // MAYBECACHEABLE is not cacheable at this phase
// don't mark this node non-cacheable because this
// RelExpr may be cacheable after the next phase.
}
}
// cacheability of child(ren) determine our cacheability
for (Int32 x = 0; x < arity; x++) {
if (!child(x) || // cases like "insert into t default values"
// return 1 from getArity() even if child(0) is NULL; so
// guard against this potential mxcmp crash and consider
// these cases non-cacheable during the PARSE stage.
child(x)->isNonCacheable()) {
// the 1st noncacheable child makes us noncacheable
setNonCacheable();
return FALSE;
}
else if (!child(x)->isCacheableExpr(cwa)) {
// noncacheable child
return FALSE;
// don't mark this node non-cacheable because this
// RelExpr may be cacheable after the next phase.
}
else { // cacheable child
continue; // look at next child
}
}
// all children are cacheable, so we're cacheable too
setCacheableNode(cwa.getPhase());
return TRUE;
}
default:
return FALSE;
}
}
// append an ascii-version of RelExpr into cachewa.qryText_
void RelExpr::generateCacheKey(CacheWA &cwa) const
{
generateCacheKeyNode(cwa);
generateCacheKeyForKids(cwa);
}
// append an ascii-version of RelExpr node into cachewa.qryText_
void RelExpr::generateCacheKeyNode(CacheWA &cwa) const
{
// emit any "[firstn_sorted]" modifier
if (firstNRows_ != -1) {
char firstN[40];
convertInt64ToAscii(((RelExpr*)this)->getFirstNRows(), firstN);
cwa += firstN;
cwa += " ";
}
// emit other "significant" parts of RelExpr
cwa += getText();
ItemExpr *pred = selPredTree() ? selPredTree() :
getSelectionPred().rebuildExprTree();
if (pred) {
cwa += " selPred:";
pred->generateCacheKey(cwa);
}
// make any optimizer hints part of the postbinder cache key so that
// 2 cacheable queries with different optimizer hints do not match
if (hint_) {
CollIndex x, cnt=hint_->indexCnt();
if (cnt > 0) {
cwa += " xhint:";
for (x = 0; x < cnt; x++) {
cwa += (*hint_)[x].data();
cwa += ",";
}
}
char str[100];
if (hint_->hasCardinality()) {
sprintf(str, "card:%g", hint_->getCardinality());
cwa += str;
}
if (hint_->hasSelectivity()) {
sprintf(str, ",sel:%g", hint_->getSelectivity());
cwa += str;
}
}
}
// append an ascii-version of RelExpr's kids into cachewa.qryText_
void RelExpr::generateCacheKeyForKids(CacheWA& cwa) const
{
Int32 maxi = getArity();
if (maxi) {
cwa += " kids(";
for (Lng32 i = 0; i < maxi; i++) {
if (i > 0) {
cwa += ",";
}
if ( child(i).getPtr() == NULL ) {
continue;
}
child(i)->generateCacheKey(cwa);
}
cwa += ")";
}
}
// return any Scan node from this RelExpr
Scan *RelExpr::getAnyScanNode() const
{
if (getOperatorType() == REL_SCAN) {
return (Scan*)this;
}
Scan *result = NULL;
Int32 arity = getArity();
for (Int32 x = 0; x < arity && !result; x++) {
if (child(x)) {
result = child(x)->getAnyScanNode();
}
}
return result;
}
// is this entire expression cacheable after this phase?
NABoolean RelExpr::isCacheableExpr(CacheWA& cwa)
{
switch (cwa.getPhase()) {
case CmpMain::PARSE:
case CmpMain::BIND: {
// does query have too many ExprNodes?
if (cwa.inc_N_check_still_cacheable() == FALSE) {
// yes. query with too many ExprNodes is not cacheable.
return FALSE;
}
if (isNonCacheable()) { // this node is not cacheable
return FALSE; // so the entire expression is not cacheable
// don't mark this node non-cacheable because this
// RelExpr may be cacheable after the next phase.
}
if (isCacheableNode(cwa.getPhase())) {
// must be an INSERT, UPDATE, DELETE, or SELECT node;
// so, mark this expression as conditionally cacheable.
cwa.setConditionallyCacheable();
}
// must descend to scans to get cwa.numberOfScans_
if (!cacheableKids(cwa)) {
return FALSE;
}
// this node is either cacheable or maybecacheable
// check its selection predicate
ItemExpr *pred = selPredTree() ? selPredTree() :
getSelectionPred().rebuildExprTree();
if (pred) {
cwa.setHasPredicate();
// is selection predicate cacheable?
if (pred->hasNoLiterals(cwa)) {
// predicate with no literals is cacheable
}
else {
cwa.setPredHasNoLit(FALSE);
if (!pred->isCacheableExpr(cwa)) {
// a non-cacheable selection predicate
// renders entire RelExpr non-cacheable.
setNonCacheable();
return FALSE;
}
}
}
return TRUE; // RelExpr may be cacheable
}
default: { const NABoolean notYetImplemented = FALSE;
CMPASSERT(notYetImplemented);
return FALSE;
}
}
}
// is this ExprNode cacheable after this phase?
NABoolean RelExpr::isCacheableNode(CmpPhase phase) const
{
switch (phase) {
case CmpMain::PARSE:
return cacheable_ == ExprNode::CACHEABLE_PARSE;
case CmpMain::BIND:
return cacheable_ == ExprNode::CACHEABLE_BIND ||
cacheable_ == ExprNode::CACHEABLE_PARSE;
default:
break;
}
return FALSE;
}
// change literals of a cacheable query into ConstantParameters
RelExpr* RelExpr::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
if (nodeIsNormalizedForCache()) {
return this;
}
// replace descendants' literals into ConstantParameters
normalizeKidsForCache(cwa, bindWA);
if (cwa.getPhase() >= CmpMain::BIND) {
if (selection_) {
selection_ = selection_->normalizeForCache(cwa, bindWA);
}
else {
selectionPred().normalizeForCache(cwa, bindWA);
}
// RelExpr::bindSelf has done this line during binding; but, we
// must redo it to recognize any new constantparameters created
// by the above normalizeForCache call(s) as RelExpr inputs.
getGroupAttr()->addCharacteristicInputs
(bindWA.getCurrentScope()->getOuterRefs());
}
markAsNormalizedForCache();
return this;
}
// change literals in cacheable query's kids into ConstantParameters
void RelExpr::normalizeKidsForCache(CacheWA& cachewa, BindWA& bindWA)
{
Int32 arity = getArity();
for (Int32 x = 0; x < arity; x++) {
child(x) = child(x)->normalizeForCache(cachewa, bindWA);
}
}
// mark this ExprNode as cacheable after this phase
void RelExpr::setCacheableNode(CmpPhase phase)
{
switch (phase) {
case CmpMain::PARSE:
cacheable_ = ExprNode::CACHEABLE_PARSE;
break;
case CmpMain::BIND:
cacheable_ = ExprNode::CACHEABLE_BIND;
break;
default:
break;
}
}
// append an ascii-version of RelRoot into cachewa.qryText_
void RelRoot::generateCacheKey(CacheWA &cwa) const
{
RelExpr::generateCacheKey(cwa);
ItemExpr *cExpr = compExprTree_ ? compExprTree_ :
compExpr_.rebuildExprTree();
if (cExpr) {
// append any select list into cache key
cwa += " cExpr:";
cExpr->generateCacheKey(cwa);
// reflect any "[first n]"
cwa += ((RelRoot*)this)->needFirstSortedRows() ? " 1stN" : " ";
// Should the select_list aliases be a part of the cache key?
// Their not affecting the compiled plan argues for their exclusion.
// Their affecting sqlci's expected output argues for their inclusion.
RETDesc *rDesc = getRETDesc();
CollIndex degree, x;
if (rDesc && (degree=rDesc->getDegree()) > 0) {
cwa += " sla:";
for (x = 0; x < degree; x++){
cwa += rDesc->getColRefNameObj(x).getColName().data();
cwa += " ";
}
// fix 0-061115-0532 (query cache didn't handle select with embedded
// update correctly). New/Old corr. names are recorded for embedded
// updates here for exact match. This is important because otherwise
// a reuse of a query returning the old/new version of values for
// a query requesting new/old version is totally possible and
// unacceptable.
//
// Sample embedded update queries
// select * from (update tab1 set x = x + 1 where x > 1 return new.*) y;
// select * from (update tab1 set x = x + 1 where x > 1 return new.x, old.y) y;
//
if ( cwa.isUpdate() && isTrueRoot() == FALSE ) {
cwa += " corrNamTok:";
cwa += rDesc->getBindWA()->getCorrNameTokens();
}
}
}
// order by clause is important
ItemExpr *orderBy = orderByTree_ ? orderByTree_ :
reqdOrder_.rebuildExprTree();
if (orderBy) {
cwa += " order:";
orderBy->generateCacheKey(cwa);
}
// statement-level access type & lock mode are important for multiuser
// applications. both are reflected in the stmt-level and/or context-wide
// TransMode. So, we mimic RelRoot::codeGen logic here: "copy the current
// context-wide TransMode, then overlay with this stmt's 'FOR xxx ACCESS'
// setting, if any".
TransMode tmode;
tmode.updateTransMode(CmpCommon::transMode());
StmtLevelAccessOptions &opts = ((RelRoot*)this)->accessOptions();
if (opts.accessType() != ACCESS_TYPE_NOT_SPECIFIED_) {
tmode.updateAccessModeFromIsolationLevel
(TransMode::ATtoIL(opts.accessType()));
tmode.setStmtLevelAccessOptions();
}
if (isTrueRoot()) {
// these are needed by Javier's qc stats virtual tbl interface
cwa.setIsoLvl(tmode.getIsolationLevel());
cwa.setAccessMode(tmode.getAccessMode());
cwa.setAutoCommit(tmode.getAutoCommit());
cwa.setFlags(tmode.getFlags());
cwa.setRollbackMode(tmode.getRollbackMode());
cwa.setAutoabortInterval(tmode.getAutoAbortIntervalInSeconds());
cwa.setMultiCommit(tmode.getMultiCommit());
}
// needed to distinguish these queries and avoid a false hit
// select * from (delete from t where a=2) as t;
// select * from (delete from t where a=2 for SKIP CONFLICT ACCESS) as t;
char mode[40];
str_itoa(tmode.getIsolationLevel(), mode); cwa += " il:"; cwa += mode;
str_itoa(tmode.getAccessMode(), mode); cwa += " am:"; cwa += mode;
// Solution: 10-060418-5903
str_itoa(cwa.getIsoLvlForUpdates(), mode); cwa += " ilu:"; cwa += mode;
str_itoa(tmode.getAutoCommit(), mode); cwa += " ac:"; cwa += mode;
str_itoa(tmode.getFlags(), mode); cwa += " fl:"; cwa += mode;
str_itoa(tmode.getRollbackMode(), mode); cwa += " rm:"; cwa += mode;
str_itoa(tmode.getAutoAbortIntervalInSeconds(), mode); cwa += " ai:"; cwa += mode;
str_itoa(tmode.getMultiCommit(), mode); cwa += " mc:"; cwa += mode;
if (opts.lockMode() != LOCK_MODE_NOT_SPECIFIED_) {
// need to distinguish these queries and avoid a false hit
// select * from t in share mode;
// select * from t in exclusive mode;
str_itoa(opts.lockMode(), mode); cwa += " lm:"; cwa += mode;
}
// updatableSelect_ is essential. Otherwise, queries like
// "select * from t" and "select * from t for update" can confuse
// query caching into a false hit, causing fullstack/test051 to fail.
if (updatableSelect_) {
cwa += " 4updt ";
}
// for update of col [,col]... clause is important
ItemExpr *updCol = updateColTree_ ? updateColTree_ :
updateCol_.rebuildExprTree();
if (updCol) {
cwa += " updCol:";
updCol->generateCacheKey(cwa);
}
// making the CQS part of the key is more efficient than calling
// CompilerEnv::changeEnv() in ControlDB::setRequiredShape()
if (reqdShape_) {
reqdShape_->unparse(cwa.reqdShape_);
}
}
// is this entire expression cacheable after this phase?
NABoolean RelRoot::isCacheableExpr(CacheWA& cwa)
{
//queries prefixed by display are not cacheable e.g. display select * from ...
if(getDisplayTree())
return FALSE;
// Parallel extract producer queries are not cacheable
if (numExtractStreams_ > 0)
return FALSE;
// descend to scans early to get cwa.numberOfScans_
if (!RelExpr::isCacheableExpr(cwa)) {
return FALSE;
}
if (cwa.getPhase() == CmpMain::PARSE) {
if (compExprTree_ || compExpr_.entries() > 0) {
// insert-returning is not cacheable after parse
return FALSE;
}
}
else if (cwa.getPhase() >= CmpMain::BIND) {
// make sure select list is cacheable
if (compExprTree_) {
if (!compExprTree_->isCacheableExpr(cwa)) {
return FALSE;
}
}
else if (!compExpr_.isCacheableExpr(cwa)) {
return FALSE;
}
}
if (isAnalyzeOnly())
return FALSE;
return TRUE;
}
// change literals of a cacheable query into ConstantParameters and save
// true root into cachewa so we can "bind" ConstantParameters as "inputvars"
RelExpr* RelRoot::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
if (nodeIsNormalizedForCache()) {
return this;
}
if (isTrueRoot()) {
cwa.setTopRoot(this);
}
if (cwa.getPhase() >= CmpMain::BIND) {
// replace any select list literals into constant parameters
if (compExprTree_) {
compExprTree_ = compExprTree_->normalizeForCache(cwa, bindWA);
}
else {
compExpr_.normalizeForCache(cwa, bindWA);
}
}
// replace descendants' literals into ConstantParameters
RelExpr *result = RelExpr::normalizeForCache(cwa, bindWA);
if (cwa.getPhase() >= CmpMain::BIND) {
// query tree has undergone BINDing, but RelExpr::normalizeForCache
// may have introduced new ConstantParameters in place of ConstValues;
// we want to BIND these new ConstantParameters but a RelRoot::bindNode()
// call here would be overkill; we just want these new ConstantParameters
// to be "bound" as "inputvars"; so, we selectively cut and paste code
// from BindRelExpr.cpp RelRoot::bindNode into here to "bind" any new
// ConstantParameters as "inputvars".
ItemExpr *inputVarTree = removeInputVarTree();
if (inputVarTree) {
inputVarTree->convertToValueIdList(inputVars(), &bindWA, ITM_ITEM_LIST);
if (bindWA.errStatus()) {
return NULL;
}
}
}
return result;
}
// append an ascii-version of Scan into cachewa.qryText_
void Scan::generateCacheKey(CacheWA &cwa) const
{
RelExpr::generateCacheKey(cwa);
// Fix to 10-010618-3505, 10-010619-3515: include this Scan table's
// RedefTime into cwa.qryText_ to make sure we get a cache hit only on
// query that reference table(s) that have not changed since the query's
// addition to the cache. The queries that reference altered table(s)
// will never be hit again and will eventually age out of the cache.
const NATable *tbl;
if (cwa.getPhase() >= CmpMain::BIND &&
getTableDesc() && (tbl=getTableDesc()->getNATable()) != NULL) {
char redefTime[40];
convertInt64ToAscii(tbl->getRedefTime(), redefTime);
cwa += " redef:";
cwa += redefTime;
if (tbl->isHiveTable()) {
char lastModTime[40];
Int64 mTime = tbl->getClusteringIndex()->getHHDFSTableStats()->getModificationTS();
convertInt64ToAscii(mTime, lastModTime);
cwa += " lastMod:";
cwa += lastModTime;
cwa += " numFiles:";
char numFiles[20];
Int64 numberOfFiles = tbl->getClusteringIndex()->getHHDFSTableStats()->getNumFiles();
sprintf(numFiles, " %ld", numberOfFiles);
cwa += numFiles ;
}
// save pointer to this table. later, QueryCache::addEntry will use
// this pointer to get to this table's histograms's timestamp
cwa.addTable( (NATable*)tbl );
// If PARTITION clause has been used we must reflect that in the key.
if (tbl->isPartitionNameSpecified()) {
cwa += " partition:";
cwa += tbl->getClusteringIndex()->getFileSetName().getQualifiedNameAsString().data();
}
// If PARTITION range has been used we must reflect that in the key.
else if (tbl->isPartitionRangeSpecified()) {
cwa += " partition:";
char str[100];
sprintf(str, " from %d to %d",
tbl->getExtendedQualName().getPartnClause().getBeginPartitionNumber() ,
tbl->getExtendedQualName().getPartnClause().getEndPartitionNumber());
cwa += str;
}
}
// We must reflect userTableName_.location into cache key.
// Otherwise, two queries which differ only in location such as
// table table (table T058a, location $system.zsd12345.x1234500);
// table table (table T058a, location $data .zsd12345.x1234500);
// can confuse our query caching code to return a false hit and
// cause fullstack/test058 to fail.
cwa += userTableName_.getLocationName().data();
// Same with stream_ because queries like
// "select * from t" and "select * from stream(t)" can
// confuse query caching into a false hit causing test079 to fail.
if (stream_) {
cwa += " stream ";
}
// mark mpalias queries so they can be decached upon user request
if (getTableDesc()->getNATable()->isAnMPTableWithAnsiName()) {
cwa += AM_AN_MPALIAS_QUERY;
}
if (getHbaseAccessOptions())
{
cwa += " hbaseVersions: ";
char numVersions[20];
sprintf(numVersions, " %d", getHbaseAccessOptions()->getHbaseVersions());
cwa += numVersions ;
}
}
// is this entire expression cacheable after this phase?
NABoolean Scan::isCacheableExpr(CacheWA& cwa)
{
if (cwa.getPhase() >= CmpMain::BIND) {
// save scan's TableDesc
cwa.incNofScans(tabId_);
// native hbase access is not cacheable for now.
if ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable()))
return FALSE;
if (stream_) { // pub-sub streams are not cacheable
return FALSE;
}
// mpalias SELECT is not cacheable unless explicitly requested
if (getTableDesc()->getNATable()->isAnMPTableWithAnsiName() &&
CmpCommon::getDefault(QUERY_CACHE_MPALIAS) == DF_OFF) {
return FALSE;
}
cwa.setConditionallyCacheable();
if (CmpCommon::getDefaultLong(MVQR_REWRITE_LEVEL) >= 1 &&
QRDescGenerator::hasRewriteEnabledMVs(getTableDesc())) {
cwa.setRewriteEnabledMV();
}
return RelExpr::isCacheableExpr(cwa);
}
return FALSE;
}
// append an ascii-version of Tuple into cachewa.qryText_
void Tuple::generateCacheKey(CacheWA &cwa) const
{
// Do not call RelExpr::generateCacheKey(cwa) here because it's redundant.
// It does the same things as the code below. RelExpr::generateCacheKey()
// calls Tuple::getText() which has logic similar to the following code.
ItemExpr *tExpr = tupleExprTree() ? tupleExprTree() :
tupleExpr_.rebuildExprTree();
if (tExpr) {
cwa += " tupExpr:";
tExpr->generateCacheKey(cwa);
}
else {
RelExpr::generateCacheKey(cwa);
}
}
// is this entire expression cacheable after this phase?
NABoolean Tuple::isCacheableExpr(CacheWA& cwa)
{
// we do not call RelExpr::isCacheableExpr here because it's redundant
// -- Tuple is a leaf node and has no predicates.
ItemExpr *tExpr = tupleExprTree() ? tupleExprTree() :
tupleExpr_.rebuildExprTree();
return tExpr->isCacheableExpr(cwa);
}
// change literals of a cacheable query into ConstantParameters
RelExpr* Tuple::normalizeForCache(CacheWA& cwa, BindWA& bindWA)
{
if (nodeIsNormalizedForCache()) {
return this;
}
if (tupleExprTree_) {
tupleExprTree_ = tupleExprTree_->normalizeForCache(cwa, bindWA);
}
else {
tupleExpr_.normalizeForCache(cwa, bindWA);
}
// replace descendants' literals into ConstantParameters
return RelExpr::normalizeForCache(cwa, bindWA);
}
// append an ascii-version of Union into cachewa.qryText_
void Union::generateCacheKey(CacheWA &cwa) const
{
RelExpr::generateCacheKeyNode(cwa);
char buf[40];
cwa += " flgs_: ";
convertInt64ToAscii(flags_, buf);
cwa += buf;
cwa += " ctrFlgs_: ";
convertInt64ToAscii(controlFlags_, buf);
cwa += buf;
cwa += " sysGen_: ";
cwa += (isSystemGenerated_) ? "1" : "0";
// turn on the following when condExprTree_ and trigExceptExprTree_
// are considered part of the key
//
//if (condExprTree_) {
// cwa += " condExprTree_: ";
// condExprTree_->generateCacheKey(cwa);
//}
//if (trigExceptExprTree_) {
// cwa += " trigExceptExprTree_: ";
// trigExceptExprTree_->generateCacheKey(cwa);
//}
generateCacheKeyForKids(cwa);
}
NABoolean Update::isCacheableExpr(CacheWA& cwa)
{
cwa.setIsUpdate(TRUE);
return GenericUpdate::isCacheableExpr(cwa);
}
// append an ascii-version of FastExtract into cachewa.qryText_
void FastExtract::generateCacheKey(CacheWA &cwa) const
{
RelExpr::generateCacheKeyNode(cwa);
char buf[40];
cwa += " targType_ ";
str_itoa(getTargetType(), buf);
cwa += buf;
cwa += " targName_ ";
cwa += getTargetName();
cwa += " delim_ ";
cwa += getDelimiter();
cwa += " isAppend_ ";
cwa += isAppend() ? "1" : "0";
cwa += " includeHeader_ ";
cwa += includeHeader() ? "1" : "0";
cwa += " cType_ ";
str_itoa(getCompressionType(), buf);
cwa += buf;
cwa += " nullString_ ";
cwa += getNullString();
cwa += " recSep_ ";
cwa += getRecordSeparator();
generateCacheKeyForKids(cwa);
}
| 1 | 14,190 | I think we also need to add the rollupGroupExprList() to the cache key. If we rebuild the list above from a ValueIdSet on line 418 above, it is probably going to be in the same order, regardless whether it was ROLLUP(a,b) or ROLLUP(b,a). | apache-trafodion | cpp |
@@ -14,10 +14,15 @@
*/
package com.google.api.codegen.transformer.nodejs;
+import com.google.api.codegen.config.MethodConfig;
import com.google.api.codegen.transformer.ApiMethodParamTransformer;
import com.google.api.codegen.transformer.MethodTransformerContext;
+import com.google.api.codegen.transformer.SurfaceNamer;
+import com.google.api.codegen.util.Name;
import com.google.api.codegen.viewmodel.DynamicLangDefaultableParamView;
import com.google.api.codegen.viewmodel.ParamDocView;
+import com.google.api.codegen.viewmodel.SimpleParamDocView;
+import com.google.api.tools.framework.model.Field;
import com.google.common.collect.ImmutableList;
import java.util.List;
| 1 | /* Copyright 2017 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.nodejs;
import com.google.api.codegen.transformer.ApiMethodParamTransformer;
import com.google.api.codegen.transformer.MethodTransformerContext;
import com.google.api.codegen.viewmodel.DynamicLangDefaultableParamView;
import com.google.api.codegen.viewmodel.ParamDocView;
import com.google.common.collect.ImmutableList;
import java.util.List;
public class NodeJSApiMethodParamTransformer implements ApiMethodParamTransformer {
@Override
public List<DynamicLangDefaultableParamView> generateMethodParams(
MethodTransformerContext context) {
// TODO(eoogbe): implement this method when migrating to MVVM
return ImmutableList.<DynamicLangDefaultableParamView>of();
}
@Override
public List<ParamDocView> generateParamDocs(MethodTransformerContext context) {
// TODO(eoogbe): implement this method when migrating to MVVM
return ImmutableList.<ParamDocView>of();
}
}
| 1 | 21,608 | can use `getParamTypeName` instead | googleapis-gapic-generator | java |
@@ -184,14 +184,14 @@ class AdminController extends Controller
{
$this->dispatch(EasyAdminEvents::PRE_EDIT);
- if ($this->request->isXmlHttpRequest()) {
- return $this->ajaxEdit();
- }
-
$id = $this->request->query->get('id');
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
+ if ($this->request->isXmlHttpRequest() && $this->request->query->has('property')) {
+ return $this->toggleBooleanProperty($entity, $this->request->query->get('property'), $this->request->query->get('newValue'));
+ }
+
$fields = $this->entity['edit']['fields'];
$editForm = $this->executeDynamicMethod('create<EntityName>EditForm', array($entity, $fields)); | 1 | <?php
/*
* This file is part of the EasyAdminBundle.
*
* (c) Javier Eguiluz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace JavierEguiluz\Bundle\EasyAdminBundle\Controller;
use Doctrine\DBAL\Platforms\PostgreSqlPlatform;
use Doctrine\ORM\EntityManager;
use Doctrine\ORM\QueryBuilder;
use JavierEguiluz\Bundle\EasyAdminBundle\Event\EasyAdminEvents;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\ForbiddenActionException;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\NoEntitiesConfiguredException;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\UndefinedEntityException;
use Pagerfanta\Adapter\DoctrineORMAdapter;
use Pagerfanta\Pagerfanta;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route;
use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Symfony\Component\EventDispatcher\GenericEvent;
use Symfony\Component\Form\Form;
use Symfony\Component\Form\FormBuilder;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\Form\FormInterface;
use Symfony\Component\HttpFoundation\RedirectResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
/**
* The controller used to render all the default EasyAdmin actions.
*
* @author Javier Eguiluz <[email protected]>
*/
class AdminController extends Controller
{
protected $config;
protected $entity = array();
/** @var Request */
protected $request;
/** @var EntityManager */
protected $em;
/**
* @Route("/", name="easyadmin")
* @Route("/", name="admin")
*
* The 'admin' route is deprecated since version 1.8.0 and it will be removed in 2.0.
*
* @param Request $request
*
* @return RedirectResponse|Response
*/
public function indexAction(Request $request)
{
$this->initialize($request);
if (null === $request->query->get('entity')) {
return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->config['default_entity_name'])));
}
$action = $request->query->get('action', 'list');
if (!$this->isActionAllowed($action)) {
throw new ForbiddenActionException(array('action' => $action, 'entity' => $this->entity['name']));
}
return $this->executeDynamicMethod($action.'<EntityName>Action');
}
/**
* It renders the main CSS applied to the backend design. This controller
* allows to generate dynamic CSS files that use variables without the need
* to set up a CSS preprocessing toolchain.
*
* @Route("/_css/admin.css", name="_easyadmin_render_css")
*
* @return Response
*/
public function renderCssAction()
{
$config = $this->container->getParameter('easyadmin.config');
$cssContent = $this->renderView('@EasyAdmin/css/admin.css.twig', array(
'brand_color' => $config['design']['brand_color'],
'color_scheme' => $config['design']['color_scheme'],
));
return Response::create($cssContent, 200, array('Content-Type' => 'text/css'))
->setPublic()
->setSharedMaxAge(600)
;
}
/**
* Utility method which initializes the configuration of the entity on which
* the user is performing the action.
*
* @param Request $request
*/
protected function initialize(Request $request)
{
$this->dispatch(EasyAdminEvents::PRE_INITIALIZE);
$this->config = $this->container->getParameter('easyadmin.config');
if (0 === count($this->config['entities'])) {
throw new NoEntitiesConfiguredException();
}
// this condition happens when accessing the backend homepage, which
// then redirects to the 'list' action of the first configured entity
if (null === $entityName = $request->query->get('entity')) {
return;
}
if (!array_key_exists($entityName, $this->config['entities'])) {
throw new UndefinedEntityException(array('entity_name' => $entityName));
}
$this->entity = $this->get('easyadmin.configurator')->getEntityConfiguration($entityName);
if (!$request->query->has('sortField')) {
$request->query->set('sortField', $this->entity['primary_key_field_name']);
}
if (!$request->query->has('sortDirection') || !in_array(strtoupper($request->query->get('sortDirection')), array('ASC', 'DESC'))) {
$request->query->set('sortDirection', 'DESC');
}
$this->em = $this->getDoctrine()->getManagerForClass($this->entity['class']);
$this->request = $request;
$this->dispatch(EasyAdminEvents::POST_INITIALIZE);
}
protected function dispatch($eventName, array $arguments = array())
{
$arguments = array_replace(array(
'config' => $this->config,
'em' => $this->em,
'entity' => $this->entity,
'request' => $this->request,
), $arguments);
$subject = isset($arguments['paginator']) ? $arguments['paginator'] : $arguments['entity'];
$event = new GenericEvent($subject, $arguments);
$this->get('event_dispatcher')->dispatch($eventName, $event);
}
/**
* The method that is executed when the user performs a 'list' action on an entity.
*
* @return Response
*/
protected function listAction()
{
$this->dispatch(EasyAdminEvents::PRE_LIST);
$fields = $this->entity['list']['fields'];
$paginator = $this->findAll($this->entity['class'], $this->request->query->get('page', 1), $this->config['list']['max_results'], $this->request->query->get('sortField'), $this->request->query->get('sortDirection'));
$this->dispatch(EasyAdminEvents::POST_LIST, array('paginator' => $paginator));
return $this->render($this->entity['templates']['list'], array(
'paginator' => $paginator,
'fields' => $fields,
));
}
/**
* The method that is executed when the user performs a 'edit' action on an entity.
*
* @return RedirectResponse|Response
*/
protected function editAction()
{
$this->dispatch(EasyAdminEvents::PRE_EDIT);
if ($this->request->isXmlHttpRequest()) {
return $this->ajaxEdit();
}
$id = $this->request->query->get('id');
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
$fields = $this->entity['edit']['fields'];
$editForm = $this->executeDynamicMethod('create<EntityName>EditForm', array($entity, $fields));
$deleteForm = $this->createDeleteForm($this->entity['name'], $id);
$editForm->handleRequest($this->request);
if ($editForm->isValid()) {
$this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity));
$this->executeDynamicMethod('preUpdate<EntityName>Entity', array($entity));
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity));
$refererUrl = $this->request->query->get('referer', '');
return !empty($refererUrl)
? $this->redirect(urldecode($refererUrl))
: $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$this->dispatch(EasyAdminEvents::POST_EDIT);
return $this->render($this->entity['templates']['edit'], array(
'form' => $editForm->createView(),
'entity_fields' => $fields,
'entity' => $entity,
'delete_form' => $deleteForm->createView(),
));
}
/**
* The method that is executed when the user performs a 'show' action on an entity.
*
* @return Response
*/
protected function showAction()
{
$this->dispatch(EasyAdminEvents::PRE_SHOW);
$id = $this->request->query->get('id');
$easyadmin = $this->request->attributes->get('easyadmin');
$entity = $easyadmin['item'];
$fields = $this->entity['show']['fields'];
$deleteForm = $this->createDeleteForm($this->entity['name'], $id);
$this->dispatch(EasyAdminEvents::POST_SHOW, array(
'deleteForm' => $deleteForm,
'fields' => $fields,
'entity' => $entity,
));
return $this->render($this->entity['templates']['show'], array(
'entity' => $entity,
'fields' => $fields,
'delete_form' => $deleteForm->createView(),
));
}
/**
 * The method that is executed when the user performs a 'new' action on an entity.
 *
 * Instantiates a fresh entity, binds the request to the creation form and,
 * on success, persists the entity and redirects back to the 'list' view.
 *
 * @return RedirectResponse|Response Redirect after a successful submit,
 *                                   rendered form otherwise
 */
protected function newAction()
{
    $this->dispatch(EasyAdminEvents::PRE_NEW);
    $entity = $this->executeDynamicMethod('createNew<EntityName>Entity');
    // Expose the freshly created entity through the request attributes so
    // that listeners and templates can access it like in the other actions.
    $easyadmin = $this->request->attributes->get('easyadmin');
    $easyadmin['item'] = $entity;
    $this->request->attributes->set('easyadmin', $easyadmin);
    $fields = $this->entity['new']['fields'];
    $newForm = $this->executeDynamicMethod('create<EntityName>NewForm', array($entity, $fields));
    $newForm->handleRequest($this->request);
    // NOTE(review): isValid() is called without isSubmitted(); this relies on
    // the legacy Symfony Form behavior where isValid() returns false for
    // unsubmitted forms — confirm before upgrading Symfony.
    if ($newForm->isValid()) {
        $this->dispatch(EasyAdminEvents::PRE_PERSIST, array('entity' => $entity));
        // Give applications a hook to tweak the entity before it is persisted.
        $this->executeDynamicMethod('prePersist<EntityName>Entity', array($entity));
        $this->em->persist($entity);
        $this->em->flush();
        $this->dispatch(EasyAdminEvents::POST_PERSIST, array('entity' => $entity));
        return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
    }
    $this->dispatch(EasyAdminEvents::POST_NEW, array(
        'entity_fields' => $fields,
        'form' => $newForm,
        'entity' => $entity,
    ));
    return $this->render($this->entity['templates']['new'], array(
        'form' => $newForm->createView(),
        'entity_fields' => $fields,
        'entity' => $entity,
    ));
}
/**
 * The method that is executed when the user performs a 'delete' action to
 * remove any entity.
 *
 * Only honors real 'DELETE' requests protected by the dedicated delete form;
 * anything else is redirected back to the 'list' view.
 *
 * @return RedirectResponse
 */
protected function deleteAction()
{
    $this->dispatch(EasyAdminEvents::PRE_DELETE);
    // Deletions must go through the CSRF-protected delete form, which always
    // submits with the DELETE HTTP method.
    if ('DELETE' !== $this->request->getMethod()) {
        return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
    }
    $id = $this->request->query->get('id');
    $form = $this->createDeleteForm($this->entity['name'], $id);
    $form->handleRequest($this->request);
    if ($form->isValid()) {
        $easyadmin = $this->request->attributes->get('easyadmin');
        $entity = $easyadmin['item'];
        $this->dispatch(EasyAdminEvents::PRE_REMOVE, array('entity' => $entity));
        // Give applications a hook to act on the entity before it is removed.
        $this->executeDynamicMethod('preRemove<EntityName>Entity', array($entity));
        $this->em->remove($entity);
        $this->em->flush();
        $this->dispatch(EasyAdminEvents::POST_REMOVE, array('entity' => $entity));
    }
    // Send the user back where they came from when a referer is available.
    $refererUrl = $this->request->query->get('referer', '');
    $this->dispatch(EasyAdminEvents::POST_DELETE);
    return !empty($refererUrl)
        ? $this->redirect(urldecode($refererUrl))
        : $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
/**
 * Handles the 'search' action: runs the user query against the entity's
 * searchable fields and renders the results with the 'list' template.
 *
 * @return Response
 */
protected function searchAction()
{
    $this->dispatch(EasyAdminEvents::PRE_SEARCH);

    $paginator = $this->findBy(
        $this->entity['class'],
        $this->request->query->get('query'),
        $this->entity['search']['fields'],
        $this->request->query->get('page', 1),
        $this->config['list']['max_results']
    );
    // Results are displayed with the same columns as the regular list view.
    $fields = $this->entity['list']['fields'];

    $this->dispatch(EasyAdminEvents::POST_SEARCH, array(
        'fields' => $fields,
        'paginator' => $paginator,
    ));

    return $this->render($this->entity['templates']['list'], array(
        'paginator' => $paginator,
        'fields' => $fields,
    ));
}
/**
 * Modifies the entity properties via an Ajax call. Currently it's used for
 * changing the value of boolean properties when the user clicks on the
 * flip switched displayed for boolean values in the 'list' action.
 *
 * @return Response The new property value cast to string as the response body
 *
 * @throws \Exception If the entity does not exist or the property is not a
 *                    writable toggle
 */
protected function ajaxEdit()
{
    $this->dispatch(EasyAdminEvents::PRE_EDIT);

    if (!$entity = $this->em->getRepository($this->entity['class'])->find($this->request->query->get('id'))) {
        throw new \Exception('The entity does not exist.');
    }

    $propertyName = $this->request->query->get('property');

    // Fix: check that the property exists *before* reading its metadata.
    // The previous code dereferenced the array entry first and only then
    // called isset(), which raised an "undefined index" notice whenever an
    // unknown property name was submitted.
    if (!isset($this->entity['list']['fields'][$propertyName])) {
        throw new \Exception(sprintf('The "%s" property is not a switchable toggle.', $propertyName));
    }

    $propertyMetadata = $this->entity['list']['fields'][$propertyName];
    if ('toggle' != $propertyMetadata['dataType']) {
        throw new \Exception(sprintf('The "%s" property is not a switchable toggle.', $propertyName));
    }

    if (!$propertyMetadata['isWritable']) {
        throw new \Exception(sprintf('It\'s not possible to toggle the value of the "%s" boolean property of the "%s" entity.', $propertyName, $this->entity['name']));
    }

    // Only the literal string 'true' (case-insensitive) enables the toggle.
    $newValue = 'true' === strtolower($this->request->query->get('newValue'));

    $this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity, 'newValue' => $newValue));
    // Prefer the configured setter; fall back to direct property access.
    if (null !== $setter = $propertyMetadata['setter']) {
        $entity->{$setter}($newValue);
    } else {
        $entity->{$propertyName} = $newValue;
    }
    $this->em->flush();
    $this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity, 'newValue' => $newValue));
    $this->dispatch(EasyAdminEvents::POST_EDIT);

    // (string) true === '1' and (string) false === '', as before.
    return new Response((string) $newValue);
}
/**
 * Creates a new object of the current managed entity.
 * This method is mostly here for override convenience, because it allows
 * the user to use his own method to customize the entity instantiation.
 *
 * @return object A new, empty instance of the managed entity class
 */
protected function createNewEntity()
{
    $entityClass = $this->entity['class'];

    return new $entityClass();
}
/**
 * Allows applications to modify the entity associated with the item being
 * created before persisting it.
 *
 * Intentionally empty: subclasses override this (or a per-entity
 * prePersist<EntityName>Entity variant) to hook into the 'new' action.
 *
 * @param object $entity The entity about to be persisted
 */
protected function prePersistEntity($entity)
{
}
/**
 * Allows applications to modify the entity associated with the item being
 * edited before persisting it.
 *
 * Intentionally empty: subclasses override this (or a per-entity
 * preUpdate<EntityName>Entity variant) to hook into the 'edit' action.
 *
 * @param object $entity The entity about to be updated
 */
protected function preUpdateEntity($entity)
{
}
/**
 * Allows applications to modify the entity associated with the item being
 * deleted before removing it.
 *
 * Intentionally empty: subclasses override this (or a per-entity
 * preRemove<EntityName>Entity variant) to hook into the 'delete' action.
 *
 * @param object $entity The entity about to be removed
 */
protected function preRemoveEntity($entity)
{
}
/**
 * Performs a database query to get all the records related to the given
 * entity. It supports pagination and field sorting.
 *
 * @param string      $entityClass
 * @param int         $page
 * @param int         $maxPerPage
 * @param string|null $sortField
 * @param string|null $sortDirection Falls back to 'DESC' when missing/invalid
 *
 * @return Pagerfanta The paginated query results
 */
protected function findAll($entityClass, $page = 1, $maxPerPage = 15, $sortField = null, $sortDirection = null)
{
    // Normalize the sort direction; anything other than ASC/DESC means DESC.
    $normalizedDirection = empty($sortDirection) ? '' : strtoupper($sortDirection);
    if (!in_array($normalizedDirection, array('ASC', 'DESC'))) {
        $sortDirection = 'DESC';
    }

    $queryBuilder = $this->executeDynamicMethod('create<EntityName>ListQueryBuilder', array($entityClass, $sortDirection, $sortField));

    $this->dispatch(EasyAdminEvents::POST_LIST_QUERY_BUILDER, array(
        'query_builder' => $queryBuilder,
        'sort_field' => $sortField,
        'sort_direction' => $sortDirection,
    ));

    $paginator = new Pagerfanta(new DoctrineORMAdapter($queryBuilder, false, false));
    $paginator->setMaxPerPage($maxPerPage);
    $paginator->setCurrentPage($page);

    return $paginator;
}
/**
 * Creates Query Builder instance for all the records.
 *
 * @param string      $entityClass
 * @param string      $sortDirection
 * @param string|null $sortField     When null, no ORDER BY clause is added
 *
 * @return QueryBuilder The Query Builder instance
 */
protected function createListQueryBuilder($entityClass, $sortDirection, $sortField = null)
{
    $queryBuilder = $this->em->createQueryBuilder()
        ->select('entity')
        ->from($entityClass, 'entity');

    if (null === $sortField) {
        return $queryBuilder;
    }

    return $queryBuilder->orderBy('entity.'.$sortField, $sortDirection);
}
/**
 * Performs a database query based on the search query provided by the user.
 * It supports pagination and field sorting.
 *
 * @param string $entityClass
 * @param string $searchQuery
 * @param array  $searchableFields
 * @param int    $page
 * @param int    $maxPerPage
 *
 * @return Pagerfanta The paginated query results
 */
protected function findBy($entityClass, $searchQuery, array $searchableFields, $page = 1, $maxPerPage = 15)
{
    $queryBuilder = $this->executeDynamicMethod(
        'create<EntityName>SearchQueryBuilder',
        array($entityClass, $searchQuery, $searchableFields)
    );

    // Let listeners tweak the query builder before it is executed.
    $this->dispatch(EasyAdminEvents::POST_SEARCH_QUERY_BUILDER, array(
        'query_builder' => $queryBuilder,
        'search_query' => $searchQuery,
        'searchable_fields' => $searchableFields,
    ));

    $paginator = new Pagerfanta(new DoctrineORMAdapter($queryBuilder, false, false));
    $paginator->setMaxPerPage($maxPerPage);
    $paginator->setCurrentPage($page);

    return $paginator;
}
/**
 * Creates Query Builder instance for search query.
 *
 * Builds an OR expression over all searchable fields, choosing a comparison
 * strategy per field type: exact match for numeric fields, LIKE for text
 * fields and IN(words) for everything else.
 *
 * @param string $entityClass
 * @param string $searchQuery
 * @param array  $searchableFields
 *
 * @return QueryBuilder The Query Builder instance
 */
protected function createSearchQueryBuilder($entityClass, $searchQuery, array $searchableFields)
{
    $databaseIsPostgreSql = $this->isPostgreSqlUsedByEntity($entityClass);
    $queryBuilder = $this->em->createQueryBuilder()->select('entity')->from($entityClass, 'entity');
    $queryConditions = $queryBuilder->expr()->orX();
    $queryParameters = array();
    foreach ($searchableFields as $name => $metadata) {
        $isNumericField = in_array($metadata['dataType'], array('integer', 'number', 'smallint', 'bigint', 'decimal', 'float'));
        $isTextField = in_array($metadata['dataType'], array('string', 'text', 'guid'));
        // NOTE: the parameter names are reused across iterations; this is
        // safe because every field binds the exact same value.
        if (is_numeric($searchQuery) && $isNumericField) {
            $queryConditions->add(sprintf('entity.%s = :exact_query', $name));
            $queryParameters['exact_query'] = 0 + $searchQuery; // adding '0' turns the string into a numeric value
        } elseif ($isTextField) {
            $queryConditions->add(sprintf('entity.%s LIKE :fuzzy_query', $name));
            $queryParameters['fuzzy_query'] = '%'.$searchQuery.'%';
        } else {
            // PostgreSQL doesn't allow to compare string values with non-string columns (e.g. 'id')
            if ($databaseIsPostgreSql) {
                continue;
            }
            // Fallback: match any whitespace-separated word of the query.
            $queryConditions->add(sprintf('entity.%s IN (:words)', $name));
            $queryParameters['words'] = explode(' ', $searchQuery);
        }
    }
    $queryBuilder->add('where', $queryConditions)->setParameters($queryParameters);
    return $queryBuilder;
}
/**
 * Creates the form used to edit an entity.
 *
 * Thin wrapper around createEntityForm() with the 'edit' view selected.
 *
 * @param object $entity
 * @param array  $entityProperties
 *
 * @return Form
 */
protected function createEditForm($entity, array $entityProperties)
{
    return $this->createEntityForm($entity, $entityProperties, 'edit');
}
/**
 * Creates the form used to create an entity.
 *
 * Thin wrapper around createEntityForm() with the 'new' view selected.
 *
 * @param object $entity
 * @param array  $entityProperties
 *
 * @return Form
 */
protected function createNewForm($entity, array $entityProperties)
{
    return $this->createEntityForm($entity, $entityProperties, 'new');
}
/**
 * Creates the form builder of the form used to create or edit the given entity.
 *
 * @param object $entity
 * @param string $view   The name of the view where this form is used ('new' or 'edit')
 *
 * @return FormBuilder
 */
protected function createEntityFormBuilder($entity, $view)
{
    // Start from the view-specific options and inject the context EasyAdmin
    // needs to resolve the field configuration.
    $options = $this->entity[$view]['form_options'];
    $options['entity'] = $this->entity['name'];
    $options['view'] = $view;

    // Symfony <2.8 resolves form types by alias; newer versions by FQCN.
    $type = $this->useLegacyFormComponent()
        ? 'easyadmin'
        : 'JavierEguiluz\\Bundle\\EasyAdminBundle\\Form\\Type\\EasyAdminFormType';

    return $this->get('form.factory')->createNamedBuilder('form', $type, $entity, $options);
}
/**
 * Creates the form object used to create or edit the given entity.
 *
 * First looks for a per-entity 'create<EntityName>EntityForm' method; when
 * absent, falls back to the (possibly customized) form builder.
 *
 * @param object $entity
 * @param array  $entityProperties
 * @param string $view             'new' or 'edit'
 *
 * @return Form
 *
 * @throws \Exception If a custom method returns the wrong type
 */
protected function createEntityForm($entity, array $entityProperties, $view)
{
    if (method_exists($this, $customMethodName = 'create'.$this->entity['name'].'EntityForm')) {
        $form = $this->{$customMethodName}($entity, $entityProperties, $view);
        if (!$form instanceof FormInterface) {
            throw new \Exception(sprintf(
                'The "%s" method must return a FormInterface, "%s" given.',
                $customMethodName, is_object($form) ? get_class($form) : gettype($form)
            ));
        }
        return $form;
    }
    $formBuilder = $this->executeDynamicMethod('create<EntityName>EntityFormBuilder', array($entity, $view));
    if (!$formBuilder instanceof FormBuilderInterface) {
        // NOTE(review): this message always names 'createEntityForm' even when
        // the failing method was a per-entity builder — confirm whether the
        // dynamic method name should be reported instead.
        throw new \Exception(sprintf(
            'The "%s" method must return a FormBuilderInterface, "%s" given.',
            'createEntityForm', is_object($formBuilder) ? get_class($formBuilder) : gettype($formBuilder)
        ));
    }
    return $formBuilder->getForm();
}
/**
 * Creates the form used to delete an entity. It must be a form because
 * the deletion of the entity are always performed with the 'DELETE' HTTP method,
 * which requires a form to work in the current browsers.
 *
 * @param string $entityName
 * @param int    $entityId
 *
 * @return Form
 */
protected function createDeleteForm($entityName, $entityId)
{
    $actionUrl = $this->generateUrl('easyadmin', array('action' => 'delete', 'entity' => $entityName, 'id' => $entityId));

    /** @var FormBuilder $formBuilder */
    $formBuilder = $this->get('form.factory')->createNamedBuilder('delete_form');
    $formBuilder->setAction($actionUrl);
    $formBuilder->setMethod('DELETE');

    // Symfony <2.8 resolves button types by alias; newer versions by FQCN.
    $submitButtonType = $this->useLegacyFormComponent()
        ? 'submit'
        : 'Symfony\\Component\\Form\\Extension\\Core\\Type\\SubmitType';
    $formBuilder->add('submit', $submitButtonType, array('label' => 'Delete'));

    return $formBuilder->getForm();
}
/**
 * Utility shortcut to render a template as a 404 error page.
 *
 * @param string $view
 * @param array  $parameters
 *
 * @deprecated Use an appropriate exception instead of this method.
 *
 * @return Response
 */
protected function render404error($view, array $parameters = array())
{
    $notFoundResponse = new Response('', 404);

    return $this->render($view, $parameters, $notFoundResponse);
}
/**
 * Utility method that checks if the given action is allowed for
 * the current entity.
 *
 * @param string $actionName
 *
 * @return bool True unless the action is listed in 'disabled_actions'
 */
protected function isActionAllowed($actionName)
{
    // Strict comparison so e.g. numeric-ish action names never match loosely.
    return !in_array($actionName, $this->entity['disabled_actions'], true);
}
/**
 * Utility shortcut to render an error when the requested action is not allowed
 * for the given entity.
 *
 * @param string $action
 *
 * @deprecated Use the ForbiddenException instead of this method.
 *
 * @return Response
 */
protected function renderForbiddenActionError($action)
{
    $forbiddenResponse = new Response('', 403);

    return $this->render('@EasyAdmin/error/forbidden_action.html.twig', array('action' => $action), $forbiddenResponse);
}
/**
 * Returns true if the data of the given entity are stored in a database
 * of Type PostgreSQL.
 *
 * @param string $entityClass
 *
 * @return bool
 */
private function isPostgreSqlUsedByEntity($entityClass)
{
    $entityManager = $this->get('doctrine')->getManagerForClass($entityClass);
    $platform = $entityManager->getConnection()->getDatabasePlatform();

    return $platform instanceof PostgreSqlPlatform;
}
/**
 * Given a method name pattern, it looks for the customized version of that
 * method (based on the entity name) and executes it. If the custom method
 * does not exist, it executes the regular method.
 *
 * For example:
 *   executeDynamicMethod('create<EntityName>Entity') and the entity name is 'User'
 *   if 'createUserEntity()' exists, execute it; otherwise execute 'createEntity()'
 *
 * @param string $methodNamePattern The pattern of the method name (dynamic parts are enclosed with <> angle brackets)
 * @param array  $arguments         The arguments passed to the executed method
 *
 * @return mixed Whatever the resolved method returns
 */
private function executeDynamicMethod($methodNamePattern, array $arguments = array())
{
    $customMethodName = str_replace('<EntityName>', $this->entity['name'], $methodNamePattern);
    $methodName = method_exists($this, $customMethodName)
        ? $customMethodName
        : str_replace('<EntityName>', '', $methodNamePattern);

    return call_user_func_array(array($this, $methodName), $arguments);
}
/**
 * Returns true if the legacy Form component is being used by the application.
 *
 * StringUtil was introduced in Symfony 2.8, so its absence indicates a
 * pre-2.8 Form component where types are referenced by alias.
 *
 * @return bool
 */
private function useLegacyFormComponent()
{
    return !class_exists('Symfony\\Component\\Form\\Util\\StringUtil');
}
}
| 1 | 9,412 | Why send the parameters? They're accessible directly from `$this->request` so there's no need to inject them in the method | EasyCorp-EasyAdminBundle | php |
@@ -186,6 +186,18 @@ class BrowserPage(QWebPage):
errpage.encoding = 'utf-8'
return True
+ def chooseFile(self, parent_frame: QWebFrame, suggested_file: str):
+ """Override chooseFiles to (optionally) invoke custom file uploader."""
+ handler = config.val.fileselect.handler
+ if handler == "default":
+ return super().chooseFile(parent_frame, suggested_file)
+
+ selected_file = shared.choose_file(multiple=False)
+ if selected_file == []:
+ return ''
+ else:
+ return selected_file[0]
+
def _handle_multiple_files(self, info, files):
"""Handle uploading of multiple files.
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The main browser widgets."""
import html
import functools
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QUrl, QPoint
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtNetwork import QNetworkReply, QNetworkRequest
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtPrintSupport import QPrintDialog
from PyQt5.QtWebKitWidgets import QWebPage, QWebFrame
from qutebrowser.config import config
from qutebrowser.browser import pdfjs, shared, downloads
from qutebrowser.browser.webkit import http
from qutebrowser.browser.webkit.network import networkmanager
from qutebrowser.utils import message, usertypes, log, jinja, objreg
from qutebrowser.qt import sip
class BrowserPage(QWebPage):
"""Our own QWebPage with advanced features.
Attributes:
error_occurred: Whether an error occurred while loading.
_extension_handlers: Mapping of QWebPage extensions to their handlers.
_networkmanager: The NetworkManager used.
_win_id: The window ID this BrowserPage is associated with.
_ignore_load_started: Whether to ignore the next loadStarted signal.
_is_shutting_down: Whether the page is currently shutting down.
_tabdata: The TabData object of the tab this page is in.
Signals:
shutting_down: Emitted when the page is currently shutting down.
reloading: Emitted before a web page reloads.
arg: The URL which gets reloaded.
navigation_request: Emitted on acceptNavigationRequest.
"""
shutting_down = pyqtSignal()
reloading = pyqtSignal(QUrl)
navigation_request = pyqtSignal(usertypes.NavigationRequest)
def __init__(self, win_id, tab_id, tabdata, private, parent=None):
    """Initialize the page and wire up all page-level signals.

    Args:
        win_id: The window ID this page belongs to.
        tab_id: The tab ID this page belongs to.
        tabdata: The TabData object of the tab this page is in.
        private: Whether this page uses private browsing.
        parent: The parent QObject.
    """
    super().__init__(parent)
    self._win_id = win_id
    self._tabdata = tabdata
    self._is_shutting_down = False
    # Dispatch table for QWebPage::extension(); see supportsExtension().
    self._extension_handlers = {
        QWebPage.ErrorPageExtension: self._handle_errorpage,
        QWebPage.ChooseMultipleFilesExtension: self._handle_multiple_files,
    }
    self._ignore_load_started = False
    self.error_occurred = False
    self._networkmanager = networkmanager.NetworkManager(
        win_id=win_id, tab_id=tab_id, private=private, parent=self)
    self.setNetworkAccessManager(self._networkmanager)
    # Route content QtWebKit can't display to unsupportedContent instead of
    # silently dropping it, so we can offer a download.
    self.setForwardUnsupportedContent(True)
    self.reloading.connect(self._networkmanager.clear_rejected_ssl_errors)
    self.printRequested.connect(self.on_print_requested)
    self.downloadRequested.connect(self.on_download_requested)
    self.unsupportedContent.connect(self.on_unsupported_content)
    self.loadStarted.connect(self.on_load_started)
    self.featurePermissionRequested.connect(
        self._on_feature_permission_requested)
    self.saveFrameStateRequested.connect(
        self.on_save_frame_state_requested)
    self.restoreFrameStateRequested.connect(
        self.on_restore_frame_state_requested)
    # Greasemonkey user scripts: inject into the main frame on load, and
    # hook newly created subframes as they appear.
    self.loadFinished.connect(
        functools.partial(self._inject_userjs, self.mainFrame()))
    self.frameCreated.connect(self._connect_userjs_signals)
@pyqtSlot('QWebFrame*')
def _connect_userjs_signals(self, frame):
    """Connect userjs related signals to `frame`.

    Connect the signals used as triggers for injecting user
    JavaScripts into the passed QWebFrame.

    Args:
        frame: The newly created QWebFrame to hook.
    """
    log.greasemonkey.debug("Connecting to frame {} ({})"
                           .format(frame, frame.url().toDisplayString()))
    frame.loadFinished.connect(
        functools.partial(self._inject_userjs, frame))
def javaScriptPrompt(self, frame, js_msg, default):
    """Override javaScriptPrompt to use qutebrowser prompts.

    Returns a (accepted, text) tuple as required by QWebPage.
    """
    # Refuse new prompts while the page is being torn down.
    if self._is_shutting_down:
        return (False, "")
    try:
        return shared.javascript_prompt(frame.url(), js_msg, default,
                                        abort_on=[self.loadStarted,
                                                  self.shutting_down])
    except shared.CallSuper:
        # shared code asked us to fall back to the native Qt prompt.
        return super().javaScriptPrompt(frame, js_msg, default)
def _handle_errorpage(self, info, errpage):
    """Display an error page if needed.

    Loosely based on Helpviewer/HelpBrowserWV.py from eric5
    (line 260 @ 5d937eb378dd)

    Args:
        info: The QWebPage.ErrorPageExtensionOption instance.
        errpage: The QWebPage.ErrorPageExtensionReturn instance, where the
                 error page will get written to.

    Return:
        False if no error page should be displayed, True otherwise.
    """
    # Errors that are expected during normal operation and should not
    # produce an error page.
    ignored_errors = [
        (QWebPage.QtNetwork, QNetworkReply.OperationCanceledError),
        # "Loading is handled by the media engine"
        (QWebPage.WebKit, 203),
        # "Frame load interrupted by policy change"
        (QWebPage.WebKit, 102),
    ]
    errpage.baseUrl = info.url
    urlstr = info.url.toDisplayString()
    if (info.domain, info.error) == (QWebPage.QtNetwork,
                                     QNetworkReply.ProtocolUnknownError):
        # Unknown scheme -> offer to hand the URL to an external program.
        # For some reason, we get a segfault when we use
        # QDesktopServices::openUrl with info.url directly - however it
        # works when we construct a copy of it.
        url = QUrl(info.url)
        scheme = url.scheme()
        message.confirm_async(
            title="Open external application for {}-link?".format(scheme),
            text="URL: <b>{}</b>".format(
                html.escape(url.toDisplayString())),
            yes_action=functools.partial(QDesktopServices.openUrl, url),
            url=info.url.toString(QUrl.RemovePassword | QUrl.FullyEncoded))
        return True
    elif (info.domain, info.error) in ignored_errors:
        log.webview.debug("Ignored error on {}: {} (error domain: {}, "
                          "error code: {})".format(
                              urlstr, info.errorString, info.domain,
                              info.error))
        return False
    else:
        error_str = info.errorString
        if error_str == networkmanager.HOSTBLOCK_ERROR_STRING:
            # We don't set error_occurred in this case.
            error_str = "Request blocked by host blocker."
        main_frame = info.frame.page().mainFrame()
        if info.frame != main_frame:
            # Content in an iframe -> Hide the frame so it doesn't use
            # any space. We can't hide the frame's documentElement
            # directly though.
            for elem in main_frame.documentElement().findAll('iframe'):
                if QUrl(elem.attribute('src')) == info.url:
                    elem.setAttribute('style', 'display: none')
            return False
        else:
            # Main frame failed -> render our own error page.
            self._ignore_load_started = True
            self.error_occurred = True
            log.webview.error("Error while loading {}: {}".format(
                urlstr, error_str))
            log.webview.debug("Error domain: {}, error code: {}".format(
                info.domain, info.error))
            title = "Error loading page: {}".format(urlstr)
            error_html = jinja.render(
                'error.html',
                title=title, url=urlstr, error=error_str)
            errpage.content = error_html.encode('utf-8')
            errpage.encoding = 'utf-8'
            return True
def _handle_multiple_files(self, info, files):
    """Handle uploading of multiple files.

    Loosely based on Helpviewer/HelpBrowserWV.py from eric5.

    Args:
        info: The ChooseMultipleFilesExtensionOption instance.
        files: The ChooseMultipleFilesExtensionReturn instance to write
               return values to.

    Return:
        True on success, the superclass return value on failure.
    """
    # Use the page's first suggestion (if any) to pre-fill the dialog.
    suggested_file = (info.suggestedFileNames[0]
                      if info.suggestedFileNames else "")
    files.fileNames, _ = QFileDialog.getOpenFileNames(None, None,
                                                      suggested_file)
    return True
def shutdown(self):
    """Prepare the web page for being deleted."""
    self._is_shutting_down = True
    self.shutting_down.emit()
    download_manager = objreg.get('qtnetwork-download-manager')
    nam = self.networkAccessManager()
    if download_manager.has_downloads_with_nam(nam):
        # Keep the NAM alive under the download manager while downloads
        # using it are still in flight.
        nam.setParent(download_manager)
    else:
        nam.shutdown()
def display_content(self, reply, mimetype):
    """Display a QNetworkReply with an explicitly set mimetype.

    Args:
        reply: The finished QNetworkReply whose body should be shown.
        mimetype: The mimetype to force for the content.
    """
    self.mainFrame().setContent(reply.readAll(), mimetype, reply.url())
    reply.deleteLater()
def on_print_requested(self, frame):
    """Handle printing when requested via javascript."""
    printdiag = QPrintDialog()
    printdiag.setAttribute(Qt.WA_DeleteOnClose)
    # Print asynchronously once the user confirms the dialog.
    printdiag.open(lambda: frame.print(printdiag.printer()))
def on_download_requested(self, request):
    """Called when the user wants to download a link.

    We need to construct a copy of the QNetworkRequest here as the
    download_manager needs it async and we'd get a segfault otherwise as
    soon as the user has entered the filename, as Qt seems to delete it
    after this slot returns.
    """
    req = QNetworkRequest(request)
    download_manager = objreg.get('qtnetwork-download-manager')
    download_manager.get_request(req, qnam=self.networkAccessManager())
@pyqtSlot('QNetworkReply*')
def on_unsupported_content(self, reply):
    """Handle an unsupportedContent signal.

    Most likely this will mean we need to download the reply, but we
    correct for some common errors the server do.

    At some point we might want to implement the MIME Sniffing standard
    here: http://mimesniff.spec.whatwg.org/
    """
    inline, suggested_filename = http.parse_content_disposition(reply)
    download_manager = objreg.get('qtnetwork-download-manager')
    if not inline:
        # Content-Disposition: attachment -> force download
        download_manager.fetch(reply,
                               suggested_filename=suggested_filename)
        return
    mimetype, _rest = http.parse_content_type(reply)
    if mimetype == 'image/jpg':
        # Some servers (e.g. the LinkedIn CDN) send a non-standard
        # image/jpg (instead of image/jpeg, defined in RFC 1341 section
        # 7.5). If this is the case, we force displaying with a corrected
        # mimetype.
        if reply.isFinished():
            self.display_content(reply, 'image/jpeg')
        else:
            reply.finished.connect(functools.partial(
                self.display_content, reply, 'image/jpeg'))
    elif pdfjs.should_use_pdfjs(mimetype, reply.url()):
        # Render PDFs inline via pdf.js instead of downloading.
        download_manager.fetch(reply,
                               target=downloads.PDFJSDownloadTarget(),
                               auto_remove=True)
    else:
        # Unknown mimetype, so download anyways.
        download_manager.fetch(reply,
                               suggested_filename=suggested_filename)
@pyqtSlot()
def on_load_started(self):
    """Reset error_occurred when loading of a new page started.

    A single loadStarted signal can be skipped (e.g. right after an error
    page was set up) via the _ignore_load_started flag.
    """
    if not self._ignore_load_started:
        self.error_occurred = False
    else:
        self._ignore_load_started = False
def _inject_userjs(self, frame):
    """Inject user JavaScripts into the page.

    Args:
        frame: The QWebFrame to inject the user scripts into.
    """
    if sip.isdeleted(frame):
        # The C++ side of the frame is already gone; nothing to inject into.
        log.greasemonkey.debug("_inject_userjs called for deleted frame!")
        return
    url = frame.url()
    if url.isEmpty():
        url = frame.requestedUrl()
    log.greasemonkey.debug("_inject_userjs called for {} ({})"
                           .format(frame, url.toDisplayString()))
    greasemonkey = objreg.get('greasemonkey')
    scripts = greasemonkey.scripts_for(url)
    # QtWebKit has trouble providing us with signals representing
    # page load progress at reasonable times, so we just load all
    # scripts on the same event.
    toload = scripts.start + scripts.end + scripts.idle
    if url.isEmpty():
        # This happens during normal usage like with view source but may
        # also indicate a bug.
        log.greasemonkey.debug("Not running scripts for frame with no "
                               "url: {}".format(frame))
        assert not toload, toload
    for script in toload:
        # Subframes only get scripts that explicitly opted in.
        if frame is self.mainFrame() or script.runs_on_sub_frames:
            log.webview.debug('Running GM script: {}'.format(script.name))
            frame.evaluateJavaScript(script.code())
@pyqtSlot('QWebFrame*', 'QWebPage::Feature')
def _on_feature_permission_requested(self, frame, feature):
    """Ask the user for approval for geolocation/notifications."""
    if not isinstance(frame, QWebFrame):  # pragma: no cover
        # This makes no sense whatsoever, but someone reported this being
        # called with a QBuffer...
        log.misc.error("on_feature_permission_requested got called with "
                       "{!r}!".format(frame))
        return
    # Map the Qt feature to the config option governing it and the text
    # shown in the permission prompt.
    options = {
        QWebPage.Notifications: 'content.notifications',
        QWebPage.Geolocation: 'content.geolocation',
    }
    messages = {
        QWebPage.Notifications: 'show notifications',
        QWebPage.Geolocation: 'access your location',
    }
    yes_action = functools.partial(
        self.setFeaturePermission, frame, feature,
        QWebPage.PermissionGrantedByUser)
    no_action = functools.partial(
        self.setFeaturePermission, frame, feature,
        QWebPage.PermissionDeniedByUser)
    question = shared.feature_permission(
        url=frame.url(),
        option=options[feature], msg=messages[feature],
        yes_action=yes_action, no_action=no_action,
        abort_on=[self.shutting_down, self.loadStarted])
    if question is not None:
        # Abort the pending question if Qt cancels the request later on.
        self.featurePermissionRequestCanceled.connect(
            functools.partial(self._on_feature_permission_cancelled,
                              question, frame, feature))
def _on_feature_permission_cancelled(self, question, frame, feature,
                                     cancelled_frame, cancelled_feature):
    """Slot invoked when a feature permission request was cancelled.

    To be used with functools.partial.
    """
    # Only abort if the cancellation refers to *this* pending question.
    if frame is cancelled_frame and feature == cancelled_feature:
        try:
            question.abort()
        except RuntimeError:
            # The question could already be deleted, e.g. because it was
            # aborted after a loadStarted signal.
            pass
def on_save_frame_state_requested(self, frame, item):
    """Save scroll position and zoom in history.

    Args:
        frame: The QWebFrame which gets saved.
        item: The QWebHistoryItem to be saved.
    """
    # Only the main frame's state is tracked in history.
    if frame != self.mainFrame():
        return
    data = {
        'zoom': frame.zoomFactor(),
        'scroll-pos': frame.scrollPosition(),
    }
    item.setUserData(data)
def on_restore_frame_state_requested(self, frame):
    """Restore scroll position and zoom from history.

    Args:
        frame: The QWebFrame which gets restored.
    """
    if frame != self.mainFrame():
        return
    data = self.history().currentItem().userData()
    if data is None:
        return
    if 'zoom' in data:
        frame.page().view().tab.zoom.set_factor(data['zoom'])
    # Only restore the scroll position if the user hasn't scrolled yet.
    if 'scroll-pos' in data and frame.scrollPosition() == QPoint(0, 0):
        frame.setScrollPosition(data['scroll-pos'])
def userAgentForUrl(self, url):
    """Override QWebPage::userAgentForUrl to customize the user agent.

    Uses the per-URL configured value when set, otherwise Qt's default.
    """
    configured_ua = config.instance.get('content.headers.user_agent',
                                        url=url)
    if configured_ua is None:
        return super().userAgentForUrl(url)
    return configured_ua
def supportsExtension(self, ext):
    """Override QWebPage::supportsExtension to provide error pages.

    Args:
        ext: The extension to check for.

    Return:
        True if the extension can be handled, False otherwise.
    """
    # Supported extensions are exactly those with a registered handler.
    return ext in self._extension_handlers
def extension(self, ext, opt, out):
    """Override QWebPage::extension to provide error pages.

    Args:
        ext: The extension.
        opt: Extension options instance.
        out: Extension output instance.

    Return:
        Handler return value.
    """
    try:
        handler = self._extension_handlers[ext]
    except KeyError:
        # Qt asked for an extension we never claimed to support.
        log.webview.warning("Extension {} not supported!".format(ext))
        return super().extension(ext, opt, out)
    return handler(opt, out)
def javaScriptAlert(self, frame, js_msg):
    """Override javaScriptAlert to use qutebrowser prompts."""
    # Ignore alerts while the page is being torn down.
    if self._is_shutting_down:
        return
    try:
        shared.javascript_alert(frame.url(), js_msg,
                                abort_on=[self.loadStarted,
                                          self.shutting_down])
    except shared.CallSuper:
        # shared code asked us to fall back to the native Qt dialog.
        super().javaScriptAlert(frame, js_msg)
def javaScriptConfirm(self, frame, js_msg):
    """Override javaScriptConfirm to use the statusbar."""
    # Deny confirms while the page is being torn down.
    if self._is_shutting_down:
        return False
    try:
        return shared.javascript_confirm(frame.url(), js_msg,
                                         abort_on=[self.loadStarted,
                                                   self.shutting_down])
    except shared.CallSuper:
        # shared code asked us to fall back to the native Qt dialog.
        return super().javaScriptConfirm(frame, js_msg)
def javaScriptConsoleMessage(self, msg, line, source):
    """Override javaScriptConsoleMessage to use debug log."""
    # QtWebKit doesn't report a log level, hence 'unknown'.
    shared.javascript_log_message(usertypes.JsLogLevel.unknown,
                                  source, line, msg)
def acceptNavigationRequest(self,
                            frame: QWebFrame,
                            request: QNetworkRequest,
                            typ: QWebPage.NavigationType) -> bool:
    """Override acceptNavigationRequest to handle clicked links.

    Setting linkDelegationPolicy to DelegateAllLinks and using a slot bound
    to linkClicked won't work correctly, because when in a frameset, we
    have no idea in which frame the link should be opened.

    Checks if it should open it in a tab (middle-click or control) or not,
    and then conditionally opens the URL here or in another tab/window.
    """
    # Translate Qt's navigation types into qutebrowser's own enum.
    type_map = {
        QWebPage.NavigationTypeLinkClicked:
            usertypes.NavigationRequest.Type.link_clicked,
        QWebPage.NavigationTypeFormSubmitted:
            usertypes.NavigationRequest.Type.form_submitted,
        QWebPage.NavigationTypeFormResubmitted:
            usertypes.NavigationRequest.Type.form_resubmitted,
        QWebPage.NavigationTypeBackOrForward:
            usertypes.NavigationRequest.Type.back_forward,
        QWebPage.NavigationTypeReload:
            usertypes.NavigationRequest.Type.reloaded,
        QWebPage.NavigationTypeOther:
            usertypes.NavigationRequest.Type.other,
    }
    is_main_frame = frame is self.mainFrame()
    navigation = usertypes.NavigationRequest(url=request.url(),
                                             navigation_type=type_map[typ],
                                             is_main_frame=is_main_frame)
    if navigation.navigation_type == navigation.Type.reloaded:
        self.reloading.emit(navigation.url)
    # Listeners may veto the navigation by flipping navigation.accepted.
    self.navigation_request.emit(navigation)
    return navigation.accepted
| 1 | 22,807 | Tiny nit: I would prefer `not selected_file` or `len(selected_file) == 0`, as if choose_file starts returning, for example, tuples instead of lists, this won't break. | qutebrowser-qutebrowser | py |
@@ -329,6 +329,15 @@ func (e *ETCD) join(ctx context.Context, clientAccessInfo *clientaccess.Info) er
}
for _, member := range members.Members {
+ memberNodeName := strings.Split(member.Name, "-")[0]
+ if memberNodeName == e.config.ServerNodeName {
+ // make sure to remove the name file if a duplicate node name is used
+ nameFile := nameFile(e.config)
+ if err := os.Remove(nameFile); err != nil {
+ return err
+ }
+ return errors.New("Failed to join etcd cluster due to duplicate node names, please use unique node name for the server")
+ }
for _, peer := range member.PeerURLs {
u, err := url.Parse(peer)
if err != nil { | 1 | package etcd
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/k3s-io/kine/pkg/client"
endpoint2 "github.com/k3s-io/kine/pkg/endpoint"
"github.com/minio/minio-go/v7"
"github.com/pkg/errors"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3s/pkg/clientaccess"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/control/deps"
"github.com/rancher/k3s/pkg/daemons/executor"
"github.com/rancher/k3s/pkg/version"
controllerv1 "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
"github.com/robfig/cron/v3"
"github.com/sirupsen/logrus"
"go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/etcdutl/v3/snapshot"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/util/retry"
)
const (
	// endpoint is the etcd client URL on the local node; all status and
	// maintenance calls in this package go through the loopback listener.
	endpoint = "https://127.0.0.1:2379"
	// testTimeout bounds the Test() health check and each learner-management pass.
	testTimeout = time.Second * 10
	// manageTickerTime is how often manageLearners wakes up.
	manageTickerTime = time.Second * 15
	// learnerMaxStallTime is how long a learner may make no raft progress
	// before it is evicted from the cluster.
	learnerMaxStallTime = time.Minute * 5
	// memberRemovalTimeout bounds a RemovePeer call.
	memberRemovalTimeout = time.Minute * 1

	// defaultDialTimeout is intentionally short so that connections timeout within the testTimeout defined above
	defaultDialTimeout = 2 * time.Second
	// other defaults from k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go
	defaultKeepAliveTime = 30 * time.Second
	defaultKeepAliveTimeout = 10 * time.Second

	// maxBackupRetention is how many timestamped copies of the db dir
	// backupDirWithRetention keeps when a tombstone triggers a rejoin.
	maxBackupRetention = 5

	// Node labels used to identify control-plane/etcd roles.
	MasterLabel = "node-role.kubernetes.io/master"
	ControlPlaneLabel = "node-role.kubernetes.io/control-plane"
	EtcdRoleLabel = "node-role.kubernetes.io/etcd"
)
var (
	// learnerProgressKey is the etcd key under which learner promotion
	// progress (learnerProgress JSON) is persisted.
	learnerProgressKey = version.Program + "/etcd/learnerProgress"
	// AddressKey will contain the value of api addresses list
	AddressKey = version.Program + "/apiaddresses"

	// snapshotConfigMapName is the kube-system ConfigMap that mirrors the
	// current snapshot set (see StoreSnapshotData).
	snapshotConfigMapName = version.Program + "-etcd-snapshots"

	// Annotations used to sync a node's etcd member name/address onto its Node object.
	NodeNameAnnotation = "etcd." + version.Program + ".cattle.io/node-name"
	NodeAddressAnnotation = "etcd." + version.Program + ".cattle.io/node-address"
)
type NodeControllerGetter func() controllerv1.NodeController
// ETCD manages the lifecycle of the embedded etcd member on this node:
// bootstrap/join, health checks, learner promotion, and snapshots.
type ETCD struct {
	client *clientv3.Client // client connected to the local etcd endpoint
	config *config.Control // server control configuration
	name string // unique member name; persisted to disk by setName
	runtime *config.ControlRuntime // certificate/key paths and controllers
	address string // advertise IP used to build peer/client URLs
	cron *cron.Cron // scheduler driving periodic snapshots
	s3 *S3 // lazily initialized by initS3IfNil when EtcdS3 is enabled
}
// learnerProgress records how far a learner member has caught up on the
// raft log; it is stored as JSON in etcd under learnerProgressKey so the
// (possibly changing) leader can track stalls across ticks.
type learnerProgress struct {
	ID uint64 `json:"id,omitempty"` // etcd member ID being tracked
	Name string `json:"name,omitempty"` // etcd member name being tracked
	RaftAppliedIndex uint64 `json:"raftAppliedIndex,omitempty"` // highest index seen so far
	LastProgress metav1.Time `json:"lastProgress,omitempty"` // last time the index advanced
}
// Members contains a slice that holds all
// members of the cluster. It is the JSON payload served by the /db/info
// endpoint and consumed by joining nodes (see ClientURLs).
type Members struct {
	Members []*etcdserverpb.Member `json:"members"`
}
// NewETCD returns an ETCD instance whose snapshot scheduler (cron) is
// initialized and ready to have jobs registered; everything else is filled
// in later via SetControlConfig/Register.
func NewETCD() *ETCD {
	e := &ETCD{}
	e.cron = cron.New()
	return e
}
// EndpointName returns the name of the endpoint. This identifies the
// datastore driver ("etcd") to the managed-database machinery.
func (e *ETCD) EndpointName() string {
	return "etcd"
}
// SetControlConfig sets the given config on the etcd struct. It does not
// copy; callers must not mutate config concurrently afterwards.
func (e *ETCD) SetControlConfig(config *config.Control) {
	e.config = config
}
// Test ensures that the local node is a voting member of the target cluster.
// If it is still a learner or not a part of the cluster, an error is raised.
func (e *ETCD) Test(ctx context.Context) error {
	ctx, cancel := context.WithTimeout(ctx, testTimeout)
	defer cancel()

	// Query the local member's status first: a learner is reachable but
	// must not be treated as healthy/voting yet.
	status, err := e.client.Status(ctx, endpoint)
	if err != nil {
		return err
	}

	if status.IsLearner {
		return errors.New("this server has not yet been promoted from learner to voting member")
	}

	members, err := e.client.MemberList(ctx)
	if err != nil {
		return err
	}

	// Confirm that this node (matching both name and peer URL) appears in
	// the cluster's member list; collect name=url pairs for the error text.
	var memberNameUrls []string
	for _, member := range members.Members {
		for _, peerURL := range member.PeerURLs {
			if peerURL == e.peerURL() && e.name == member.Name {
				return nil
			}
		}
		if len(member.PeerURLs) > 0 {
			memberNameUrls = append(memberNameUrls, member.Name+"="+member.PeerURLs[0])
		}
	}
	return errors.Errorf("this server is a not a member of the etcd cluster. Found %v, expect: %s=%s", memberNameUrls, e.name, e.address)
}
// etcdDBDir returns the path to dataDir/db/etcd — the etcd data directory.
func etcdDBDir(config *config.Control) string {
	return filepath.Join(config.DataDir, "db", "etcd")
}
// walDir returns the path to etcdDBDir/member/wal; its existence is used as
// the marker that etcd has been initialized at least once (see IsInitialized).
func walDir(config *config.Control) string {
	return filepath.Join(etcdDBDir(config), "member", "wal")
}
// sqliteFile returns the path to the kine sqlite database
// (dataDir/db/state.db) used as the migration source in migrateFromSQLite.
func sqliteFile(config *config.Control) string {
	return filepath.Join(config.DataDir, "db", "state.db")
}
// nameFile returns the path to etcdDBDir/name, the file that persists this
// member's unique etcd name across restarts (see setName).
func nameFile(config *config.Control) string {
	return filepath.Join(etcdDBDir(config), "name")
}
// ResetFile returns the path to dataDir/db/reset-flag, the marker file
// written by Reset to prevent running a cluster reset twice.
func ResetFile(config *config.Control) string {
	return filepath.Join(config.DataDir, "db", "reset-flag")
}
// IsInitialized reports whether etcd has been brought up at least once on
// this node, using the existence of the WAL directory as the marker.
// Note: if the path exists but is a regular file, errors.Wrapf receives a
// nil error and this returns (false, nil), same as the original behavior.
func (e *ETCD) IsInitialized(ctx context.Context, config *config.Control) (bool, error) {
	dir := walDir(config)
	s, err := os.Stat(dir)
	switch {
	case err == nil && s.IsDir():
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, errors.Wrapf(err, "invalid state for wal directory %s", dir)
	}
}
// Reset resets an etcd node: optionally restores a snapshot, regenerates the
// member name, and restarts etcd as a forced new single-node cluster. The
// process exits (os.Exit) once the reset cluster is confirmed healthy.
func (e *ETCD) Reset(ctx context.Context, rebootstrap func() error) error {
	// Wait for etcd to come up as a new single-node cluster, then exit
	go func() {
		t := time.NewTicker(5 * time.Second)
		defer t.Stop()
		for range t.C {
			// Poll until this member is a healthy voting member again.
			if err := e.Test(ctx); err == nil {
				members, err := e.client.MemberList(ctx)
				if err != nil {
					continue
				}

				// storageBootstrap() - runtime structure has been written with correct certificate data
				if err := rebootstrap(); err != nil {
					logrus.Fatal(err)
				}

				// call functions to rewrite them from daemons/control/server.go (prepare())
				if err := deps.GenServerDeps(e.config, e.runtime); err != nil {
					logrus.Fatal(err)
				}

				// Only exit once the cluster has collapsed to just this node.
				if len(members.Members) == 1 && members.Members[0].Name == e.name {
					logrus.Infof("Etcd is running, restart without --cluster-reset flag now. Backup and delete ${datadir}/server/db on each peer etcd server and rejoin the nodes")
					os.Exit(0)
				}
			}
		}
	}()

	// If asked to restore from a snapshot, do so
	if e.config.ClusterResetRestorePath != "" {
		if e.config.EtcdS3 {
			// Pull the snapshot out of S3 into the local snapshot dir first.
			if err := e.initS3IfNil(ctx); err != nil {
				return err
			}
			logrus.Infof("Retrieving etcd snapshot %s from S3", e.config.ClusterResetRestorePath)
			if err := e.s3.Download(ctx); err != nil {
				return err
			}
			logrus.Infof("S3 download complete for %s", e.config.ClusterResetRestorePath)
		}

		info, err := os.Stat(e.config.ClusterResetRestorePath)
		if os.IsNotExist(err) {
			return fmt.Errorf("etcd: snapshot path does not exist: %s", e.config.ClusterResetRestorePath)
		}
		if info.IsDir() {
			return fmt.Errorf("etcd: snapshot path must be a file, not a directory: %s", e.config.ClusterResetRestorePath)
		}
		if err := e.Restore(ctx); err != nil {
			return err
		}
	}

	// Force-generate a fresh member name so the restored data dir does not
	// collide with this member's previous identity.
	if err := e.setName(true); err != nil {
		return err
	}
	// touch a file to avoid multiple resets
	if err := ioutil.WriteFile(ResetFile(e.config), []byte{}, 0600); err != nil {
		return err
	}
	return e.newCluster(ctx, true)
}
// Start starts the datastore. Depending on local state it either rejoins an
// existing data dir, bootstraps a brand-new cluster, or joins a remote
// cluster via the supplied clientAccessInfo.
func (e *ETCD) Start(ctx context.Context, clientAccessInfo *clientaccess.Info) error {
	existingCluster, err := e.IsInitialized(ctx, e.config)
	if err != nil {
		return errors.Wrapf(err, "configuration validation failed")
	}

	// Register and start the periodic snapshot job unless disabled.
	if !e.config.EtcdDisableSnapshots {
		e.setSnapshotFunction(ctx)
		e.cron.Start()
	}

	// Background loop that promotes/evicts learners; runs for the process lifetime.
	go e.manageLearners(ctx)

	if existingCluster {
		//check etcd dir permission
		etcdDir := etcdDBDir(e.config)
		info, err := os.Stat(etcdDir)
		if err != nil {
			return err
		}
		if info.Mode() != 0700 {
			if err := os.Chmod(etcdDir, 0700); err != nil {
				return err
			}
		}
		// Reuse whatever initial options a previous start recorded.
		opt, err := executor.CurrentETCDOptions()
		if err != nil {
			return err
		}
		return e.cluster(ctx, false, opt)
	}

	// No local state and no join token: bootstrap a new cluster.
	if clientAccessInfo == nil {
		return e.newCluster(ctx, false)
	}
	err = e.join(ctx, clientAccessInfo)
	return errors.Wrap(err, "joining etcd cluster")
}
// join attempts to add a member to an existing cluster: it fetches the
// current member list through an existing server, registers this node as a
// learner if it is not already present, and starts etcd with
// initial-cluster-state=existing.
func (e *ETCD) join(ctx context.Context, clientAccessInfo *clientaccess.Info) error {
	clientCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
	defer cancel()

	var (
		cluster []string
		add     = true
	)

	// Member list as advertised by the remote server's /db/info endpoint.
	clientURLs, memberList, err := ClientURLs(clientCtx, clientAccessInfo, e.config.PrivateIP)
	if err != nil {
		return err
	}

	client, err := GetClient(clientCtx, e.runtime, clientURLs...)
	if err != nil {
		return err
	}
	defer client.Close()

	members, err := client.MemberList(clientCtx)
	if err != nil {
		// Best effort: fall back to the /db/info list plus ourselves and
		// skip the MemberAdd call, assuming we were added previously.
		logrus.Errorf("Failed to get member list from etcd cluster. Will assume this member is already added")
		members = &clientv3.MemberListResponse{
			Members: append(memberList.Members, &etcdserverpb.Member{
				Name:     e.name,
				PeerURLs: []string{e.peerURL()},
			}),
		}
		add = false
	}

	// Build the initial-cluster string; if our address is already present
	// (named or still unnamed), do not add ourselves a second time.
	for _, member := range members.Members {
		for _, peer := range member.PeerURLs {
			u, err := url.Parse(peer)
			if err != nil {
				return err
			}
			// An uninitialized member won't have a name
			if u.Hostname() == e.address && (member.Name == e.name || member.Name == "") {
				add = false
			}
			if member.Name == "" && u.Hostname() == e.address {
				member.Name = e.name
			}
			if len(member.PeerURLs) > 0 {
				cluster = append(cluster, fmt.Sprintf("%s=%s", member.Name, member.PeerURLs[0]))
			}
		}
	}

	if add {
		logrus.Infof("Adding %s to etcd cluster %v", e.peerURL(), cluster)
		// Join as a learner; manageLearners on the leader promotes us later.
		if _, err = client.MemberAddAsLearner(clientCtx, []string{e.peerURL()}); err != nil {
			return err
		}
		cluster = append(cluster, fmt.Sprintf("%s=%s", e.name, e.peerURL()))
	}

	logrus.Infof("Starting etcd for cluster %v", cluster)
	return e.cluster(ctx, false, executor.InitialOptions{
		Cluster: strings.Join(cluster, ","),
		State:   "existing",
	})
}
// Register configures a new etcd client and adds db info routes for the http request handler.
// It also wires the datastore endpoint/TLS config, handles tombstone-triggered
// data dir backup, loads (or creates) the member name, and registers the
// cluster controllers started later.
func (e *ETCD) Register(ctx context.Context, config *config.Control, handler http.Handler) (http.Handler, error) {
	e.config = config
	e.runtime = config.Runtime

	client, err := GetClient(ctx, e.runtime, endpoint)
	if err != nil {
		return nil, err
	}
	e.client = client

	address, err := GetAdvertiseAddress(config.PrivateIP)
	if err != nil {
		return nil, err
	}
	e.address = address
	// Point the apiserver's storage backend at the local etcd listener.
	e.config.Datastore.Endpoint = endpoint
	e.config.Datastore.BackendTLSConfig.CAFile = e.runtime.ETCDServerCA
	e.config.Datastore.BackendTLSConfig.CertFile = e.runtime.ClientETCDCert
	e.config.Datastore.BackendTLSConfig.KeyFile = e.runtime.ClientETCDKey

	// A tombstone is dropped when this member was removed from the cluster;
	// move the stale data dir aside (keeping a bounded number of backups)
	// so the node can rejoin cleanly.
	tombstoneFile := filepath.Join(etcdDBDir(e.config), "tombstone")
	if _, err := os.Stat(tombstoneFile); err == nil {
		logrus.Infof("tombstone file has been detected, removing data dir to rejoin the cluster")
		if _, err := backupDirWithRetention(etcdDBDir(e.config), maxBackupRetention); err != nil {
			return nil, err
		}
	}

	if err := e.setName(false); err != nil {
		return nil, err
	}

	e.config.Runtime.ClusterControllerStart = func(ctx context.Context) error {
		RegisterMetadataHandlers(ctx, e, e.config.Runtime.Core.Core().V1().Node())
		return nil
	}

	// Member-removal handlers only run on the elected leader.
	e.config.Runtime.LeaderElectedClusterControllerStart = func(ctx context.Context) error {
		RegisterMemberHandlers(ctx, e, e.config.Runtime.Core.Core().V1().Node())
		return nil
	}

	return e.handler(handler), err
}
// setName sets a unique name for this cluster member. The first time this is called,
// or if force is set to true, a new name will be generated and written to disk. The persistent
// name is used on subsequent calls.
//
// The generated name is "<short-hostname>-<8 hex chars of a random UUID>".
func (e *ETCD) setName(force bool) error {
	fileName := nameFile(e.config)
	data, err := ioutil.ReadFile(fileName)
	if os.IsNotExist(err) || force {
		h, err := os.Hostname()
		if err != nil {
			return err
		}
		// Use only the first hostname label plus a random suffix to keep
		// names short while still unique per node.
		e.name = strings.SplitN(h, ".", 2)[0] + "-" + uuid.New().String()[:8]
		if err := os.MkdirAll(filepath.Dir(fileName), 0700); err != nil {
			return err
		}
		return ioutil.WriteFile(fileName, []byte(e.name), 0600)
	} else if err != nil {
		return err
	}
	e.name = string(data)
	return nil
}
// handler wraps next with a router that serves the /db/info cluster-info
// endpoint; every other path falls through to next unchanged.
func (e *ETCD) handler(next http.Handler) http.Handler {
	router := mux.NewRouter()
	router.Handle("/db/info", e.infoHandler())
	router.NotFoundHandler = next
	return router
}
// infoHandler returns etcd cluster information. This is used by new members
// when joining the cluster. If the member list cannot be fetched, it falls
// back to advertising only the local member so joiners still get a seed.
func (e *ETCD) infoHandler() http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		ctx, cancel := context.WithTimeout(req.Context(), 2*time.Second)
		defer cancel()

		// Both branches write JSON; set the header up front. (Previously
		// only the success path set Content-Type, so the fallback response
		// was served without one.)
		rw.Header().Set("Content-Type", "application/json")

		members, err := e.client.MemberList(ctx)
		if err != nil {
			json.NewEncoder(rw).Encode(&Members{
				Members: []*etcdserverpb.Member{
					{
						Name:       e.name,
						PeerURLs:   []string{e.peerURL()},
						ClientURLs: []string{e.clientURL()},
					},
				},
			})
			return
		}

		json.NewEncoder(rw).Encode(&Members{
			Members: members.Members,
		})
	})
}
// GetClient dials the given etcd endpoints using the runtime's client
// certificates and returns a ready-to-use etcd v3 client.
func GetClient(ctx context.Context, runtime *config.ControlRuntime, endpoints ...string) (*clientv3.Client, error) {
	clientCfg, cfgErr := getClientConfig(ctx, runtime, endpoints...)
	if cfgErr != nil {
		return nil, cfgErr
	}
	return clientv3.New(*clientCfg)
}
// getClientConfig builds an etcd client config for the given endpoints,
// using the package's default dial and keepalive timeouts and the runtime's
// client TLS material.
func getClientConfig(ctx context.Context, runtime *config.ControlRuntime, endpoints ...string) (*clientv3.Config, error) {
	tlsConfig, err := toTLSConfig(runtime)
	if err != nil {
		return nil, err
	}
	return &clientv3.Config{
		Endpoints:            endpoints,
		TLS:                  tlsConfig,
		Context:              ctx,
		DialTimeout:          defaultDialTimeout,
		DialKeepAliveTime:    defaultKeepAliveTime,
		DialKeepAliveTimeout: defaultKeepAliveTimeout,
	}, nil
}
// toTLSConfig builds a tls.Config from the ControlRuntime's etcd client
// certificate/key pair and the etcd server CA, suitable for dialing etcd.
func toTLSConfig(runtime *config.ControlRuntime) (*tls.Config, error) {
	cert, err := tls.LoadX509KeyPair(runtime.ClientETCDCert, runtime.ClientETCDKey)
	if err != nil {
		return nil, err
	}
	caPool, err := certutil.NewPool(runtime.ETCDServerCA)
	if err != nil {
		return nil, err
	}
	tlsCfg := &tls.Config{
		RootCAs:      caPool,
		Certificates: []tls.Certificate{cert},
	}
	return tlsCfg, nil
}
// GetAdvertiseAddress returns the IP address best suited for advertising to
// clients: the configured advertiseIP when set, otherwise the host's chosen
// default interface address.
func GetAdvertiseAddress(advertiseIP string) (string, error) {
	if advertiseIP != "" {
		return advertiseIP, nil
	}
	ipAddr, err := utilnet.ChooseHostInterface()
	if err != nil {
		return "", err
	}
	return ipAddr.String(), nil
}
// newCluster bootstraps etcd as a brand-new cluster with this node as the
// sole initial member, then migrates any existing kine/sqlite content in.
// reset forces etcd's --force-new-cluster behavior (used by Reset).
func (e *ETCD) newCluster(ctx context.Context, reset bool) error {
	err := e.cluster(ctx, reset, executor.InitialOptions{
		AdvertisePeerURL: fmt.Sprintf("https://%s:2380", e.address),
		Cluster:          fmt.Sprintf("%s=https://%s:2380", e.name, e.address),
		State:            "new",
	})
	if err != nil {
		return err
	}
	// One-time import of data from a previous sqlite-backed installation.
	if err := e.migrateFromSQLite(ctx); err != nil {
		return fmt.Errorf("failed to migrate content from sqlite to etcd: %w", err)
	}
	return nil
}
// migrateFromSQLite copies every /registry/ key from a pre-existing kine
// sqlite database into the freshly started etcd, then renames the sqlite
// file so the migration only runs once. A missing sqlite file is a no-op.
func (e *ETCD) migrateFromSQLite(ctx context.Context) error {
	_, err := os.Stat(sqliteFile(e.config))
	if os.IsNotExist(err) {
		return nil
	} else if err != nil {
		return err
	}

	logrus.Infof("Migrating content from sqlite to etcd")

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Bring up a temporary kine listener backed by the sqlite file; it
	// serves the data over a local unix socket for the duration of the copy.
	_, err = endpoint2.Listen(ctx, endpoint2.Config{
		Endpoint: endpoint2.SQLiteBackend,
	})
	if err != nil {
		return err
	}

	sqliteClient, err := client.New(endpoint2.ETCDConfig{
		Endpoints: []string{"unix://kine.sock"},
	})
	if err != nil {
		return err
	}
	defer sqliteClient.Close()

	etcdClient, err := GetClient(ctx, e.runtime, "https://localhost:2379")
	if err != nil {
		return err
	}
	defer etcdClient.Close()

	values, err := sqliteClient.List(ctx, "/registry/", 0)
	if err != nil {
		return err
	}

	for _, value := range values {
		logrus.Infof("Migrating etcd key %s", value.Key)
		_, err := etcdClient.Put(ctx, string(value.Key), string(value.Data))
		if err != nil {
			return err
		}
	}

	// Rename (instead of delete) so the old data survives as a backup and
	// the migration is not attempted again on restart.
	return os.Rename(sqliteFile(e.config), sqliteFile(e.config)+".migrated")
}
// peerURL returns the peer (raft traffic) URL for the local node, always on
// port 2380.
func (e *ETCD) peerURL() string {
	return "https://" + e.address + ":2380"
}
// clientURL returns the client access URL for the local node, always on
// port 2379.
func (e *ETCD) clientURL() string {
	return "https://" + e.address + ":2379"
}
// metricsURL returns the listen address(es) for etcd's metrics endpoint.
// Metrics are always served on loopback; when expose is true the node
// address is prepended so metrics are reachable externally as well.
func (e *ETCD) metricsURL(expose bool) string {
	localAddress := "http://127.0.0.1:2381"
	if !expose {
		return localAddress
	}
	return fmt.Sprintf("http://%s:2381,%s", e.address, localAddress)
}
// cluster starts (or restarts) the managed etcd process/goroutine with this
// node's listener, TLS, and data-dir configuration plus the given initial
// cluster options. forceNew maps to etcd's --force-new-cluster.
func (e *ETCD) cluster(ctx context.Context, forceNew bool, options executor.InitialOptions) error {
	return executor.ETCD(ctx, executor.ETCDConfig{
		Name:                e.name,
		InitialOptions:      options,
		ForceNewCluster:     forceNew,
		// Always listen on loopback in addition to the advertise address.
		ListenClientURLs:    e.clientURL() + ",https://127.0.0.1:2379",
		ListenMetricsURLs:   e.metricsURL(e.config.EtcdExposeMetrics),
		ListenPeerURLs:      e.peerURL(),
		AdvertiseClientURLs: e.clientURL(),
		DataDir:             etcdDBDir(e.config),
		// Both listeners require client certificates signed by the
		// respective CA (server vs. peer trust domains).
		ServerTrust: executor.ServerTrust{
			CertFile:       e.config.Runtime.ServerETCDCert,
			KeyFile:        e.config.Runtime.ServerETCDKey,
			ClientCertAuth: true,
			TrustedCAFile:  e.config.Runtime.ETCDServerCA,
		},
		PeerTrust: executor.PeerTrust{
			CertFile:       e.config.Runtime.PeerServerClientETCDCert,
			KeyFile:        e.config.Runtime.PeerServerClientETCDKey,
			ClientCertAuth: true,
			TrustedCAFile:  e.config.Runtime.ETCDPeerCA,
		},
		// Raft timing in milliseconds.
		ElectionTimeout:   5000,
		HeartbeatInterval: 500,
		Logger:            "zap",
		LogOutputs:        []string{"stderr"},
	})
}
// RemovePeer removes a peer from the cluster. The peer name and IP address must both match.
// Removing the local member is refused unless allowSelfRemoval is set. A
// member that has already disappeared (ErrGRPCMemberNotFound) is treated as
// success; a name/address mismatch is silently a no-op.
func (e *ETCD) RemovePeer(ctx context.Context, name, address string, allowSelfRemoval bool) error {
	ctx, cancel := context.WithTimeout(ctx, memberRemovalTimeout)
	defer cancel()
	members, err := e.client.MemberList(ctx)
	if err != nil {
		return err
	}

	for _, member := range members.Members {
		if member.Name != name {
			continue
		}
		for _, peerURL := range member.PeerURLs {
			u, err := url.Parse(peerURL)
			if err != nil {
				return err
			}
			if u.Hostname() == address {
				if e.address == address && !allowSelfRemoval {
					return errors.New("not removing self from etcd cluster")
				}
				logrus.Infof("Removing name=%s id=%d address=%s from etcd", member.Name, member.ID, address)
				_, err := e.client.MemberRemove(ctx, member.ID)
				// Already gone: treat as success.
				if err == rpctypes.ErrGRPCMemberNotFound {
					return nil
				}
				return err
			}
		}
	}

	return nil
}
// manageLearners monitors the etcd cluster to ensure that learners are making progress towards
// being promoted to full voting member. The checks only run on the cluster member that is
// the etcd leader.
//
// This function runs for the life of the process; it only returns when the
// ticker channel is closed (never, in practice).
func (e *ETCD) manageLearners(ctx context.Context) error {
	t := time.NewTicker(manageTickerTime)
	defer t.Stop()

	for range t.C {
		// Run each pass inside a closure so the timeout context is released
		// at the end of every tick. Previously the `defer cancel()` sat
		// directly in the loop body of this never-returning function, so
		// cancel funcs (and their timers) accumulated indefinitely.
		func() {
			ctx, cancel := context.WithTimeout(ctx, testTimeout)
			defer cancel()

			// Check to see if the local node is the leader. Only the leader should do learner management.
			if e.client == nil {
				logrus.Error("Etcd client was nil")
				return
			}
			if status, err := e.client.Status(ctx, endpoint); err != nil {
				logrus.Errorf("Failed to check local etcd status for learner management: %v", err)
				return
			} else if status.Header.MemberId != status.Leader {
				return
			}

			progress, err := e.getLearnerProgress(ctx)
			if err != nil {
				logrus.Errorf("Failed to get recorded learner progress from etcd: %v", err)
				return
			}

			members, err := e.client.MemberList(ctx)
			if err != nil {
				logrus.Errorf("Failed to get etcd members for learner management: %v", err)
				return
			}

			// Only the first learner found is tracked per pass; etcd only
			// allows one learner at a time anyway.
			for _, member := range members.Members {
				if member.IsLearner {
					if err := e.trackLearnerProgress(ctx, progress, member); err != nil {
						logrus.Errorf("Failed to track learner progress towards promotion: %v", err)
					}
					break
				}
			}
		}()
	}
	return nil
}
// trackLearnerProgress attempts to promote a learner. If it cannot be promoted, progress through the raft index is tracked.
// If the learner does not make any progress in a reasonable amount of time, it is evicted from the cluster.
func (e *ETCD) trackLearnerProgress(ctx context.Context, progress *learnerProgress, member *etcdserverpb.Member) error {
	// Try to promote it. If it can be promoted, no further tracking is necessary
	if _, err := e.client.MemberPromote(ctx, member.ID); err != nil {
		logrus.Debugf("Unable to promote learner %s: %v", member.Name, err)
	} else {
		logrus.Infof("Promoted learner %s", member.Name)
		return nil
	}

	now := time.Now()

	// If this is the first time we've tracked this member's progress, reset stats
	if progress.Name != member.Name || progress.ID != member.ID {
		progress.ID = member.ID
		progress.Name = member.Name
		progress.RaftAppliedIndex = 0
		progress.LastProgress.Time = now
	}

	// Update progress by retrieving status from the member's first reachable client URL
	// (the loop breaks after the first URL that answers; the deferred
	// cancels are bounded by len(member.ClientURLs) and released on return).
	for _, ep := range member.ClientURLs {
		ctx, cancel := context.WithTimeout(ctx, defaultDialTimeout)
		defer cancel()
		status, err := e.client.Status(ctx, ep)
		if err != nil {
			logrus.Debugf("Failed to get etcd status from learner %s at %s: %v", member.Name, ep, err)
			continue
		}

		if progress.RaftAppliedIndex < status.RaftAppliedIndex {
			logrus.Debugf("Learner %s has progressed from RaftAppliedIndex %d to %d", progress.Name, progress.RaftAppliedIndex, status.RaftAppliedIndex)
			progress.RaftAppliedIndex = status.RaftAppliedIndex
			progress.LastProgress.Time = now
		}
		break
	}

	// Warn if the learner hasn't made any progress
	if !progress.LastProgress.Time.Equal(now) {
		logrus.Warnf("Learner %s stalled at RaftAppliedIndex=%d for %s", progress.Name, progress.RaftAppliedIndex, now.Sub(progress.LastProgress.Time).String())
	}

	// See if it's time to evict yet
	if now.Sub(progress.LastProgress.Time) > learnerMaxStallTime {
		if _, err := e.client.MemberRemove(ctx, member.ID); err != nil {
			return err
		}
		logrus.Warnf("Removed learner %s from etcd cluster", member.Name)
		return nil
	}

	// Persist the (possibly updated) progress record for the next pass.
	return e.setLearnerProgress(ctx, progress)
}
// getLearnerProgress loads the persisted learnerProgress record from etcd.
// A missing key yields a zero-valued record rather than an error.
func (e *ETCD) getLearnerProgress(ctx context.Context) (*learnerProgress, error) {
	resp, err := e.client.Get(ctx, learnerProgressKey)
	if err != nil {
		return nil, err
	}

	progress := &learnerProgress{}
	if resp.Count < 1 {
		return progress, nil
	}

	if err := json.NewDecoder(bytes.NewBuffer(resp.Kvs[0].Value)).Decode(progress); err != nil {
		return nil, err
	}
	return progress, nil
}
// setLearnerProgress persists the learnerProgress record to etcd as JSON.
// Encoder.Encode (rather than Marshal) is kept deliberately: it appends a
// trailing newline, matching the byte format written historically.
func (e *ETCD) setLearnerProgress(ctx context.Context, status *learnerProgress) error {
	buf := &bytes.Buffer{}
	if err := json.NewEncoder(buf).Encode(status); err != nil {
		return err
	}
	_, err := e.client.Put(ctx, learnerProgressKey, buf.String())
	return err
}
// ClientURLs returns a list of all non-learner etcd cluster member client
// access URLs, fetched via the remote server's /db/info endpoint, along with
// the raw member list. Members advertising this node's own IP are excluded
// entirely (we must not dial ourselves while joining).
func ClientURLs(ctx context.Context, clientAccessInfo *clientaccess.Info, selfIP string) ([]string, Members, error) {
	var memberList Members
	resp, err := clientAccessInfo.Get("/db/info")
	if err != nil {
		return nil, memberList, err
	}

	if err := json.Unmarshal(resp, &memberList); err != nil {
		return nil, memberList, err
	}
	ip, err := GetAdvertiseAddress(selfIP)
	if err != nil {
		return nil, memberList, err
	}
	var clientURLs []string
members:
	for _, member := range memberList.Members {
		// excluding learner member from the client list
		if member.IsLearner {
			continue
		}
		for _, clientURL := range member.ClientURLs {
			u, err := url.Parse(clientURL)
			if err != nil {
				continue
			}
			// Skip the whole member (labeled continue) if any of its URLs
			// points back at this node.
			if u.Hostname() == ip {
				continue members
			}
		}
		clientURLs = append(clientURLs, member.ClientURLs...)
	}
	return clientURLs, memberList, nil
}
// snapshotDir ensures that the snapshot directory exists, and then returns its path.
// When no custom EtcdSnapshotDir is configured, the default
// dataDir/db/snapshots is used and (if create is set) created on demand.
//
// NOTE(review): if the default path exists but is NOT a directory, control
// falls through and returns config.EtcdSnapshotDir — which is "" in that
// branch — with a nil error. Confirm whether that is intended.
func snapshotDir(config *config.Control, create bool) (string, error) {
	if config.EtcdSnapshotDir == "" {
		// we have to create the snapshot dir if we are using
		// the default snapshot dir if it doesn't exist
		defaultSnapshotDir := filepath.Join(config.DataDir, "db", "snapshots")
		s, err := os.Stat(defaultSnapshotDir)
		if err != nil {
			if create && os.IsNotExist(err) {
				if err := os.MkdirAll(defaultSnapshotDir, 0700); err != nil {
					return "", err
				}
				return defaultSnapshotDir, nil
			}
			return "", err
		}
		if s.IsDir() {
			return defaultSnapshotDir, nil
		}
	}
	return config.EtcdSnapshotDir, nil
}
// preSnapshotSetup checks to see if the necessary components are in place
// to perform an Etcd snapshot. This is necessary primarily for on-demand
// snapshots since they're performed before normal Etcd setup is completed.
//
// Existing client/config/runtime values are never overwritten; only nil
// fields are filled in from the supplied config.
func (e *ETCD) preSnapshotSetup(ctx context.Context, config *config.Control) error {
	if e.client == nil {
		if e.config == nil {
			e.config = config
		}
		client, err := GetClient(ctx, e.config.Runtime, endpoint)
		if err != nil {
			return err
		}
		e.client = client
	}
	if e.runtime == nil {
		e.runtime = config.Runtime
	}
	return nil
}
// Snapshot attempts to save a new snapshot to the configured directory, and then clean up any old
// snapshots in excess of the retention limits. This method is used in the internal cron snapshot
// system as well as used to do on-demand snapshots.
func (e *ETCD) Snapshot(ctx context.Context, config *config.Control) error {
	if err := e.preSnapshotSetup(ctx, config); err != nil {
		return err
	}

	// Learners cannot be snapshotted; skip quietly rather than erroring so
	// the cron job doesn't spam failures while a node is catching up.
	status, err := e.client.Status(ctx, endpoint)
	if err != nil {
		return errors.Wrap(err, "failed to check etcd status for snapshot")
	}

	if status.IsLearner {
		logrus.Warnf("Skipping snapshot: not supported for learner")
		return nil
	}

	snapshotDir, err := snapshotDir(e.config, true)
	if err != nil {
		return errors.Wrap(err, "failed to get the snapshot dir")
	}

	cfg, err := getClientConfig(ctx, e.runtime, endpoint)
	if err != nil {
		return errors.Wrap(err, "failed to get config for etcd snapshot")
	}

	// Snapshot file name: <prefix>-<node>-<unix timestamp>.
	nodeName := os.Getenv("NODE_NAME")
	snapshotName := fmt.Sprintf("%s-%s-%d", e.config.EtcdSnapshotName, nodeName, time.Now().Unix())
	snapshotPath := filepath.Join(snapshotDir, snapshotName)

	logrus.Infof("Saving etcd snapshot to %s", snapshotPath)

	if err := snapshot.NewV3(nil).Save(ctx, *cfg, snapshotPath); err != nil {
		return errors.Wrap(err, "failed to save snapshot")
	}

	if e.config.EtcdS3 {
		logrus.Infof("Saving etcd snapshot %s to S3", snapshotName)
		if err := e.initS3IfNil(ctx); err != nil {
			return err
		}
		if err := e.s3.upload(ctx, snapshotPath); err != nil {
			return err
		}
		logrus.Infof("S3 upload complete for %s", snapshotName)

		// S3-side retention is applied independently of local retention.
		if e.config.EtcdSnapshotRetention >= 1 {
			if err := e.s3.snapshotRetention(ctx); err != nil {
				return errors.Wrap(err, "failed to apply s3 snapshot retention")
			}
		}
	}

	// check if we need to perform a retention check
	if e.config.EtcdSnapshotRetention >= 1 {
		if err := snapshotRetention(e.config.EtcdSnapshotRetention, e.config.EtcdSnapshotName, snapshotDir); err != nil {
			return errors.Wrap(err, "failed to apply snapshot retention")
		}
	}

	// Mirror the resulting snapshot set into the kube-system ConfigMap.
	return e.StoreSnapshotData(ctx)
}
// s3Config captures the S3 connection settings a snapshot was stored with;
// it is embedded in SnapshotFile so consumers can locate S3 snapshots.
type s3Config struct {
	Endpoint string `json:"endpoint,omitempty"`
	EndpointCA string `json:"endpointCA,omitempty"`
	SkipSSLVerify bool `json:"skipSSLVerify,omitempty"`
	Bucket string `json:"bucket,omitempty"`
	Region string `json:"region,omitempty"`
	Folder string `json:"folder,omitempty"`
	Insecure bool `json:"insecure,omitempty"`
}
// SnapshotFile represents a single snapshot and its
// metadata.
type SnapshotFile struct {
	Name string `json:"name"`
	// Location contains the full path of the snapshot. For
	// local paths, the location will be prefixed with "file://".
	Location string `json:"location,omitempty"`
	// NodeName is the node that took the snapshot, or "s3" for S3-stored snapshots.
	NodeName string `json:"nodeName,omitempty"`
	CreatedAt *metav1.Time `json:"createdAt,omitempty"`
	Size int64 `json:"size,omitempty"`
	// S3 is only set for snapshots stored in S3.
	S3 *s3Config `json:"s3Config,omitempty"`
}
// listSnapshots provides a list of the currently stored
// snapshots on disk or in S3 along with their relevant
// metadata. When EtcdS3 is enabled, ONLY the S3 bucket is listed; otherwise
// only the local snapshotDir is listed.
func (e *ETCD) listSnapshots(ctx context.Context, snapshotDir string) ([]SnapshotFile, error) {
	var snapshots []SnapshotFile

	if e.config.EtcdS3 {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		if err := e.initS3IfNil(ctx); err != nil {
			return nil, err
		}

		var loo minio.ListObjectsOptions
		if e.config.EtcdS3Folder != "" {
			loo = minio.ListObjectsOptions{
				Prefix:    e.config.EtcdS3Folder,
				Recursive: true,
			}
		}

		objects := e.s3.client.ListObjects(ctx, e.config.EtcdS3BucketName, loo)

		for obj := range objects {
			if obj.Err != nil {
				return nil, obj.Err
			}
			// Zero-size objects are folder markers, not snapshots.
			if obj.Size == 0 {
				continue
			}

			// Round-trip through RFC3339 to truncate sub-second precision.
			ca, err := time.Parse(time.RFC3339, obj.LastModified.Format(time.RFC3339))
			if err != nil {
				return nil, err
			}

			snapshots = append(snapshots, SnapshotFile{
				Name:     filepath.Base(obj.Key),
				NodeName: "s3",
				CreatedAt: &metav1.Time{
					Time: ca,
				},
				Size: obj.Size,
				S3: &s3Config{
					Endpoint:      e.config.EtcdS3Endpoint,
					EndpointCA:    e.config.EtcdS3EndpointCA,
					SkipSSLVerify: e.config.EtcdS3SkipSSLVerify,
					Bucket:        e.config.EtcdS3BucketName,
					Region:        e.config.EtcdS3Region,
					Folder:        e.config.EtcdS3Folder,
					Insecure:      e.config.EtcdS3Insecure,
				},
			})
		}
		return snapshots, nil
	}

	files, err := ioutil.ReadDir(snapshotDir)
	if err != nil {
		return nil, err
	}

	nodeName := os.Getenv("NODE_NAME")

	for _, f := range files {
		snapshots = append(snapshots, SnapshotFile{
			Name:     f.Name(),
			Location: "file://" + filepath.Join(snapshotDir, f.Name()),
			NodeName: nodeName,
			CreatedAt: &metav1.Time{
				Time: f.ModTime(),
			},
			Size: f.Size(),
		})
	}
	return snapshots, nil
}
// initS3IfNil lazily constructs the S3 client from the current control
// config; it is a no-op when the client already exists.
func (e *ETCD) initS3IfNil(ctx context.Context) error {
	if e.s3 != nil {
		return nil
	}
	s3Client, err := NewS3(ctx, e.config)
	if err != nil {
		return err
	}
	e.s3 = s3Client
	return nil
}
// PruneSnapshots performs a retention run with the configured retention
// count and removes expired snapshots, from S3 when enabled, otherwise from
// the local snapshot directory.
func (e *ETCD) PruneSnapshots(ctx context.Context) error {
	snapshotDir, err := snapshotDir(e.config, false)
	if err != nil {
		return errors.Wrap(err, "failed to get the snapshot dir")
	}

	if e.config.EtcdS3 {
		// Fix: previously written as `if e.initS3IfNil(ctx); err != nil`,
		// which called the initializer as the if's init statement and
		// discarded its error, then tested the (nil) outer err — so S3
		// setup failures were silently ignored.
		if err := e.initS3IfNil(ctx); err != nil {
			return err
		}
		return e.s3.snapshotRetention(ctx)
	}

	return snapshotRetention(e.config.EtcdSnapshotRetention, e.config.EtcdSnapshotName, snapshotDir)
}
// ListSnapshots is the exported entry point for enumerating stored
// snapshots; it resolves the snapshot directory and delegates to the
// unexported listSnapshots.
func (e *ETCD) ListSnapshots(ctx context.Context) ([]SnapshotFile, error) {
	dir, err := snapshotDir(e.config, false)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get the snapshot dir")
	}
	return e.listSnapshots(ctx, dir)
}
// DeleteSnapshots removes the given snapshots from either local storage or
// S3 (when EtcdS3 is enabled), then refreshes the snapshot ConfigMap via
// StoreSnapshotData. Snapshots that do not exist are skipped, not errors.
func (e *ETCD) DeleteSnapshots(ctx context.Context, snapshots []string) error {
	snapshotDir, err := snapshotDir(e.config, false)
	if err != nil {
		return errors.Wrap(err, "failed to get the snapshot dir")
	}
	if e.config.EtcdS3 {
		logrus.Info("Removing the given etcd snapshot(s) from S3")
		logrus.Debugf("Removing the given etcd snapshot(s) from S3: %v", snapshots)

		// Fix: previously written as `if e.initS3IfNil(ctx); err != nil`,
		// which discarded the initializer's error and tested the (nil)
		// outer err, silently ignoring S3 setup failures.
		if err := e.initS3IfNil(ctx); err != nil {
			return err
		}

		objectsCh := make(chan minio.ObjectInfo)

		ctx, cancel := context.WithTimeout(ctx, defaultS3OpTimeout)
		defer cancel()

		// Producer: stream only those bucket objects whose key matches a
		// requested snapshot name; RemoveObjects consumes the channel.
		go func() {
			defer close(objectsCh)

			opts := minio.ListObjectsOptions{
				Recursive: true,
			}

			for obj := range e.s3.client.ListObjects(ctx, e.config.EtcdS3BucketName, opts) {
				if obj.Err != nil {
					logrus.Error(obj.Err)
					return
				}

				// iterate through the given snapshots and only
				// add them to the channel for remove if they're
				// actually found from the bucket listing.
				for _, snapshot := range snapshots {
					if snapshot == obj.Key {
						objectsCh <- obj
					}
				}
			}
		}()

		for {
			select {
			case <-ctx.Done():
				logrus.Errorf("Unable to delete snapshot: %v", ctx.Err())
				return e.StoreSnapshotData(ctx)
			case <-time.After(time.Millisecond * 100):
				continue
			case err, ok := <-e.s3.client.RemoveObjects(ctx, e.config.EtcdS3BucketName, objectsCh, minio.RemoveObjectsOptions{}):
				if err.Err != nil {
					logrus.Errorf("Unable to delete snapshot: %v", err.Err)
				}
				// Channel closed: all removals processed.
				if !ok {
					return e.StoreSnapshotData(ctx)
				}
			}
		}
	}

	logrus.Info("Removing the given locally stored etcd snapshot(s)")
	logrus.Debugf("Attempting to remove the given locally stored etcd snapshot(s): %v", snapshots)

	for _, s := range snapshots {
		// check if the given snapshot exists. If it does,
		// remove it, otherwise continue.
		sf := filepath.Join(snapshotDir, s)
		if _, err := os.Stat(sf); os.IsNotExist(err) {
			logrus.Infof("Snapshot %s, does not exist", s)
			continue
		}
		if err := os.Remove(sf); err != nil {
			return err
		}
		logrus.Debug("Removed snapshot ", s)
	}

	return e.StoreSnapshotData(ctx)
}
// updateSnapshotData populates the given map with the contents of the given slice.
func updateSnapshotData(data map[string]string, snapshotFiles []SnapshotFile) error {
for _, v := range snapshotFiles {
b, err := json.Marshal(v)
if err != nil {
return err
}
data[v.Name] = string(b)
}
return nil
}
// StoreSnapshotData stores the given snapshot data in the "snapshots" ConfigMap.
func (e *ETCD) StoreSnapshotData(ctx context.Context) error {
logrus.Infof("Saving current etcd snapshot set to %s ConfigMap", snapshotConfigMapName)
snapshotDir, err := snapshotDir(e.config, true)
if err != nil {
return errors.Wrap(err, "failed to get the snapshot dir")
}
return retry.OnError(retry.DefaultBackoff, func(err error) bool {
return apierrors.IsConflict(err) || apierrors.IsAlreadyExists(err)
}, func() error {
// make sure the core.Factory is initialize. There can
// be a race between this core code startup.
for e.config.Runtime.Core == nil {
runtime.Gosched()
}
snapshotConfigMap, getErr := e.config.Runtime.Core.Core().V1().ConfigMap().Get(metav1.NamespaceSystem, snapshotConfigMapName, metav1.GetOptions{})
snapshotFiles, err := e.listSnapshots(ctx, snapshotDir)
if err != nil {
return err
}
data := make(map[string]string, len(snapshotFiles))
if err := updateSnapshotData(data, snapshotFiles); err != nil {
return err
}
if apierrors.IsNotFound(getErr) {
cm := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: snapshotConfigMapName,
Namespace: metav1.NamespaceSystem,
},
Data: data,
}
_, err := e.config.Runtime.Core.Core().V1().ConfigMap().Create(&cm)
return err
}
if snapshotConfigMap.Data == nil {
snapshotConfigMap.Data = make(map[string]string)
}
nodeName := os.Getenv("NODE_NAME")
// remove entries for this node only
for k, v := range snapshotConfigMap.Data {
var sf SnapshotFile
if err := json.Unmarshal([]byte(v), &sf); err != nil {
return err
}
if sf.NodeName == nodeName || sf.NodeName == "s3" {
delete(snapshotConfigMap.Data, k)
}
}
// save this node's entries to the ConfigMap
for k, v := range data {
snapshotConfigMap.Data[k] = v
}
_, err = e.config.Runtime.Core.Core().V1().ConfigMap().Update(snapshotConfigMap)
return err
})
}
// setSnapshotFunction schedules snapshots at the configured interval.
func (e *ETCD) setSnapshotFunction(ctx context.Context) {
e.cron.AddFunc(e.config.EtcdSnapshotCron, func() {
if err := e.Snapshot(ctx, e.config); err != nil {
logrus.Error(err)
}
})
}
// Restore performs a restore of the ETCD datastore from
// the given snapshot path. This operation exists upon
// completion.
func (e *ETCD) Restore(ctx context.Context) error {
// check the old etcd data dir
oldDataDir := etcdDBDir(e.config) + "-old-" + strconv.Itoa(int(time.Now().Unix()))
if e.config.ClusterResetRestorePath == "" {
return errors.New("no etcd restore path was specified")
}
// make sure snapshot exists before restoration
if _, err := os.Stat(e.config.ClusterResetRestorePath); err != nil {
return err
}
// move the data directory to a temp path
if err := os.Rename(etcdDBDir(e.config), oldDataDir); err != nil {
return err
}
logrus.Infof("Pre-restore etcd database moved to %s", oldDataDir)
return snapshot.NewV3(nil).Restore(snapshot.RestoreConfig{
SnapshotPath: e.config.ClusterResetRestorePath,
Name: e.name,
OutputDataDir: etcdDBDir(e.config),
OutputWALDir: walDir(e.config),
PeerURLs: []string{e.peerURL()},
InitialCluster: e.name + "=" + e.peerURL(),
})
}
// snapshotRetention iterates through the snapshots and removes the oldest
// leaving the desired number of snapshots.
func snapshotRetention(retention int, snapshotPrefix string, snapshotDir string) error {
nodeName := os.Getenv("NODE_NAME")
var snapshotFiles []os.FileInfo
if err := filepath.Walk(snapshotDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if strings.HasPrefix(info.Name(), snapshotPrefix+"-"+nodeName) {
snapshotFiles = append(snapshotFiles, info)
}
return nil
}); err != nil {
return err
}
if len(snapshotFiles) <= retention {
return nil
}
sort.Slice(snapshotFiles, func(i, j int) bool {
return snapshotFiles[i].Name() < snapshotFiles[j].Name()
})
delCount := len(snapshotFiles) - retention
for _, df := range snapshotFiles[:delCount] {
if err := os.Remove(filepath.Join(snapshotDir, df.Name())); err != nil {
return err
}
}
return nil
}
// backupDirWithRetention will move the dir to a backup dir
// and will keep only maxBackupRetention of dirs.
func backupDirWithRetention(dir string, maxBackupRetention int) (string, error) {
backupDir := dir + "-backup-" + strconv.Itoa(int(time.Now().Unix()))
if _, err := os.Stat(dir); err != nil {
return "", nil
}
files, err := ioutil.ReadDir(filepath.Dir(dir))
if err != nil {
return "", err
}
sort.Slice(files, func(i, j int) bool {
return files[i].ModTime().After(files[j].ModTime())
})
count := 0
for _, f := range files {
if strings.HasPrefix(f.Name(), filepath.Base(dir)+"-backup") && f.IsDir() {
count++
if count > maxBackupRetention {
if err := os.RemoveAll(filepath.Join(filepath.Dir(dir), f.Name())); err != nil {
return "", err
}
}
}
}
// move the directory to a temp path
if err := os.Rename(dir, backupDir); err != nil {
return "", err
}
return backupDir, nil
}
// GetAPIServerURLFromETCD will try to fetch the version.Program/apiaddresses key from etcd
// when it succeed it will parse the first address in the list and return back an address
func GetAPIServerURLFromETCD(ctx context.Context, cfg *config.Control) (string, error) {
if cfg.Runtime == nil {
return "", fmt.Errorf("runtime is not ready yet")
}
cl, err := GetClient(ctx, cfg.Runtime, endpoint)
if err != nil {
return "", err
}
etcdResp, err := cl.KV.Get(ctx, AddressKey)
if err != nil {
return "", err
}
if etcdResp.Count < 1 {
return "", fmt.Errorf("servers addresses are not yet set")
}
var addresses []string
if err := json.Unmarshal(etcdResp.Kvs[0].Value, &addresses); err != nil {
return "", fmt.Errorf("failed to unmarshal etcd key: %v", err)
}
return addresses[0], nil
}
// GetMembersClientURLs will list through the member lists in etcd and return
// back a combined list of client urls for each member in the cluster
func (e *ETCD) GetMembersClientURLs(ctx context.Context) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, testTimeout)
defer cancel()
members, err := e.client.MemberList(ctx)
if err != nil {
return nil, err
}
var memberUrls []string
for _, member := range members.Members {
for _, clientURL := range member.ClientURLs {
memberUrls = append(memberUrls, string(clientURL))
}
}
return memberUrls, nil
}
// GetMembersNames will list through the member lists in etcd and return
// back a combined list of member names
func (e *ETCD) GetMembersNames(ctx context.Context) ([]string, error) {
ctx, cancel := context.WithTimeout(ctx, testTimeout)
defer cancel()
members, err := e.client.MemberList(ctx)
if err != nil {
return nil, err
}
var memberNames []string
for _, member := range members.Members {
memberNames = append(memberNames, member.Name)
}
return memberNames, nil
}
// RemoveSelf will remove the member if it exists in the cluster
func (e *ETCD) RemoveSelf(ctx context.Context) error {
if err := e.RemovePeer(ctx, e.name, e.address, true); err != nil {
return err
}
// backup the data dir to avoid issues when re-enabling etcd
oldDataDir := etcdDBDir(e.config) + "-old-" + strconv.Itoa(int(time.Now().Unix()))
// move the data directory to a temp path
return os.Rename(etcdDBDir(e.config), oldDataDir)
}
| 1 | 10,154 | How will this code behave with hostnames that contain hyphens? | k3s-io-k3s | go |
@@ -735,6 +735,15 @@ namespace pwiz.Skyline.Model
moleculeIdKeys.Add(MoleculeAccessionNumbers.TagSMILES, smiles);
}
+ var keggCol = ColumnIndex(SmallMoleculeTransitionListColumnHeaders.idKEGG);
+ var kegg = NullForEmpty(row.GetCell(keggCol));
+ if (kegg != null)
+ {
+ // Should have form like C07481 but we'll accept anything for now, having no proper parser
+ kegg = kegg.Trim();
+ moleculeIdKeys.Add(MoleculeAccessionNumbers.TagKEGG, kegg);
+ }
+
return !moleculeIdKeys.Any()
? MoleculeAccessionNumbers.EMPTY
: new MoleculeAccessionNumbers(moleculeIdKeys); | 1 | /*
* Original author: Brian Pratt <bspratt .at. proteinms.net>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2016 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Threading;
using pwiz.Common.Chemistry;
using pwiz.Common.SystemUtil;
using pwiz.ProteomeDatabase.API;
using pwiz.Skyline.EditUI;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.Skyline.Util.Extensions;
namespace pwiz.Skyline.Model
{
/// <summary>
/// Read a small molecule transition list in CSV form, where header values are restricted to
/// those found in SmallMoleculeTransitionListColumnHeaders.KnownHeaders()
/// </summary>
public abstract class SmallMoleculeTransitionListReader
{
// Culture used when parsing and formatting numeric cell values.
protected IFormatProvider _cultureInfo;
// Parsed rows of the transition list, in input order.
protected List<Row> Rows { get; set; }
// Pushes an updated cell value back into the UI's backing store (e.g. a grid control).
public abstract void UpdateCellBackingStore(int row, int col, object value);
// Reports a parse/validation problem to the user.
public abstract void ShowTransitionError(PasteError error);
// Maps a known column header name to its index in the input; a negative value
// means the column is absent (Row.GetCell treats negative indices as missing).
public abstract int ColumnIndex(string columnName);
// m/z tolerance (taken from document settings in CreateTargets) used when
// matching precursors and transitions against existing document nodes.
private double MzMatchTolerance { get; set; }

protected SmallMoleculeTransitionListReader()
{
    Rows = new List<Row>();
}
// One row of the pasted/imported transition list: a list of cell strings plus
// the row's position in the input, with write-through to the parent's UI store.
public class Row
{
    public int Index { get; private set; }
    private SmallMoleculeTransitionListReader _parent;
    protected List<string> _cells { get; set; }

    public Row(SmallMoleculeTransitionListReader parent, int index, List<string> cells)
    {
        _parent = parent;
        Index = index;
        _cells = cells;
    }

    // Sets the cell at col both locally and in the UI backing store.
    // Negative col means "column not present" and is ignored.
    public void UpdateCell(int col, object value)
    {
        if (col < 0)
            return;
        // Pad the row out far enough that _cells[col] is a valid index.
        // (Was "while (_cells.Count < col)", which left the list one cell short
        // and indexed out of range when col == _cells.Count.)
        while (_cells.Count <= col)
        {
            _cells.Add(null);
        }
        _cells[col] = Convert.ToString(value, _parent._cultureInfo); // Update local copy
        _parent.UpdateCellBackingStore(Index, col, value); // Update gridviewcontrol etc
    }

    // Returns the cell text, or null when the column is absent (negative index).
    public string GetCell(int index)
    {
        return index >= 0 ? _cells[index] : null;
    }

    // Parses the cell as a double using the reader's culture; false on failure.
    public bool GetCellAsDouble(int index, out double val)
    {
        return Double.TryParse(GetCell(index), NumberStyles.Float, _parent._cultureInfo, out val);
    }

    // Sets the local cell text only (no UI write-through); ignored for negative index.
    public void SetCell(int index, string value)
    {
        if (index >= 0)
            _cells[index] = value;
    }
}
// True when the row declares a product value that is non-empty and differs
// from the corresponding precursor value - i.e. a real fragment, not just
// a repeated precursor entry.
private bool RowHasDistinctProductValue(Row row, int productCol, int precursorCol)
{
    var product = row.GetCell(productCol);
    if (string.IsNullOrEmpty(product))
        return false;
    return !Equals(product, row.GetCell(precursorCol));
}
// Builds document nodes (molecule group / molecule / precursor group / transition)
// from the parsed Rows and merges them into the given document, reusing existing
// nodes when names, accession numbers, formulas or m/z values match.
// Returns the updated document, or null after calling ShowTransitionError on failure.
// firstAdded receives the path of the first node actually added (null if none).
public SrmDocument CreateTargets(SrmDocument document, IdentityPath to, out IdentityPath firstAdded)
{
    firstAdded = null;
    var precursorNamesSeen = document.CustomMolecules.Select(mol => mol.CustomMolecule.Name)
        .Where(n => !string.IsNullOrEmpty(n)).ToHashSet();
    var groupNamesSeen = document.MoleculeGroups.Select(group => group.Name)
        .Where(n => !string.IsNullOrEmpty(n)).ToHashSet();
    MzMatchTolerance = document.Settings.TransitionSettings.Instrument.MzMatchTolerance;

    // We will accept a completely empty product list as meaning
    // "these are all precursor transitions"
    var requireProductInfo = false;
    var hasAnyMoleculeMz = Rows.Any(row => !string.IsNullOrEmpty(row.GetCell(INDEX_PRECURSOR_MZ)));
    var hasAnyMoleculeFormula = Rows.Any(row => !string.IsNullOrEmpty(row.GetCell(INDEX_MOLECULE_FORMULA)));
    var hasAnyMoleculeCharge = Rows.Any(row => !string.IsNullOrEmpty(row.GetCell(INDEX_PRECURSOR_CHARGE)));
    var hasAnyMoleculeAdduct = Rows.Any(row => !string.IsNullOrEmpty(row.GetCell(INDEX_PRECURSOR_ADDUCT)));
    // First pass: decide whether any row describes a product distinct from its precursor.
    foreach (var row in Rows)
    {
        if ((hasAnyMoleculeMz && RowHasDistinctProductValue(row, INDEX_PRODUCT_MZ, INDEX_PRECURSOR_MZ)) ||
            (hasAnyMoleculeFormula &&
             RowHasDistinctProductValue(row, INDEX_PRODUCT_FORMULA, INDEX_MOLECULE_FORMULA)) ||
            (hasAnyMoleculeCharge &&
             RowHasDistinctProductValue(row, INDEX_PRODUCT_CHARGE, INDEX_PRECURSOR_CHARGE)) ||
            (hasAnyMoleculeAdduct &&
             RowHasDistinctProductValue(row, INDEX_PRODUCT_ADDUCT, INDEX_PRECURSOR_ADDUCT)))
        {
            requireProductInfo = true; // Product list is not completely empty, or not just precursors
            break;
        }
        // More expensive check to see whether calculated precursor mz matches any declared product mz
        var precursor = ReadPrecursorOrProductColumns(document, row, null); // Get precursor values
        if (precursor != null)
        {
            try
            {
                var product = ReadPrecursorOrProductColumns(document, row, precursor); // Get product values, if available
                if (product != null && (Math.Abs(precursor.Mz.Value - product.Mz.Value) > MzMatchTolerance))
                {
                    requireProductInfo = true; // Product list is not completely empty, or not just precursors
                    break;
                }
            }
            catch (LineColNumberedIoException)
            {
                // No product info to be had in this line (so this is a precursor) but there may be others, keep looking
            }
        }
    }

    string defaultPepGroupName = null;
    var docStart = document;
    document = document.BeginDeferSettingsChanges(); // Prevents excessive calls to SetDocumentType etc

    // For each row in the grid, add to or begin MoleculeGroup|Molecule|TransitionList tree
    foreach (var row in Rows)
    {
        var precursor = ReadPrecursorOrProductColumns(document, row, null); // Get molecule values
        if (precursor == null)
            return null;
        if (requireProductInfo && ReadPrecursorOrProductColumns(document, row, precursor) == null)
        {
            return null;
        }
        var groupName = row.GetCell(INDEX_MOLECULE_GROUP);

        // Preexisting molecule group?
        bool pepGroupFound = false;
        if (string.IsNullOrEmpty(groupName) || !groupNamesSeen.Add(groupName)) // If group name is unique (so far), no need to search document for it
        {
            var adduct = precursor.Adduct;
            var precursorMonoMz = adduct.MzFromNeutralMass(precursor.MonoMass);
            var precursorAverageMz = adduct.MzFromNeutralMass(precursor.AverageMass);
            if (string.IsNullOrEmpty(groupName))
            {
                groupName = defaultPepGroupName;
            }

            foreach (var pepGroup in document.MoleculeGroups)
            {
                if (Equals(pepGroup.Name, groupName))
                {
                    // Found a molecule group with the same name - can we find an existing transition group to which we can add a transition?
                    pepGroupFound = true;
                    var pathPepGroup = new IdentityPath(pepGroup.Id);

                    bool pepFound = false;
                    if (string.IsNullOrEmpty(precursor.Name) || !precursorNamesSeen.Add(precursor.Name)) // If precursor name is unique (so far), no need to hunt for other occurences in the doc we're building
                    {
                        foreach (var pep in pepGroup.SmallMolecules)
                        {
                            // Match existing molecule if same name
                            if (!string.IsNullOrEmpty(precursor.Name))
                            {
                                pepFound =
                                    Equals(pep.CustomMolecule.Name,
                                        precursor.Name); // If user says they're the same, believe them unless accession numbers disagree
                                if (pepFound && !pep.CustomMolecule.AccessionNumbers.IsEmpty && !precursor.MoleculeAccessionNumbers.IsEmpty)
                                {
                                    // We've seen HMDB entries with different forumlas but identical names (e.g. HMDB0013124 and HMDB0013125)
                                    pepFound = Equals(pep.CustomMolecule.AccessionNumbers, precursor.MoleculeAccessionNumbers);
                                }
                            }
                            else // If no names, look to other cues
                            {
                                var ionMonoMz =
                                    adduct.MzFromNeutralMass(pep.CustomMolecule.MonoisotopicMass, MassType.Monoisotopic);
                                var ionAverageMz =
                                    adduct.MzFromNeutralMass(pep.CustomMolecule.AverageMass, MassType.Average);
                                var labelType = precursor.IsotopeLabelType ?? IsotopeLabelType.light;
                                // Match existing molecule if same formula or identical formula when stripped of labels
                                pepFound |= !string.IsNullOrEmpty(pep.CustomMolecule.Formula) &&
                                            (Equals(pep.CustomMolecule.Formula, precursor.NeutralFormula) ||
                                             Equals(pep.CustomMolecule.Formula, precursor.Formula) ||
                                             Equals(pep.CustomMolecule.UnlabeledFormula,
                                                 BioMassCalc.MONOISOTOPIC.StripLabelsFromFormula(precursor
                                                     .NeutralFormula)) ||
                                             Equals(pep.CustomMolecule.UnlabeledFormula, precursor.UnlabeledFormula));
                                // Match existing molecule if similar m/z at the precursor charge
                                pepFound |= Math.Abs(ionMonoMz - precursorMonoMz) <= MzMatchTolerance &&
                                            Math.Abs(ionAverageMz - precursorAverageMz) <=
                                            MzMatchTolerance; // (we don't just check mass since we don't have a tolerance value for that)
                                // Or no formula, and different isotope labels or matching label and mz
                                pepFound |= string.IsNullOrEmpty(pep.CustomMolecule.Formula) &&
                                            string.IsNullOrEmpty(precursor.Formula) &&
                                            (!pep.TransitionGroups.Any(t => Equals(t.TransitionGroup.LabelType,
                                                 labelType)) || // First label of this kind
                                             pep.TransitionGroups.Any(
                                                 t => Equals(t.TransitionGroup.LabelType,
                                                          labelType) && // Already seen this label, and
                                                      Math.Abs(precursor.Mz - t.PrecursorMz) <=
                                                      MzMatchTolerance)); // Matches precursor mz of similar labels
                            }

                            if (pepFound)
                            {
                                bool tranGroupFound = false;
                                var pepPath = new IdentityPath(pathPepGroup, pep.Id);
                                foreach (var tranGroup in pep.TransitionGroups)
                                {
                                    var pathGroup = new IdentityPath(pepPath, tranGroup.Id);
                                    if (Math.Abs(tranGroup.PrecursorMz - precursor.Mz) <= MzMatchTolerance)
                                    {
                                        tranGroupFound = true;
                                        var tranFound = false;
                                        string errmsg = null;
                                        try
                                        {
                                            var tranNode = GetMoleculeTransition(document, row, pep.Peptide,
                                                tranGroup.TransitionGroup, requireProductInfo);
                                            if (tranNode == null)
                                                return null;
                                            // Skip the add if an identical transition already exists.
                                            foreach (var tran in tranGroup.Transitions)
                                            {
                                                if (Equals(tranNode.Transition.CustomIon, tran.Transition.CustomIon))
                                                {
                                                    tranFound = true;
                                                    break;
                                                }
                                            }

                                            if (!tranFound)
                                            {
                                                document = (SrmDocument) document.Add(pathGroup, tranNode);
                                                firstAdded = firstAdded ?? pathGroup;
                                            }
                                        }
                                        catch (InvalidDataException x)
                                        {
                                            errmsg = x.Message;
                                        }
                                        catch (InvalidOperationException x) // Adduct handling code can throw these
                                        {
                                            errmsg = x.Message;
                                        }

                                        if (errmsg != null)
                                        {
                                            // Some error we didn't catch in the basic checks
                                            ShowTransitionError(new PasteError
                                            {
                                                Column = 0,
                                                Line = row.Index,
                                                Message = errmsg
                                            });
                                            return null;
                                        }
                                        break;
                                    }
                                }

                                if (!tranGroupFound)
                                {
                                    var node =
                                        GetMoleculeTransitionGroup(document, row, pep.Peptide, requireProductInfo);
                                    if (node == null)
                                        return null;
                                    document = (SrmDocument) document.Add(pepPath, node);
                                    firstAdded = firstAdded ?? pepPath;
                                }

                                break;
                            }
                        }
                    }

                    if (!pepFound)
                    {
                        var node = GetMoleculePeptide(document, row, pepGroup.PeptideGroup, requireProductInfo);
                        if (node == null)
                            return null;
                        document = (SrmDocument) document.Add(pathPepGroup, node);
                        firstAdded = firstAdded ?? pathPepGroup;
                    }

                    break;
                }
            }
        }

        if (!pepGroupFound)
        {
            // No existing group matched - start a new molecule group for this row.
            var node = GetMoleculePeptideGroup(document, row, requireProductInfo);
            if (node == null)
                return null;
            IdentityPath first;
            IdentityPath next;
            document = document.AddPeptideGroups(new[] {node}, false, to, out first, out next);
            if (string.IsNullOrEmpty(defaultPepGroupName))
            {
                defaultPepGroupName = node.Name;
            }

            firstAdded = firstAdded ?? first;
            if (!string.IsNullOrEmpty(precursor.Name))
                precursorNamesSeen.Add(precursor.Name);
            groupNamesSeen.Add(node.Name);
        }
    }
    document = document.EndDeferSettingsChanges(docStart, null); // Process deferred calls to SetDocumentType etc
    return document;
}
// Column index lookups for each known small-molecule transition list header.
// Each returns the column's position in the input, or a negative value when the
// column is absent (Row.GetCell treats negative indices as "missing").
private int INDEX_MOLECULE_GROUP
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.moleculeGroup); }
}

private int INDEX_MOLECULE_NAME
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.namePrecursor); }
}

private int INDEX_PRODUCT_NAME
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.nameProduct); }
}

private int INDEX_MOLECULE_FORMULA
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.formulaPrecursor); }
}

private int INDEX_PRECURSOR_ADDUCT
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.adductPrecursor); }
}

private int INDEX_PRODUCT_FORMULA
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.formulaProduct); }
}

private int INDEX_PRODUCT_ADDUCT
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.adductProduct); }
}

private int INDEX_PRECURSOR_MZ
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.mzPrecursor); }
}

private int INDEX_PRODUCT_MZ
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.mzProduct); }
}

private int INDEX_PRECURSOR_CHARGE
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.chargePrecursor); }
}

private int INDEX_PRODUCT_CHARGE
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.chargeProduct); }
}

private int INDEX_LABEL_TYPE
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.labelType); }
}

// Retention time columns
private int INDEX_RETENTION_TIME
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.rtPrecursor); }
}

private int INDEX_RETENTION_TIME_WINDOW
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.rtWindowPrecursor); }
}

private int INDEX_COLLISION_ENERGY
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.cePrecursor); }
}

private int INDEX_NOTE
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.note); }
}

// Ion mobility / drift time columns
private int INDEX_PRECURSOR_DRIFT_TIME_MSEC
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.dtPrecursor); }
}

private int INDEX_HIGH_ENERGY_DRIFT_TIME_OFFSET_MSEC
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.dtHighEnergyOffset); }
}

private int INDEX_PRECURSOR_ION_MOBILITY
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.imPrecursor); }
}

private int INDEX_PRECURSOR_ION_MOBILITY_UNITS
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.imUnits); }
}

private int INDEX_HIGH_ENERGY_ION_MOBILITY_OFFSET
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.imHighEnergyOffset); }
}

private int INDEX_PRECURSOR_CCS
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.ccsPrecursor); }
}

// Instrument parameter columns
private int INDEX_SLENS
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.slens); }
}

private int INDEX_CONE_VOLTAGE
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.coneVoltage); }
}

private int INDEX_COMPENSATION_VOLTAGE
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.compensationVoltage); }
}

private int INDEX_DECLUSTERING_POTENTIAL
{
    get { return ColumnIndex(SmallMoleculeTransitionListColumnHeaders.declusteringPotential); }
}
// Reconciles a molecular formula with a declared m/z value.
// Returns the (possibly recalculated) charge on success, or null when no
// reasonable charge could be derived.  May rewrite moleculeFormula (adding an H)
// to accommodate old-style inputs where the user expected protonation to be
// applied for them.  monoMass/averageMass are the neutral masses of the final
// formula (zero when nothing worked); mzCalc is the m/z implied by the given
// adduct/charge, when one could be computed.
private static int? ValidateFormulaWithMz(SrmDocument document, ref string moleculeFormula, Adduct adduct,
    TypedMass mz, int? charge, out TypedMass monoMass, out TypedMass averageMass, out double? mzCalc)
{
    // Is the ion's formula the old style where user expected us to add a hydrogen?
    var tolerance = document.Settings.TransitionSettings.Instrument.MzMatchTolerance;
    int massShift;
    var ion = new CustomIon(moleculeFormula);
    monoMass = ion.GetMass(MassType.Monoisotopic);
    averageMass = ion.GetMass(MassType.Average);
    var mass = mz.IsMonoIsotopic()
        ? monoMass
        : averageMass;
    // Does given charge, if any, agree with mass and mz?
    if (adduct.IsEmpty && charge.HasValue)
    {
        adduct = Adduct.NonProteomicProtonatedFromCharge(charge.Value);
    }
    mzCalc = adduct.AdductCharge != 0 ? adduct.MzFromNeutralMass(mass) : (double?) null;
    if (mzCalc.HasValue && tolerance >= (Math.Abs(mzCalc.Value - mz)))
    {
        return charge;
    }
    // Declared charge didn't work out - see if some charge in the legal range does.
    int nearestCharge;
    var calculatedCharge = TransitionCalc.CalcCharge(mass, mz, tolerance, true,
        TransitionGroup.MIN_PRECURSOR_CHARGE,
        TransitionGroup.MAX_PRECURSOR_CHARGE, new int[0],
        TransitionCalc.MassShiftType.none, out massShift, out nearestCharge);
    if (calculatedCharge.IsEmpty)
    {
        // That formula and this mz don't yield a reasonable charge state - try adding an H
        var ion2 = new CustomMolecule(BioMassCalc.AddH(ion.FormulaWithAdductApplied));
        monoMass = ion2.GetMass(MassType.Monoisotopic);
        averageMass = ion2.GetMass(MassType.Average);
        // NOTE(review): this retry keys mono-vs-average off the document's fragment
        // mass type, while the first attempt above keyed off the mz value itself -
        // looks deliberate but worth confirming.
        mass = (document.Settings.TransitionSettings.Prediction.FragmentMassType.IsMonoisotopic())
            ? monoMass
            : averageMass;
        calculatedCharge = TransitionCalc.CalcCharge(mass, mz, tolerance, true,
            TransitionGroup.MIN_PRECURSOR_CHARGE,
            TransitionGroup.MAX_PRECURSOR_CHARGE, new int[0], TransitionCalc.MassShiftType.none, out massShift,
            out nearestCharge);
        if (!calculatedCharge.IsEmpty)
        {
            // The H-added formula worked - keep it.
            moleculeFormula = ion2.Formula;
        }
        else
        {
            // Nothing worked; report zero masses to the caller.
            monoMass = TypedMass.ZERO_MONO_MASSNEUTRAL;
            averageMass = TypedMass.ZERO_AVERAGE_MASSNEUTRAL;
        }
    }
    charge = calculatedCharge.IsEmpty ? (int?) null : calculatedCharge.AdductCharge;
    return charge;
}
// Computes the m/z implied by a neutral formula and an adduct, also returning
// the formula's neutral mono and average masses through the out parameters.
// The result carries the requested mass type, since mono-vs-average is still
// interesting downstream even though an m/z is not literally a mass.
private TypedMass ValidateFormulaWithCharge(MassType massType, string moleculeFormula, Adduct adduct,
    out TypedMass monoMass, out TypedMass averageMass)
{
    var molecule = new CustomMolecule(moleculeFormula);
    monoMass = molecule.GetMass(MassType.Monoisotopic);
    averageMass = molecule.GetMass(MassType.Average);
    var neutralMass = massType.IsMonoisotopic() ? monoMass : averageMass;
    var mzValue = adduct.MzFromNeutralMass(neutralMass, massType);
    return new TypedMass(mzValue, massType);
}
// Normalizes empty strings to null so callers can use a single null test
// for "no value given".
public static string NullForEmpty(string str)
{
    return string.IsNullOrEmpty(str) ? null : str;
}
// Immutable bundle of everything parsed from one row for either the precursor
// or the product side: identity (name, accession numbers), adduct and masses,
// isotope label, and any explicit per-group/per-transition values.
private class ParsedIonInfo : IonInfo
{
    public string Name { get; private set; }
    public string Note { get; private set; }
    public TypedMass Mz { get; private set; } // Not actually a mass, of course, but useful to know if its based on mono vs avg mass
    public Adduct Adduct { get; private set; }
    public TypedMass MonoMass { get; private set; }
    public TypedMass AverageMass { get; private set; }
    public IsotopeLabelType IsotopeLabelType { get; private set; }
    public ExplicitRetentionTimeInfo ExplicitRetentionTime { get; private set; }
    public ExplicitTransitionGroupValues ExplicitTransitionGroupValues { get; private set; }
    public ExplicitTransitionValues ExplicitTransitionValues { get; private set; }
    public MoleculeAccessionNumbers MoleculeAccessionNumbers { get; private set; } // InChiKey, CAS etc

    public ParsedIonInfo(string name, string formula, Adduct adduct,
        TypedMass mz, // Not actually a mass, of course, but still useful to know if based on Mono or Average mass
        TypedMass monoMass,
        TypedMass averageMass,
        IsotopeLabelType isotopeLabelType,
        ExplicitRetentionTimeInfo explicitRetentionTime,
        ExplicitTransitionGroupValues explicitTransitionGroupValues,
        ExplicitTransitionValues explicitTransitionValues,
        string note,
        MoleculeAccessionNumbers accessionNumbers) : base(formula)
    {
        Name = name;
        Adduct = adduct;
        Mz = mz;
        MonoMass = monoMass;
        AverageMass = averageMass;
        IsotopeLabelType = isotopeLabelType;
        ExplicitRetentionTime = explicitRetentionTime;
        ExplicitTransitionGroupValues = explicitTransitionGroupValues;
        ExplicitTransitionValues = explicitTransitionValues;
        Note = note;
        MoleculeAccessionNumbers = accessionNumbers;
    }

    // Returns a copy with only the Note changed (clone-and-change immutability idiom).
    public ParsedIonInfo ChangeNote(string note)
    {
        return ChangeProp(ImClone(this), im =>
        {
            im.Note = note;
        });
    }

    // Converts this parsed info into a document-level CustomMolecule.
    public CustomMolecule ToCustomMolecule()
    {
        return new CustomMolecule(Formula, MonoMass, AverageMass, Name ?? string.Empty,
            MoleculeAccessionNumbers);
    }
}
// Validates a declared charge against the legal precursor or product range.
// A missing or zero charge is accepted here (other code decides whether one is
// required).  On failure, errMessage is set to a user-facing description.
private bool ValidateCharge(int? charge, bool getPrecursorColumns, out string errMessage)
{
    errMessage = null;
    var absCharge = Math.Abs(charge ?? 0);
    if (absCharge == 0)
        return true; // No charge declared - nothing to range-check
    if (getPrecursorColumns)
    {
        if (absCharge < TransitionGroup.MIN_PRECURSOR_CHARGE ||
            absCharge > TransitionGroup.MAX_PRECURSOR_CHARGE)
        {
            errMessage = String.Format(
                Resources.Transition_Validate_Precursor_charge__0__must_be_non_zero_and_between__1__and__2__,
                charge, -TransitionGroup.MAX_PRECURSOR_CHARGE, TransitionGroup.MAX_PRECURSOR_CHARGE);
            return false;
        }
    }
    else if (absCharge < Transition.MIN_PRODUCT_CHARGE || absCharge > Transition.MAX_PRODUCT_CHARGE)
    {
        errMessage = String.Format(
            Resources.Transition_Validate_Product_ion_charge__0__must_be_non_zero_and_between__1__and__2__,
            charge, -Transition.MAX_PRODUCT_CHARGE, Transition.MAX_PRODUCT_CHARGE);
        return false;
    }
    return true;
}
// Reads the molecule-identifier columns (InChiKey, HMDB, InChI, CAS, SMILES)
// from a row, doing light format validation on each.  Returns the collected
// accession numbers, MoleculeAccessionNumbers.EMPTY when no IDs were given,
// or null after reporting a PasteError for a malformed identifier.
private MoleculeAccessionNumbers ReadMoleculeAccessionNumberColumns(Row row)
{
    var moleculeIdKeys = new Dictionary<string, string>();

    var inchikeyCol = ColumnIndex(SmallMoleculeTransitionListColumnHeaders.idInChiKey);
    var inchikey = NullForEmpty(row.GetCell(inchikeyCol));
    if (inchikey != null)
    {
        // Should have form like BQJCRHHNABKAKU-KBQPJGBKSA-N
        inchikey = inchikey.Trim();
        if (inchikey.Length != 27 || inchikey[14] != '-' || inchikey[25] != '-')
        {
            ShowTransitionError(new PasteError
            {
                Column = inchikeyCol,
                Line = row.Index,
                Message = string.Format(
                    Resources.SmallMoleculeTransitionListReader_ReadMoleculeIdColumns__0__is_not_a_valid_InChiKey_,
                    inchikey)
            });
            return null;
        }
        // Only record the key when one was actually given, consistent with the
        // other accession types below.  (Previously a null InChiKey entry was
        // added unconditionally, which made the "no IDs at all" EMPTY check at
        // the end never succeed.)
        moleculeIdKeys.Add(MoleculeAccessionNumbers.TagInChiKey, inchikey);
    }

    var hmdbCol = ColumnIndex(SmallMoleculeTransitionListColumnHeaders.idHMDB);
    var hmdb = NullForEmpty(row.GetCell(hmdbCol));
    if (hmdb != null)
    {
        // Should have form like HMDB0001, though we will accept just 00001
        hmdb = hmdb.Trim();
        if (!hmdb.StartsWith(MoleculeAccessionNumbers.TagHMDB) && !hmdb.All(char.IsDigit))
        {
            hmdb = MoleculeAccessionNumbers.TagHMDB + hmdb;
        }
        if ((hmdb.Length < 5) || !hmdb.Skip(4).All(char.IsDigit))
        {
            ShowTransitionError(new PasteError
            {
                Column = hmdbCol,
                Line = row.Index,
                Message =
                    string.Format(
                        Resources.SmallMoleculeTransitionListReader_ReadMoleculeIdColumns__0__is_not_a_valid_HMDB_identifier_,
                        hmdb)
            });
            return null;
        }
        // Store just the numeric part; the HMDB prefix is implied by the tag.
        moleculeIdKeys.Add(MoleculeAccessionNumbers.TagHMDB, hmdb.Substring(4));
    }

    var inchiCol = ColumnIndex(SmallMoleculeTransitionListColumnHeaders.idInChi);
    var inchi = NullForEmpty(row.GetCell(inchiCol));
    if (inchi != null)
    {
        // Should have form like "InChI=1S/C4H8O3/c1-3(5)2-4(6)7/h3,5H,2H2,1H3,(H,6,7)/t3-/m1/s",
        // though we will accept just "1S/C4H8O3/c1-3(5)2-4(6)7/h3,5H,2H2,1H3,(H,6,7)/t3-/m1/s"
        inchi = inchi.Trim();
        if (!inchi.StartsWith(MoleculeAccessionNumbers.TagInChI + @"="))
        {
            inchi = MoleculeAccessionNumbers.TagInChI + @"=" + inchi;
        }
        if (inchi.Length < 6 || inchi.Count(c => c == '/') < 2)
        {
            // CONSIDER(bspratt) more robust regex check on this?
            ShowTransitionError(new PasteError
            {
                Column = inchiCol,
                Line = row.Index,
                Message =
                    string.Format(
                        Resources
                            .SmallMoleculeTransitionListReader_ReadMoleculeIdColumns__0__is_not_a_valid_InChI_identifier_,
                        inchi)
            });
            return null;
        }
        // Store the body without the "InChI=" prefix; the tag implies it.
        moleculeIdKeys.Add(MoleculeAccessionNumbers.TagInChI, inchi.Substring(6));
    }

    var casCol = ColumnIndex(SmallMoleculeTransitionListColumnHeaders.idCAS);
    var cas = NullForEmpty(row.GetCell(casCol));
    if (cas != null)
    {
        // Should have form like "123-45-6",
        var parts = cas.Trim().Split('-');
        if (parts.Length != 3 || parts.Any(part => !part.All(char.IsDigit)))
        {
            ShowTransitionError(new PasteError
            {
                Column = casCol,
                Line = row.Index,
                Message =
                    string.Format(
                        Resources
                            .SmallMoleculeTransitionListReader_ReadMoleculeIdColumns__0__is_not_a_valid_CAS_registry_number_,
                        cas)
            });
            return null;
        }
        moleculeIdKeys.Add(MoleculeAccessionNumbers.TagCAS, cas);
    }

    var smilesCol = ColumnIndex(SmallMoleculeTransitionListColumnHeaders.idSMILES);
    var smiles = NullForEmpty(row.GetCell(smilesCol));
    if (smiles != null)
    {
        // Should have form like CCc1nn(C)c2c(=O)[nH]c(nc12)c3cc(ccc3OCC)S(=O)(=O)N4CCN(C)CC4 but we'll accept anything for now, having no proper parser
        smiles = smiles.Trim();
        moleculeIdKeys.Add(MoleculeAccessionNumbers.TagSMILES, smiles);
    }

    return !moleculeIdKeys.Any()
        ? MoleculeAccessionNumbers.EMPTY
        : new MoleculeAccessionNumbers(moleculeIdKeys);
}
// Parses an ion mobility units value from an XML attribute string.
// An empty or missing attribute means "no ion mobility declared".
public static eIonMobilityUnits IonMobilityUnitsFromAttributeValue(string xmlAttributeValue)
{
    if (string.IsNullOrEmpty(xmlAttributeValue))
        return eIonMobilityUnits.none;
    return TypeSafeEnum.Parse<eIonMobilityUnits>(xmlAttributeValue);
}
// Recognize XML attribute values, enum strings, and various other synonyms
// Maps every eIonMobilityUnits enum name to itself, plus a few informal
// spellings ("msec", "Vsec/cm2", "Vsec/cm^2", "1/K0") to their enum values.
public static readonly Dictionary<string, eIonMobilityUnits> IonMobilityUnitsSynonyms =
    Enum.GetValues(typeof(eIonMobilityUnits)).Cast<eIonMobilityUnits>().ToDictionary(e => e.ToString(), e => e)
        .Concat(new Dictionary<string, eIonMobilityUnits> {
            { @"msec", eIonMobilityUnits.drift_time_msec },
            { @"Vsec/cm2", eIonMobilityUnits.inverse_K0_Vsec_per_cm2 },
            { @"Vsec/cm^2", eIonMobilityUnits.inverse_K0_Vsec_per_cm2 },
            { @"1/K0", eIonMobilityUnits.inverse_K0_Vsec_per_cm2 }
        }).ToDictionary(x => x.Key, x=> x.Value);
/// <summary>
/// Return a comma-separated list of all recognized ion mobility unit names,
/// suitable for inclusion in an error message.
/// </summary>
public static string GetAcceptedIonMobilityUnitsString()
{
    var acceptedNames = IonMobilityUnitsSynonyms.Keys;
    return string.Join(@", ", acceptedNames);
}
// We need some combination of:
// Formula and mz
// Formula and charge
// mz and charge
/// <summary>
/// Parse either the precursor columns (when precursorInfo is null) or the
/// product columns (when precursorInfo is the already-parsed precursor) of a
/// row into a ParsedIonInfo. Returns null after calling ShowTransitionError
/// when any column fails validation. May rewrite cells in the row (adduct,
/// formula, charge, mz, label) as values are normalized or inferred.
/// </summary>
private ParsedIonInfo ReadPrecursorOrProductColumns(SrmDocument document,
Row row,
ParsedIonInfo precursorInfo)
{
// Null precursorInfo means this call is reading the precursor columns;
// otherwise it reads the product columns of the same row.
var getPrecursorColumns = precursorInfo == null;
int indexName = getPrecursorColumns ? INDEX_MOLECULE_NAME : INDEX_PRODUCT_NAME;
int indexFormula = getPrecursorColumns ? INDEX_MOLECULE_FORMULA : INDEX_PRODUCT_FORMULA;
int indexAdduct = getPrecursorColumns ? INDEX_PRECURSOR_ADDUCT : INDEX_PRODUCT_ADDUCT;
int indexMz = getPrecursorColumns ? INDEX_PRECURSOR_MZ : INDEX_PRODUCT_MZ;
int indexCharge = getPrecursorColumns ? INDEX_PRECURSOR_CHARGE : INDEX_PRODUCT_CHARGE;
var name = NullForEmpty(row.GetCell(indexName));
var formula = NullForEmpty(row.GetCell(indexFormula));
var adductText = NullForEmpty(row.GetCell(indexAdduct));
var note = NullForEmpty(row.GetCell(INDEX_NOTE));
// TODO(bspratt) use CAS or HMDB etc lookup to fill in missing inchikey - and use any to fill in formula
var moleculeID = ReadMoleculeAccessionNumberColumns(row);
IsotopeLabelType isotopeLabelType = null;
bool badMz = false;
var mzType = getPrecursorColumns
? document.Settings.TransitionSettings.Prediction.PrecursorMassType
: document.Settings.TransitionSettings.Prediction.FragmentMassType;
double mzParsed;
if (!row.GetCellAsDouble(indexMz, out mzParsed))
{
// Non-numeric, non-empty text in the m/z column is an error; empty means "not given"
if (!String.IsNullOrEmpty(row.GetCell(indexMz)))
{
badMz = true;
}
mzParsed = 0;
}
var mz = new TypedMass(mzParsed, mzType); // mz is not actually a mass, of course, but we want to track mass type it was calculated from
if ((mz < 0) || badMz)
{
ShowTransitionError(new PasteError
{
Column = indexMz,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_m_z_value__0_, row.GetCell(indexMz))
});
return null;
}
// Charge may be given as a plain integer, or as an adduct description in the charge column
int? charge = null;
var adduct = Adduct.EMPTY;
int trycharge;
if (Int32.TryParse(row.GetCell(indexCharge), out trycharge))
charge = trycharge;
else if (!String.IsNullOrEmpty(row.GetCell(indexCharge)))
{
Adduct test;
if (Adduct.TryParse(row.GetCell(indexCharge), out test))
{
// Adduct formula in charge column, let's allow it
adduct = test;
charge = adduct.AdductCharge;
}
else
{
ShowTransitionError(new PasteError
{
Column = indexCharge,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_charge_value__0_, row.GetCell(indexCharge))
});
return null;
}
}
double dtmp;
double? collisionEnergy = null;
double? slens = null;
double? coneVoltage = null;
double? retentionTime = null;
double? retentionTimeWindow = null;
double? declusteringPotential = null;
double? compensationVoltage = null;
// These columns are meaningful only on the precursor side of the row:
// molecule IDs, isotope label type, compensation voltage, and retention time
if (getPrecursorColumns)
{
// Do we have any molecule IDs?
moleculeID = ReadMoleculeAccessionNumberColumns(row);
if (moleculeID == null)
{
return null; // Some error occurred
}
var label = NullForEmpty(row.GetCell(INDEX_LABEL_TYPE));
if (label != null)
{
var typedMods = document.Settings.PeptideSettings.Modifications.GetModificationsByName(label);
if (typedMods == null)
{
ShowTransitionError(new PasteError
{
Column = INDEX_LABEL_TYPE,
Line = row.Index,
Message = string.Format(Resources.SrmDocument_ReadLabelType_The_isotope_modification_type__0__does_not_exist_in_the_document_settings, label)
});
return null;
}
isotopeLabelType = typedMods.LabelType;
}
if (row.GetCellAsDouble(INDEX_COMPENSATION_VOLTAGE, out dtmp))
compensationVoltage = dtmp;
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_COMPENSATION_VOLTAGE)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_COMPENSATION_VOLTAGE,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_compensation_voltage__0_, row.GetCell(INDEX_COMPENSATION_VOLTAGE))
});
return null;
}
if (row.GetCellAsDouble(INDEX_RETENTION_TIME, out dtmp))
retentionTime = dtmp;
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_RETENTION_TIME)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_RETENTION_TIME,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_retention_time_value__0_, row.GetCell(INDEX_RETENTION_TIME))
});
return null;
}
if (row.GetCellAsDouble(INDEX_RETENTION_TIME_WINDOW, out dtmp))
{
retentionTimeWindow = dtmp;
// A window only makes sense relative to an explicit retention time
if (!retentionTime.HasValue)
{
ShowTransitionError(new PasteError
{
Column = INDEX_RETENTION_TIME_WINDOW,
Line = row.Index,
Message = Resources.Peptide_ExplicitRetentionTimeWindow_Explicit_retention_time_window_requires_an_explicit_retention_time_value_
});
return null;
}
}
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_RETENTION_TIME_WINDOW)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_RETENTION_TIME_WINDOW,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_retention_time_window_value__0_, row.GetCell(INDEX_RETENTION_TIME_WINDOW))
});
return null;
}
}
if (row.GetCellAsDouble(INDEX_COLLISION_ENERGY, out dtmp))
collisionEnergy = dtmp;
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_COLLISION_ENERGY)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_COLLISION_ENERGY,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_collision_energy_value__0_, row.GetCell(INDEX_COLLISION_ENERGY))
});
return null;
}
if (row.GetCellAsDouble(INDEX_SLENS, out dtmp))
slens = dtmp;
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_SLENS)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_SLENS,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_S_Lens_value__0_, row.GetCell(INDEX_SLENS))
});
return null;
}
if (row.GetCellAsDouble(INDEX_CONE_VOLTAGE, out dtmp))
coneVoltage = dtmp;
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_CONE_VOLTAGE)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_CONE_VOLTAGE,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_cone_voltage_value__0_, row.GetCell(INDEX_CONE_VOLTAGE))
});
return null;
}
if (row.GetCellAsDouble(INDEX_DECLUSTERING_POTENTIAL, out dtmp))
declusteringPotential = dtmp;
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_DECLUSTERING_POTENTIAL)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_DECLUSTERING_POTENTIAL,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_declustering_potential__0_, row.GetCell(INDEX_DECLUSTERING_POTENTIAL))
});
return null;
}
// Ion mobility: legacy drift-time columns imply msec units; the generic
// ion mobility columns require an explicit units column
double? ionMobility = null;
var ionMobilityUnits = eIonMobilityUnits.none;
if (row.GetCellAsDouble(INDEX_PRECURSOR_DRIFT_TIME_MSEC, out dtmp))
{
ionMobility = dtmp;
ionMobilityUnits = eIonMobilityUnits.drift_time_msec;
}
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_PRECURSOR_DRIFT_TIME_MSEC)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_PRECURSOR_DRIFT_TIME_MSEC,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_drift_time_value__0_, row.GetCell(INDEX_PRECURSOR_DRIFT_TIME_MSEC))
});
return null;
}
double? ionMobilityHighEnergyOffset = null;
if (row.GetCellAsDouble(INDEX_HIGH_ENERGY_DRIFT_TIME_OFFSET_MSEC, out dtmp))
{
ionMobilityHighEnergyOffset = dtmp;
ionMobilityUnits = eIonMobilityUnits.drift_time_msec;
}
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_HIGH_ENERGY_DRIFT_TIME_OFFSET_MSEC)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_HIGH_ENERGY_DRIFT_TIME_OFFSET_MSEC,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_ReadPrecursorOrProductColumns_Invalid_drift_time_high_energy_offset_value__0_, row.GetCell(INDEX_HIGH_ENERGY_DRIFT_TIME_OFFSET_MSEC))
});
return null;
}
string unitsIM = row.GetCell(INDEX_PRECURSOR_ION_MOBILITY_UNITS);
if (!string.IsNullOrEmpty(unitsIM))
{
if (!IonMobilityUnitsSynonyms.TryGetValue(unitsIM.Trim(), out ionMobilityUnits))
{
ShowTransitionError(new PasteError
{
Column = INDEX_PRECURSOR_ION_MOBILITY_UNITS,
Line = row.Index,
Message = String.Format(Resources.SmallMoleculeTransitionListReader_ReadPrecursorOrProductColumns_Invalid_ion_mobility_units_value__0___accepted_values_are__1__, row.GetCell(INDEX_PRECURSOR_ION_MOBILITY_UNITS), GetAcceptedIonMobilityUnitsString())
});
return null;
}
}
if (row.GetCellAsDouble(INDEX_PRECURSOR_ION_MOBILITY, out dtmp))
{
ionMobility = dtmp;
}
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_PRECURSOR_ION_MOBILITY)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_PRECURSOR_ION_MOBILITY,
Line = row.Index,
Message = String.Format(Resources.SmallMoleculeTransitionListReader_ReadPrecursorOrProductColumns_Invalid_ion_mobility_value__0_, row.GetCell(INDEX_PRECURSOR_ION_MOBILITY))
});
return null;
}
if (row.GetCellAsDouble(INDEX_HIGH_ENERGY_ION_MOBILITY_OFFSET, out dtmp))
{
ionMobilityHighEnergyOffset = dtmp;
}
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_HIGH_ENERGY_ION_MOBILITY_OFFSET)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_HIGH_ENERGY_ION_MOBILITY_OFFSET,
Line = row.Index,
Message = String.Format(Resources.SmallMoleculeTransitionListReader_ReadPrecursorOrProductColumns_Invalid_ion_mobility_high_energy_offset_value__0_, row.GetCell(INDEX_HIGH_ENERGY_ION_MOBILITY_OFFSET))
});
return null;
}
// CCS defaults to the precursor's value when reading product columns
double? ccsPrecursor = precursorInfo == null ? null : precursorInfo.ExplicitTransitionGroupValues.CollisionalCrossSectionSqA;
if (row.GetCellAsDouble(INDEX_PRECURSOR_CCS, out dtmp))
ccsPrecursor = dtmp;
else if (!String.IsNullOrEmpty(row.GetCell(INDEX_PRECURSOR_CCS)))
{
ShowTransitionError(new PasteError
{
Column = INDEX_PRECURSOR_CCS,
Line = row.Index,
Message = String.Format(Resources.SmallMoleculeTransitionListReader_ReadPrecursorOrProductColumns_Invalid_collisional_cross_section_value__0_, row.GetCell(INDEX_PRECURSOR_CCS))
});
return null;
}
string errMessage = String.Format(getPrecursorColumns
? Resources.PasteDlg_ValidateEntry_Error_on_line__0___Precursor_needs_values_for_any_two_of__Formula__m_z_or_Charge_
: Resources.PasteDlg_ValidateEntry_Error_on_line__0___Product_needs_values_for_any_two_of__Formula__m_z_or_Charge_, row.Index + 1);
// Do we have an adduct description? If so, pull charge from that.
if ((!string.IsNullOrEmpty(formula) && formula.Contains('[') && formula.Contains(']')) || !string.IsNullOrEmpty(adductText))
{
if (!string.IsNullOrEmpty(formula))
{
// Split a combined "formula[adduct]" description into its two parts
var parts = formula.Split('[');
var formulaAdduct = formula.Substring(parts[0].Length);
if (string.IsNullOrEmpty(adductText))
{
adductText = formulaAdduct;
}
else if (!string.IsNullOrEmpty(formulaAdduct) &&
// ReSharper disable LocalizableElement
!Equals(adductText.Replace("[", "").Replace("]", ""), formulaAdduct.Replace("[", "").Replace("]", "")))
// ReSharper restore LocalizableElement
{
ShowTransitionError(new PasteError
{
Column = indexAdduct,
Line = row.Index,
Message = Resources.SmallMoleculeTransitionListReader_ReadPrecursorOrProductColumns_Formula_already_contains_an_adduct_description__and_it_does_not_match_
});
return null;
}
formula = parts[0];
}
try
{
adduct = Adduct.FromStringAssumeChargeOnly(adductText);
IonInfo.ApplyAdductToFormula(formula??string.Empty, adduct); // Just to see if it throws
}
catch (InvalidOperationException x)
{
ShowTransitionError(new PasteError
{
Column = indexFormula,
Line = row.Index,
Message = x.Message
});
return null;
}
if (charge.HasValue && charge.Value != adduct.AdductCharge)
{
// Explict charge disagrees with adduct - is this because adduct charge is not recognized?
if (adduct.AdductCharge == 0)
{
// Update the adduct to contain the explicit charge
adduct = adduct.ChangeCharge(charge.Value);
}
else
{
ShowTransitionError(new PasteError
{
Column = indexAdduct >=0 ? indexAdduct : indexFormula,
Line = row.Index,
Message = string.Format(Resources.SmallMoleculeTransitionListReader_ReadPrecursorOrProductColumns_Adduct__0__charge__1__does_not_agree_with_declared_charge__2_, adductText, adduct.AdductCharge, charge.Value)
});
return null;
}
}
else
{
charge = adduct.AdductCharge;
}
if (!ValidateCharge(charge, getPrecursorColumns, out errMessage))
{
ShowTransitionError(new PasteError
{
Column = indexAdduct >=0 ? indexAdduct : indexFormula,
Line = row.Index,
Message = errMessage
});
return null;
}
}
// Count how many of charge, mz, formula we have - we need at least two of the three
int errColumn = indexFormula;
int countValues = 0;
if (charge.HasValue && charge.Value != 0)
{
countValues++;
if (adduct.IsEmpty)
{
// When no adduct is given, either it's implied (de)protonation, or formula is inherently charged. Formula and mz are a clue. Or it might be a precursor declaration.
try
{
if (precursorInfo != null && charge.Value.Equals(precursorInfo.Adduct.AdductCharge) && Math.Abs(mz - precursorInfo.Mz) <= MzMatchTolerance )
{
adduct = precursorInfo.Adduct; // Charge matches, mz matches, this is probably a precursor fragment declaration
}
else
{
adduct = DetermineAdductFromFormulaChargeAndMz(formula, charge.Value, mz);
}
}
catch (Exception e)
{
ShowTransitionError(new PasteError
{
Column = indexFormula >= 0 ? indexFormula : indexMz,
Line = row.Index,
Message = e.Message
});
return null;
}
row.SetCell(indexAdduct, adduct.AdductFormula);
}
}
if (mz > 0)
countValues++;
if (NullForEmpty(formula) != null)
countValues++;
if (countValues == 0 && !getPrecursorColumns &&
(string.IsNullOrEmpty(name) || Equals(precursorInfo.Name, name)))
{
// No product info found in this row, assume that this is a precursor declaration
return precursorInfo.ChangeNote(note);
}
if (countValues >= 2) // Do we have at least 2 of charge, mz, formula?
{
TypedMass monoMass;
TypedMass averageMmass;
if (ionMobility.HasValue && ionMobilityUnits == eIonMobilityUnits.none)
{
ShowTransitionError(new PasteError
{
Column = INDEX_PRECURSOR_ION_MOBILITY,
Line = row.Index,
Message = Resources.SmallMoleculeTransitionListReader_ReadPrecursorOrProductColumns_Missing_ion_mobility_units
});
return null;
}
var retentionTimeInfo = retentionTime.HasValue
? new ExplicitRetentionTimeInfo(retentionTime.Value, retentionTimeWindow)
: null;
var explicitTransitionValues = ExplicitTransitionValues.Create(collisionEnergy,ionMobilityHighEnergyOffset, slens, coneVoltage, declusteringPotential);
// Compensation voltage is expressed as a kind of ion mobility
if (compensationVoltage.HasValue)
{
ionMobility = compensationVoltage;
ionMobilityUnits = eIonMobilityUnits.compensation_V;
}
var explicitTransitionGroupValues = ExplicitTransitionGroupValues.Create(ionMobility, ionMobilityUnits, ccsPrecursor);
var massOk = true;
var massTooLow = false;
string massErrMsg = null;
if (!ValidateCharge(charge, getPrecursorColumns, out errMessage))
{
errColumn = indexCharge;
}
else if (NullForEmpty(formula) != null)
{
// We have a formula
try
{
// Can we infer a heavy label from the formula if none specified?
if (getPrecursorColumns && isotopeLabelType == null)
{
var ion = new IonInfo(formula, adduct);
if (!IonInfo.EquivalentFormulas(ion.FormulaWithAdductApplied, ion.UnlabeledFormula)) // Formula+adduct contained some heavy isotopes
{
isotopeLabelType = IsotopeLabelType.heavy;
if (INDEX_LABEL_TYPE >= 0)
{
row.UpdateCell(INDEX_LABEL_TYPE, isotopeLabelType.ToString());
}
}
}
// If formula contains isotope info, move it to the adduct
if (!adduct.IsEmpty)
{
var labels = BioMassCalc.MONOISOTOPIC.FindIsotopeLabelsInFormula(formula);
if (labels.Any())
{
adduct = adduct.ChangeIsotopeLabels(labels);
formula = BioMassCalc.MONOISOTOPIC.StripLabelsFromFormula(formula);
row.SetCell(indexFormula, formula);
row.SetCell(indexAdduct, adduct.AsFormulaOrSignedInt());
}
}
if (mz > 0)
{
// Is the ion's formula the old style where user expected us to add a hydrogen?
double? mzCalc;
charge = ValidateFormulaWithMz(document, ref formula, adduct, mz, charge, out monoMass, out averageMmass, out mzCalc);
row.SetCell(indexFormula, formula);
massOk = monoMass < CustomMolecule.MAX_MASS && averageMmass < CustomMolecule.MAX_MASS &&
!(massTooLow = charge.HasValue && (monoMass < CustomMolecule.MIN_MASS || averageMmass < CustomMolecule.MIN_MASS)); // Null charge => masses are 0 but meaningless
if (adduct.IsEmpty && charge.HasValue)
{
adduct = Adduct.FromChargeProtonated(charge);
}
if (massOk)
{
if (charge.HasValue)
{
row.UpdateCell(indexCharge, charge.Value);
return new ParsedIonInfo(name, formula, adduct, mz, monoMass, averageMmass, isotopeLabelType, retentionTimeInfo, explicitTransitionGroupValues, explicitTransitionValues, note, moleculeID);
}
else if (mzCalc.HasValue)
{
// There was an initial charge value, but it didn't make sense with formula and proposed mz
errMessage = String.Format(getPrecursorColumns
? Resources.PasteDlg_ReadPrecursorOrProductColumns_Error_on_line__0___Precursor_m_z__1__does_not_agree_with_value__2__as_calculated_from_ion_formula_and_charge_state__delta____3___Transition_Settings___Instrument___Method_match_tolerance_m_z____4_____Correct_the_m_z_value_in_the_table__or_leave_it_blank_and_Skyline_will_calculate_it_for_you_
: Resources.PasteDlg_ReadPrecursorOrProductColumns_Error_on_line__0___Product_m_z__1__does_not_agree_with_value__2__as_calculated_from_ion_formula_and_charge_state__delta____3___Transition_Settings___Instrument___Method_match_tolerance_m_z____4_____Correct_the_m_z_value_in_the_table__or_leave_it_blank_and_Skyline_will_calculate_it_for_you_,
row.Index + 1, (float)mz, (float)mzCalc.Value, (float)(mzCalc.Value - mz), (float)document.Settings.TransitionSettings.Instrument.MzMatchTolerance);
errColumn = indexMz;
}
else
{
// No charge state given, and mz makes no sense with formula
errMessage = String.Format(getPrecursorColumns
? Resources.PasteDlg_ValidateEntry_Error_on_line__0___Precursor_formula_and_m_z_value_do_not_agree_for_any_charge_state_
: Resources.PasteDlg_ValidateEntry_Error_on_line__0___Product_formula_and_m_z_value_do_not_agree_for_any_charge_state_, row.Index + 1);
errColumn = indexMz;
}
}
}
else if (charge.HasValue)
{
if (adduct.IsEmpty)
{
adduct = Adduct.FromChargeProtonated(charge);
}
// Get the mass from the formula, and mz from that and adduct
mz = ValidateFormulaWithCharge(mzType, formula, adduct, out monoMass, out averageMmass);
massOk = !((monoMass >= CustomMolecule.MAX_MASS || averageMmass >= CustomMolecule.MAX_MASS)) &&
!(massTooLow = (monoMass < CustomMolecule.MIN_MASS || averageMmass < CustomMolecule.MIN_MASS));
row.UpdateCell(indexMz, mz);
if (massOk)
return new ParsedIonInfo(name, formula, adduct, mz, monoMass, averageMmass, isotopeLabelType, retentionTimeInfo, explicitTransitionGroupValues, explicitTransitionValues, note, moleculeID);
}
}
catch (InvalidDataException x)
{
massErrMsg = x.Message;
}
catch (InvalidOperationException x) // Adduct handling code can throw these
{
massErrMsg = x.Message;
}
if (massErrMsg != null)
{
massOk = false;
}
}
else if (mz != 0 && !adduct.IsEmpty)
{
// No formula, just use charge and m/z
monoMass = adduct.MassFromMz(mz, MassType.Monoisotopic);
averageMmass = adduct.MassFromMz(mz, MassType.Average);
massOk = monoMass < CustomMolecule.MAX_MASS && averageMmass < CustomMolecule.MAX_MASS &&
!(massTooLow = (monoMass < CustomMolecule.MIN_MASS || averageMmass < CustomMolecule.MIN_MASS));
errColumn = indexMz;
if (massOk)
return new ParsedIonInfo(name, formula, adduct, mz, monoMass, averageMmass, isotopeLabelType, retentionTimeInfo, explicitTransitionGroupValues, explicitTransitionValues, note, moleculeID);
}
if (massTooLow)
{
errMessage = massErrMsg ?? String.Format(
Resources
.EditCustomMoleculeDlg_OkDialog_Custom_molecules_must_have_a_mass_greater_than_or_equal_to__0__,
CustomMolecule.MIN_MASS);
}
else if (!massOk)
{
errMessage = massErrMsg ?? String.Format(
Resources
.EditCustomMoleculeDlg_OkDialog_Custom_molecules_must_have_a_mass_less_than_or_equal_to__0__,
CustomMolecule.MAX_MASS);
}
}
// Fell through without a successful parse - report the best error we have
if (string.IsNullOrEmpty(errMessage))
{
if (!string.IsNullOrEmpty(adduct.AdductFormula) && adduct.AdductCharge == 0)
{
// Adduct with unknown charge state
errMessage =
string.Format(Resources.SmallMoleculeTransitionListReader_ReadPrecursorOrProductColumns_Cannot_derive_charge_from_adduct_description___0____Use_the_corresponding_Charge_column_to_set_this_explicitly__or_change_the_adduct_description_as_needed_, adduct.AdductFormula);
}
else
{
// Don't just leave it blank
errMessage = Resources.SmallMoleculeTransitionListReader_ReadPrecursorOrProductColumns_unknown_error;
}
}
ShowTransitionError(new PasteError
{
Column = errColumn,
Line = row.Index,
Message = errMessage
});
return null;
}
// When a charge but no adduct is given, either it's implied (de)protonation,
// or the formula is inherently charged. Formula and mz are the clues we use.
private static Adduct DetermineAdductFromFormulaChargeAndMz(string formula, int charge, TypedMass mz)
{
    if (string.IsNullOrEmpty(formula))
    {
        // If all we have is mz, don't make guesses at proton gain or loss
        return Adduct.FromChargeNoMass(charge);
    }
    if (mz == 0)
    {
        // Formula but no mz, just assume protonation
        return Adduct.NonProteomicProtonatedFromCharge(charge);
    }
    // We have both formula and mz: compute the mass implied by each candidate
    // adduct and pick whichever better agrees with the declared mz
    var protonated = Adduct.NonProteomicProtonatedFromCharge(charge); // [M-H] etc
    var chargeOnly = Adduct.FromChargeNoMass(charge); // [M-] etc
    var ionProtonated = new CustomMolecule(protonated.ApplyToFormula(formula));
    var ionChargeOnly = new CustomMolecule(chargeOnly.ApplyToFormula(formula));
    var declaredMass = mz * Math.Abs(charge);
    var errProtonated = Math.Abs(ionProtonated.GetMass(MassType.Monoisotopic) - declaredMass);
    var errChargeOnly = Math.Abs(ionChargeOnly.GetMass(MassType.Monoisotopic) - declaredMass);
    return errProtonated < errChargeOnly ? protonated : chargeOnly;
}
/// <summary>
/// Build a molecule-list (peptide group) node for the given row, or return
/// null if the row's molecule columns failed to parse (error already shown).
/// </summary>
private PeptideGroupDocNode GetMoleculePeptideGroup(SrmDocument document, Row row, bool requireProductInfo)
{
    var peptideGroup = new PeptideGroup();
    var moleculeNode = GetMoleculePeptide(document, row, peptideGroup, requireProductInfo);
    if (moleculeNode == null)
        return null;
    // Use the declared molecule list name, or let the document invent one
    var groupName = row.GetCell(INDEX_MOLECULE_GROUP);
    if (String.IsNullOrEmpty(groupName))
        groupName = document.GetSmallMoleculeGroupId();
    var metadata = new ProteinMetadata(groupName, String.Empty).SetWebSearchCompleted(); // FUTURE: some kind of lookup for small molecules
    return new PeptideGroupDocNode(peptideGroup, metadata, new[] { moleculeNode });
}
/// <summary>
/// Build the molecule (peptide) node for the given row, including its
/// transition group children. Returns null after reporting an error if any
/// part of the row fails to parse or validate.
/// </summary>
private PeptideDocNode GetMoleculePeptide(SrmDocument document, Row row, PeptideGroup group, bool requireProductInfo)
{
    CustomMolecule customMolecule;
    ParsedIonInfo ionInfo;
    try
    {
        ionInfo = ReadPrecursorOrProductColumns(document, row, null); // Re-read the precursor columns
        if (ionInfo == null)
            return null; // Some failure, but exception was already handled
        // Identify items with same formula and different adducts
        var neutralFormula = ionInfo.NeutralFormula;
        var shortName = row.GetCell(INDEX_MOLECULE_NAME);
        customMolecule = string.IsNullOrEmpty(neutralFormula)
            ? new CustomMolecule(ionInfo.Formula, ionInfo.MonoMass, ionInfo.AverageMass, shortName, ionInfo.MoleculeAccessionNumbers)
            : new CustomMolecule(neutralFormula, shortName, ionInfo.MoleculeAccessionNumbers);
    }
    catch (ArgumentException e)
    {
        // CustomMolecule constructors throw this for invalid formulas/masses
        ShowTransitionError(new PasteError
        {
            Column = INDEX_MOLECULE_FORMULA,
            Line = row.Index,
            Message = e.Message
        });
        return null;
    }
    try
    {
        var molecule = new Peptide(customMolecule);
        var transitionGroup = GetMoleculeTransitionGroup(document, row, molecule, requireProductInfo);
        if (transitionGroup == null)
            return null;
        return new PeptideDocNode(molecule, document.Settings, null, null, ionInfo.ExplicitRetentionTime, new[] { transitionGroup }, true);
    }
    catch (InvalidOperationException e)
    {
        // Adduct/formula handling downstream can throw these
        ShowTransitionError(new PasteError
        {
            Column = INDEX_MOLECULE_FORMULA,
            Line = row.Index,
            Message = e.Message
        });
        return null;
    }
}
/// <summary>
/// Build the precursor (transition group) node for the given row, inferring
/// an isotope label from otherwise-unexplained mass differences when needed.
/// Returns null after reporting an error on any parse or validation failure.
/// </summary>
private TransitionGroupDocNode GetMoleculeTransitionGroup(SrmDocument document, Row row, Peptide pep, bool requireProductInfo)
{
var moleculeInfo = ReadPrecursorOrProductColumns(document, row, null); // Re-read the precursor columns
if (moleculeInfo == null)
{
return null; // Some parsing error, user has already been notified
}
if (!document.Settings.TransitionSettings.IsMeasurablePrecursor(moleculeInfo.Mz))
{
ShowTransitionError(new PasteError
{
Column = INDEX_PRECURSOR_MZ,
Line = row.Index,
Message = String.Format(Resources.PasteDlg_GetMoleculeTransitionGroup_The_precursor_m_z__0__is_not_measureable_with_your_current_instrument_settings_, moleculeInfo.Mz)
});
return null;
}
var customIon = moleculeInfo.ToCustomMolecule();
var isotopeLabelType = moleculeInfo.IsotopeLabelType ?? IsotopeLabelType.light;
Assume.IsTrue(Equals(pep.CustomMolecule.PrimaryEquivalenceKey, customIon.PrimaryEquivalenceKey)); // TODO(bspratt) error handling here
var adduct = moleculeInfo.Adduct;
// If the parsed ion's mass differs from the molecule's but the adduct carries
// no isotope labels, treat the difference as an undeclared isotope label
if (!Equals(pep.CustomMolecule.MonoisotopicMass, customIon.MonoisotopicMass) && !adduct.HasIsotopeLabels)
{
// Some kind of undescribed isotope labeling going on
if ((!string.IsNullOrEmpty(pep.CustomMolecule.Formula) && Equals(pep.CustomMolecule.Formula, customIon.Formula)) ||
(string.IsNullOrEmpty(pep.CustomMolecule.Formula) && string.IsNullOrEmpty(customIon.Formula)))
{
// No formula for label, describe as mass
var labelMass = customIon.MonoisotopicMass - pep.CustomMolecule.MonoisotopicMass;
if (labelMass > 0)
{
adduct = adduct.ChangeIsotopeLabels(labelMass); // Isostopes add weight
isotopeLabelType = moleculeInfo.IsotopeLabelType ?? IsotopeLabelType.heavy;
}
}
}
var group = new TransitionGroup(pep, adduct, isotopeLabelType);
string errmsg;
try
{
var tran = GetMoleculeTransition(document, row, pep, group, requireProductInfo);
if (tran == null)
return null;
return new TransitionGroupDocNode(group, document.Annotations, document.Settings, null,
null, moleculeInfo.ExplicitTransitionGroupValues, null, new[] { tran }, true);
}
catch (InvalidDataException x)
{
errmsg = x.Message;
}
catch (InvalidOperationException x) // Adduct handling code can throw these
{
errmsg = x.Message;
}
ShowTransitionError(new PasteError
{
Column = INDEX_PRODUCT_MZ, // Don't actually know that mz was the issue, but at least it's the right row, and in the product columns
Line = row.Index,
Message = errmsg
});
return null;
}
/// <summary>
/// Decide whether the product (fragment) columns of a row describe the same
/// ion as its precursor columns, i.e. the row declares a precursor transition.
/// </summary>
private bool FragmentColumnsIdenticalToPrecursorColumns(ParsedIonInfo precursor, ParsedIonInfo fragment)
{
    // Adducts must be non-empty, and match
    if (Adduct.IsNullOrEmpty(precursor.Adduct) || !Equals(precursor.Adduct, fragment.Adduct))
    {
        return false;
    }
    // Beyond that, either the (non-empty) formulas must match, or the monoisotopic masses must
    var formulasMatch = !string.IsNullOrEmpty(precursor.Formula) && Equals(precursor.Formula, fragment.Formula);
    return formulasMatch || Equals(precursor.MonoMass, fragment.MonoMass);
}
/// <summary>
/// Build the transition node for the given row. When requireProductInfo is
/// false the precursor columns double as the product description. Decides
/// whether this is a precursor transition (same ion, or mass explained by the
/// adduct's isotope labels) or a custom fragment. Returns null on parse
/// failure (error already reported by ReadPrecursorOrProductColumns).
/// </summary>
private TransitionDocNode GetMoleculeTransition(SrmDocument document, Row row, Peptide pep, TransitionGroup group, bool requireProductInfo)
{
    var precursorIon = ReadPrecursorOrProductColumns(document, row, null); // Re-read the precursor columns
    var ion = requireProductInfo ? ReadPrecursorOrProductColumns(document, row, precursorIon) : precursorIon; // Re-read the product columns, or copy precursor
    // Bug fix: the original test was (requireProductInfo && ion == null), which
    // let a null precursorIon fall through when requireProductInfo was false and
    // crash with a NullReferenceException at ion.ToCustomMolecule() below.
    if (ion == null)
    {
        return null;
    }
    var customMolecule = ion.ToCustomMolecule();
    var ionType = !requireProductInfo || // We inspected the input list and found only precursor info
                  FragmentColumnsIdenticalToPrecursorColumns(precursorIon, ion) ||
                  // Or the mass is explained by an isotopic label in the adduct
                  (Math.Abs(customMolecule.MonoisotopicMass.Value - group.PrecursorAdduct.ApplyIsotopeLabelsToMass(pep.CustomMolecule.MonoisotopicMass)) <= MzMatchTolerance &&
                   Math.Abs(customMolecule.AverageMass.Value - group.PrecursorAdduct.ApplyIsotopeLabelsToMass(pep.CustomMolecule.AverageMass)) <= MzMatchTolerance) // Same mass, must be a precursor transition
        ? IonType.precursor
        : IonType.custom;
    var massType = (ionType == IonType.precursor)
        ? document.Settings.TransitionSettings.Prediction.PrecursorMassType
        : document.Settings.TransitionSettings.Prediction.FragmentMassType;
    if (ionType == IonType.precursor)
    {
        customMolecule = pep.CustomMolecule; // Some mz-only lists will give precursor mz as double, and product mz as int, even though they're meant to be the same thing
    }
    var mass = customMolecule.GetMass(massType);
    var transition = new Transition(group, ion.Adduct, null, customMolecule, ionType);
    var annotations = document.Annotations;
    if (!String.IsNullOrEmpty(ion.Note))
    {
        // Append the row's note to any existing document-level note text
        var note = document.Annotations.Note;
        // ReSharper disable LocalizableElement
        note = String.IsNullOrEmpty(note) ? ion.Note : (note + "\r\n" + ion.Note);
        // ReSharper restore LocalizableElement
        annotations = new Annotations(note, document.Annotations.ListAnnotations(), 0);
    }
    return new TransitionDocNode(transition, annotations, null, mass, TransitionDocNode.TransitionQuantInfo.DEFAULT, ion.ExplicitTransitionValues, null);
}
}
public class SmallMoleculeTransitionListCSVReader : SmallMoleculeTransitionListReader
{
private readonly DsvFileReader _csvReader;
public SmallMoleculeTransitionListCSVReader(IEnumerable<string> csvText) :
// ReSharper disable LocalizableElement
this(string.Join("\n", csvText))
// ReSharper restore LocalizableElement
{
}
public SmallMoleculeTransitionListCSVReader(string csvText)
{
// Accept either true CSV or currentculture equivalent
Type[] columnTypes;
IFormatProvider formatProvider;
char separator;
// Skip over header line to deduce decimal format
var endLine = csvText.IndexOf('\n');
var line = (endLine != -1 ? csvText.Substring(endLine+1) : csvText);
MassListImporter.IsColumnar(line, out formatProvider, out separator, out columnTypes);
// Double check that separator - does it appear in header row, or was it just an unlucky hit in a text field?
var header = (endLine != -1 ? csvText.Substring(0, endLine) : csvText);
if (!header.Contains(separator))
{
// Try again, this time without the distraction of a plausible but clearly incorrect seperator
MassListImporter.IsColumnar(line.Replace(separator,'_'), out formatProvider, out separator, out columnTypes);
}
_cultureInfo = formatProvider;
var reader = new StringReader(csvText);
_csvReader = new DsvFileReader(reader, separator, SmallMoleculeTransitionListColumnHeaders.KnownHeaderSynonyms);
// Do we recognize all the headers?
var badHeaders =
_csvReader.FieldNames.Where(
fn => SmallMoleculeTransitionListColumnHeaders.KnownHeaderSynonyms.All(kvp => string.Compare(kvp.Key, fn, StringComparison.OrdinalIgnoreCase) != 0)).ToList();
if (badHeaders.Any())
{
badHeaders.Add(string.Empty); // Add an empty line for more whitespace
throw new LineColNumberedIoException(
string.Format(
Resources.SmallMoleculeTransitionListReader_SmallMoleculeTransitionListReader_,
TextUtil.LineSeparate(badHeaders),
TextUtil.LineSeparate(SmallMoleculeTransitionListColumnHeaders.KnownHeaderSynonyms.Keys)),
1, _csvReader.FieldNames.IndexOf(badHeaders.First())+1);
}
string[] columns;
var index = 0;
while ((columns = _csvReader.ReadLine()) != null)
{
var row = new Row(this, index++, new List<string>(columns));
Rows.Add(row);
}
}
public int RowCount
{
get { return Rows.Count; }
}
public static bool IsPlausibleSmallMoleculeTransitionList(IEnumerable<string> csvText)
{
// ReSharper disable LocalizableElement
return IsPlausibleSmallMoleculeTransitionList(string.Join("\n", csvText));
// ReSharper restore LocalizableElement
}
// Returns true when csvText parses cleanly as a small molecule transition list,
// or, failing that, when its header row contains enough recognizable
// small-molecule column names to look like it was attempting to be one.
public static bool IsPlausibleSmallMoleculeTransitionList(string csvText)
{
    try
    {
        // This will throw if the headers don't look right
        var probe = new SmallMoleculeTransitionListCSVReader(csvText);
        // ReSharper disable once ConditionIsAlwaysTrueOrFalse
        return probe != null;
    }
    catch
    {
        // Not a proper small molecule transition list, but was it trying to be one?
        var header = csvText.Split('\n')[0];
        if (header.ToLowerInvariant().Contains(@"peptide"))
        {
            // Any mention of "peptide" in the header means this is almost
            // certainly a peptide list, not a small molecule list
            return false;
        }
        // Count header fields that match (case-insensitively) any known synonym
        // of these characteristic small-molecule columns; require more than one hit.
        return new[]
        {
            // These are pretty basic hints, without much overlap in peptide lists
            SmallMoleculeTransitionListColumnHeaders.moleculeGroup, // May be seen in Agilent peptide lists
            SmallMoleculeTransitionListColumnHeaders.namePrecursor,
            SmallMoleculeTransitionListColumnHeaders.nameProduct,
            SmallMoleculeTransitionListColumnHeaders.formulaPrecursor,
            SmallMoleculeTransitionListColumnHeaders.adductPrecursor,
            SmallMoleculeTransitionListColumnHeaders.idCAS,
            SmallMoleculeTransitionListColumnHeaders.idInChiKey,
            SmallMoleculeTransitionListColumnHeaders.idInChi,
            SmallMoleculeTransitionListColumnHeaders.idHMDB,
            SmallMoleculeTransitionListColumnHeaders.idSMILES,
        }.Count(hint => SmallMoleculeTransitionListColumnHeaders.KnownHeaderSynonyms.Where(
            p => string.Compare(p.Value, hint, StringComparison.OrdinalIgnoreCase) == 0).Any(kvp => header.IndexOf(kvp.Key, StringComparison.OrdinalIgnoreCase) >= 0)) > 1;
    }
}
// No-op: this reader-based implementation has no cell storage to update.
public override void UpdateCellBackingStore(int row, int col, object value)
{
    // We don't have a backing store, unlike the dialog implementation with its gridview
}
// Reports a transition list error by throwing, since this non-UI implementation
// has no grid to highlight; line/column are converted to 1-based for the message.
public override void ShowTransitionError(PasteError error)
{
    throw new LineColNumberedIoException(
        string.Format(
            Resources
                .InsertSmallMoleculeTransitionList_InsertSmallMoleculeTransitionList_Error_on_line__0___column_1____2_,
            error.Line + 1, error.Column + 1, error.Message),
        error.Line + 1, error.Column + 1);
}
// Returns the field index of the named column as reported by the underlying
// DsvFileReader for the parsed header.
public override int ColumnIndex(string columnName)
{
    return _csvReader.GetFieldIndex(columnName);
}
}
// Custom molecule transition list internal column names, for saving to settings
public static class SmallMoleculeTransitionListColumnHeaders
{
public const string moleculeGroup = "MoleculeGroup";
public const string namePrecursor = "PrecursorName";
public const string nameProduct = "ProductName";
public const string formulaPrecursor = "PrecursorFormula";
public const string formulaProduct = "ProductFormula";
public const string mzPrecursor = "PrecursorMz";
public const string mzProduct = "ProductMz";
public const string chargePrecursor = "PrecursorCharge";
public const string chargeProduct = "ProductCharge";
public const string rtPrecursor = "PrecursorRT";
public const string rtWindowPrecursor = "PrecursorRTWindow";
public const string cePrecursor = "PrecursorCE";
public const string dtPrecursor = "PrecursorDT"; // Drift time - IMUnits is implied
public const string dtHighEnergyOffset = "HighEnergyDTOffset"; // Drift time - IMUnits is implied
public const string imPrecursor = "PrecursorIM";
public const string imHighEnergyOffset = "HighEnergyIMOffset";
public const string imUnits = "IMUnits";
public const string ccsPrecursor = "PrecursorCCS";
public const string slens = "SLens";
public const string coneVoltage = "ConeVoltage";
public const string compensationVoltage = "CompensationVoltage";
public const string declusteringPotential = "DeclusteringPotential";
public const string note = "Note";
public const string labelType = "LabelType";
public const string adductPrecursor = "PrecursorAdduct";
public const string adductProduct = "ProductAdduct";
public const string idCAS = "CAS";
public const string idInChiKey = "InChiKey";
public const string idInChi = "InChi";
public const string idHMDB = "HMDB";
public const string idSMILES = "SMILES";
public static readonly List<string> KnownHeaders;
public static IReadOnlyDictionary<string, string> KnownHeaderSynonyms;
static SmallMoleculeTransitionListColumnHeaders()
{
// The list of internal values, as used in serialization
KnownHeaders = new List<string>(new[]
{
moleculeGroup,
namePrecursor,
nameProduct,
formulaPrecursor,
formulaProduct,
mzPrecursor,
mzProduct,
chargePrecursor,
chargeProduct,
adductPrecursor,
adductProduct,
rtPrecursor,
rtWindowPrecursor,
cePrecursor,
dtPrecursor, // Drift time - IMUnits implied
dtHighEnergyOffset, // Drift time - IMUnits implied
imPrecursor, // General ion mobility, imUnits required
imHighEnergyOffset,
imUnits,
ccsPrecursor,
slens,
coneVoltage,
compensationVoltage,
declusteringPotential,
note,
labelType,
idInChiKey,
idCAS,
idHMDB,
idInChi,
idSMILES,
});
// A dictionary of terms that can be understood as column headers - this includes
// the internal names, and the names presented in the UI (for all supported cultures)
var currentCulture = Thread.CurrentThread.CurrentCulture;
var currentUICulture = Thread.CurrentThread.CurrentUICulture;
var knownColumnHeadersAllCultures = KnownHeaders.ToDictionary( hdr => hdr, hdr => hdr);
foreach (var culture in new[] { @"en", @"zh-CHS", @"ja" })
{
Thread.CurrentThread.CurrentUICulture =
Thread.CurrentThread.CurrentCulture = new CultureInfo(culture);
foreach (var pair in new[] {
Tuple.Create(moleculeGroup, Resources.PasteDlg_UpdateMoleculeType_Molecule_List_Name),
Tuple.Create(namePrecursor, Resources.PasteDlg_UpdateMoleculeType_Precursor_Name),
Tuple.Create(namePrecursor, Resources.SmallMoleculeTransitionListColumnHeaders_SmallMoleculeTransitionListColumnHeaders_Molecule),
Tuple.Create(namePrecursor, Resources.SmallMoleculeTransitionListColumnHeaders_SmallMoleculeTransitionListColumnHeaders_Compound),
Tuple.Create(nameProduct, Resources.PasteDlg_UpdateMoleculeType_Product_Name),
Tuple.Create(formulaPrecursor, Resources.PasteDlg_UpdateMoleculeType_Precursor_Formula),
Tuple.Create(formulaProduct, Resources.PasteDlg_UpdateMoleculeType_Product_Formula),
Tuple.Create(mzPrecursor, Resources.PasteDlg_UpdateMoleculeType_Precursor_m_z),
Tuple.Create(mzProduct, Resources.PasteDlg_UpdateMoleculeType_Product_m_z),
Tuple.Create(chargePrecursor, Resources.PasteDlg_UpdateMoleculeType_Precursor_Charge),
Tuple.Create(chargeProduct, Resources.PasteDlg_UpdateMoleculeType_Product_Charge),
Tuple.Create(adductPrecursor, Resources.PasteDlg_UpdateMoleculeType_Precursor_Adduct),
Tuple.Create(adductProduct, Resources.PasteDlg_UpdateMoleculeType_Product_Adduct),
Tuple.Create(rtPrecursor, Resources.PasteDlg_UpdateMoleculeType_Explicit_Retention_Time),
Tuple.Create(rtPrecursor, Resources.SmallMoleculeTransitionListColumnHeaders_SmallMoleculeTransitionListColumnHeaders_RT__min_), // ""RT (min)"
Tuple.Create(rtWindowPrecursor, Resources.PasteDlg_UpdateMoleculeType_Explicit_Retention_Time_Window),
Tuple.Create(cePrecursor, Resources.PasteDlg_UpdateMoleculeType_Explicit_Collision_Energy),
Tuple.Create(dtPrecursor, Resources.PasteDlg_UpdateMoleculeType_Explicit_Drift_Time__msec_),
Tuple.Create(dtHighEnergyOffset, Resources.PasteDlg_UpdateMoleculeType_Explicit_Drift_Time_High_Energy_Offset__msec_),
Tuple.Create(imPrecursor, Resources.PasteDlg_UpdateMoleculeType_Explicit_Ion_Mobility),
Tuple.Create(imHighEnergyOffset, Resources.PasteDlg_UpdateMoleculeType_Explicit_Ion_Mobility_High_Energy_Offset),
Tuple.Create(imUnits, Resources.PasteDlg_UpdateMoleculeType_Explicit_Ion_Mobility_Units),
Tuple.Create(ccsPrecursor, Resources.PasteDlg_UpdateMoleculeType_Collisional_Cross_Section__sq_A_),
Tuple.Create(slens, Resources.PasteDlg_UpdateMoleculeType_S_Lens),
Tuple.Create(coneVoltage, Resources.PasteDlg_UpdateMoleculeType_Cone_Voltage),
Tuple.Create(compensationVoltage, Resources.PasteDlg_UpdateMoleculeType_Explicit_Compensation_Voltage),
Tuple.Create(declusteringPotential, Resources.PasteDlg_UpdateMoleculeType_Explicit_Declustering_Potential),
Tuple.Create(note, Resources.PasteDlg_UpdateMoleculeType_Note),
Tuple.Create(labelType, Resources.PasteDlg_UpdateMoleculeType_Label_Type),
Tuple.Create(idInChiKey, idInChiKey),
Tuple.Create(idCAS, idCAS),
Tuple.Create(idHMDB, idHMDB),
Tuple.Create(idInChi, idInChi),
Tuple.Create(idSMILES, idSMILES),
})
{
if (!knownColumnHeadersAllCultures.ContainsKey(pair.Item2))
{
knownColumnHeadersAllCultures.Add(pair.Item2, pair.Item1);
}
var mz = pair.Item2.Replace(@"m/z", @"mz"); // Accept either m/z or mz
if (!knownColumnHeadersAllCultures.ContainsKey(mz))
{
knownColumnHeadersAllCultures.Add(mz, pair.Item1);
}
}
}
Thread.CurrentThread.CurrentCulture = currentCulture;
Thread.CurrentThread.CurrentUICulture = currentUICulture;
KnownHeaderSynonyms = knownColumnHeadersAllCultures;
}
}
}
| 1 | 12,796 | I think this is redundant since "NullForEmpty" already calls "Trim()". | ProteoWizard-pwiz | .cs |
@@ -317,6 +317,8 @@ type CloudBackupRestoreRequest struct {
}
type CloudBackupGroupCreateResponse struct {
+ // ID for this group of backups
+ GroupCloudBackupID string
// Names of the tasks performing this group backup
Names []string
} | 1 | package api
import (
"context"
"fmt"
"math"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/libopenstorage/openstorage/pkg/auth"
"github.com/mohae/deepcopy"
)
// Strings for VolumeSpec
const (
Name = "name"
Token = "token"
SpecNodes = "nodes"
SpecParent = "parent"
SpecEphemeral = "ephemeral"
SpecShared = "shared"
SpecJournal = "journal"
SpecSharedv4 = "sharedv4"
SpecCascaded = "cascaded"
SpecSticky = "sticky"
SpecSecure = "secure"
SpecCompressed = "compressed"
SpecSize = "size"
SpecScale = "scale"
SpecFilesystem = "fs"
SpecBlockSize = "block_size"
SpecQueueDepth = "queue_depth"
SpecHaLevel = "repl"
SpecPriority = "io_priority"
SpecSnapshotInterval = "snap_interval"
SpecSnapshotSchedule = "snap_schedule"
SpecAggregationLevel = "aggregation_level"
SpecDedupe = "dedupe"
SpecPassphrase = "secret_key"
SpecAutoAggregationValue = "auto"
SpecGroup = "group"
SpecGroupEnforce = "fg"
SpecZones = "zones"
SpecRacks = "racks"
SpecRack = "rack"
SpecRegions = "regions"
SpecLabels = "labels"
SpecPriorityAlias = "priority_io"
SpecIoProfile = "io_profile"
SpecAsyncIo = "async_io"
SpecEarlyAck = "early_ack"
// SpecBestEffortLocationProvisioning default is false. If set provisioning request will succeed
// even if specified data location parameters could not be satisfied.
SpecBestEffortLocationProvisioning = "best_effort_location_provisioning"
// SpecForceUnsuppportedFsType is of type boolean and if true it sets
// the VolumeSpec.force_unsupported_fs_type. When set to true it asks
// the driver to use an unsupported value of VolumeSpec.format if possible
SpecForceUnsupportedFsType = "force_unsupported_fs_type"
SpecNodiscard = "nodiscard"
)
// OptionKey specifies a set of recognized query params.
const (
// OptName query parameter used to lookup volume by name.
OptName = "Name"
// OptVolumeID query parameter used to lookup volume by ID.
OptVolumeID = "VolumeID"
// OptSnapID query parameter used to lookup snapshot by ID.
OptSnapID = "SnapID"
// OptLabel query parameter used to lookup volume by set of labels.
OptLabel = "Label"
// OptConfigLabel query parameter used to lookup volume by set of labels.
OptConfigLabel = "ConfigLabel"
// OptCumulative query parameter used to request cumulative stats.
OptCumulative = "Cumulative"
// OptTimeout query parameter used to indicate timeout seconds
OptTimeoutSec = "TimeoutSec"
// OptQuiesceID query parameter use for quiesce
OptQuiesceID = "QuiesceID"
// OptCredUUID is the UUID of the credential
OptCredUUID = "CredUUID"
// OptCredName indicates unique name of credential
OptCredName = "CredName"
// OptCredType indicates type of credential
OptCredType = "CredType"
// OptCredEncrKey is the key used to encrypt data
OptCredEncrKey = "CredEncrypt"
// OptCredRegion indicates the region for s3
OptCredRegion = "CredRegion"
// OptCredDisableSSL indicated if SSL should be disabled
OptCredDisableSSL = "CredDisableSSL"
// OptCredEndpoint indicate the cloud endpoint
OptCredEndpoint = "CredEndpoint"
// OptCredAccKey for s3
OptCredAccessKey = "CredAccessKey"
// OptCredSecretKey for s3
OptCredSecretKey = "CredSecretKey"
// OptCredBucket is the optional bucket name
OptCredBucket = "CredBucket"
// OptCredGoogleProjectID projectID for google cloud
OptCredGoogleProjectID = "CredProjectID"
// OptCredGoogleJsonKey for google cloud
OptCredGoogleJsonKey = "CredJsonKey"
// OptCredAzureAccountName is the account name for
// azure as the cloud provider
OptCredAzureAccountName = "CredAccountName"
// OptOptCredAzureAccountKey is the accountkey for
// azure as the cloud provider
OptCredAzureAccountKey = "CredAccountKey"
// OptCloudBackupID is the backID in the cloud
OptCloudBackupID = "CloudBackID"
// OptSrcVolID is the source volume ID of the backup
OptSrcVolID = "SrcVolID"
// OptBkupOpState is the desired operational state
// (stop/pause/resume) of backup/restore
OptBkupOpState = "OpState"
// OptBackupSchedUUID is the UUID of the backup-schedule
OptBackupSchedUUID = "BkupSchedUUID"
// OptVolumeSubFolder query parameter used to catalog a particular path inside a volume
OptCatalogSubFolder = "subfolder"
// OptCatalogMaxDepth query parameter used to limit the depth we return
OptCatalogMaxDepth = "depth"
)
// Api clientserver Constants
const (
OsdVolumePath = "osd-volumes"
OsdSnapshotPath = "osd-snapshot"
OsdCredsPath = "osd-creds"
OsdBackupPath = "osd-backup"
OsdMigratePath = "osd-migrate"
OsdMigrateStartPath = OsdMigratePath + "/start"
OsdMigrateCancelPath = OsdMigratePath + "/cancel"
OsdMigrateStatusPath = OsdMigratePath + "/status"
TimeLayout = "Jan 2 15:04:05 UTC 2006"
)
const (
// AutoAggregation value indicates driver to select aggregation level.
AutoAggregation = math.MaxUint32
)
// Node describes the state of a node.
// It includes the current physical state (CPU, memory, storage, network usage) as
// well as the containers running on the system.
//
// swagger:model
type Node struct {
// Id of the node.
Id string
// SchedulerNodeName is name of the node in scheduler context. It can be
// empty if unable to get the name from the scheduler.
SchedulerNodeName string
// Cpu usage of the node.
Cpu float64 // percentage.
// Total Memory of the node
MemTotal uint64
// Used Memory of the node
MemUsed uint64
// Free Memory of the node
MemFree uint64
// Average load (percentage)
Avgload int
// Node Status see (Status object)
Status Status
// GenNumber of the node
GenNumber uint64
// List of disks on this node.
Disks map[string]StorageResource
// List of storage pools this node supports
Pools []StoragePool
// Management IP
MgmtIp string
// Data IP
DataIp string
// Timestamp
Timestamp time.Time
// Start time of this node
StartTime time.Time
// Hostname of this node
Hostname string
// Node data for this node (EX: Public IP, Provider, City..)
NodeData map[string]interface{}
// User defined labels for node. Key Value pairs
NodeLabels map[string]string
// GossipPort is the port used by the gossip protocol
GossipPort string
}
// FluentDConfig describes ip and port of a fluentdhost.
// DEPRECATED
//
// swagger:model
type FluentDConfig struct {
IP string `json:"ip"`
Port string `json:"port"`
}
// TunnelConfig describes key, cert and endpoint of a reverse proxy tunnel
// DEPRECATED
//
// swagger:model
type TunnelConfig struct {
Key string `json:"key"`
Cert string `json:"cert"`
Endpoint string `json:"tunnel_endpoint"`
}
// Cluster represents the state of the cluster.
//
// swagger:model
type Cluster struct {
Status Status
// Id of the cluster.
//
// required: true
Id string
// Id of the node on which this cluster object is initialized
NodeId string
// array of all the nodes in the cluster.
Nodes []Node
// Logging url for the cluster.
LoggingURL string
// Management url for the cluster
ManagementURL string
// FluentD Host for the cluster
FluentDConfig FluentDConfig
// TunnelConfig for the cluster [key, cert, endpoint]
TunnelConfig TunnelConfig
}
// CredCreateRequest is the input for CredCreate command
type CredCreateRequest struct {
// InputParams is map describing cloud provide
InputParams map[string]string
}
// CredCreateResponse is returned for CredCreate command
type CredCreateResponse struct {
// UUID of the credential that was just created
UUID string
}
// StatPoint represents the basic structure of a single Stat reported
// TODO: This is the first step to introduce stats in openstorage.
// Follow up task is to introduce an API for logging stats
type StatPoint struct {
// Name of the Stat
Name string
// Tags for the Stat
Tags map[string]string
// Fields and values of the stat
Fields map[string]interface{}
// Timestamp in Unix format
Timestamp int64
}
type CloudBackupCreateRequest struct {
// VolumeID of the volume for which cloudbackup is requested
VolumeID string
// CredentialUUID is cloud credential to be used for backup
CredentialUUID string
// Full indicates if full backup is desired even though incremental is possible
Full bool
// Name is optional unique id to be used for this backup
// If not specified backup creates this by default
Name string
// Labels are list of key value pairs to tag the cloud backup. These labels
// are stored in the metadata associated with the backup.
Labels map[string]string
}
type CloudBackupCreateResponse struct {
// Name of the task performing this backup
Name string
}
type CloudBackupGroupCreateRequest struct {
// GroupID indicates backup request for a volumegroup with this group id
GroupID string
// Labels indicates backup request for a volume group with these labels
Labels map[string]string
// VolumeIDs are a list of volume IDs to use for the backup request
// If multiple of GroupID, Labels or VolumeIDs are specified, volumes matching all of
// them are backed up to cloud
VolumeIDs []string
// CredentialUUID is cloud credential to be used for backup
CredentialUUID string
// Full indicates if full backup is desired even though incremental is possible
Full bool
}
type CloudBackupRestoreRequest struct {
// ID is the backup ID being restored
ID string
// RestoreVolumeName is optional volume Name of the new volume to be created
// in the cluster for restoring the cloudbackup
RestoreVolumeName string
// CredentialUUID is the credential to be used for restore operation
CredentialUUID string
// NodeID is the optional NodeID for provisioning restore
// volume (ResoreVolumeName should not be specified)
NodeID string
// Name is optional unique id to be used for this restore op
// restore creates this by default
Name string
}
type CloudBackupGroupCreateResponse struct {
// Names of the tasks performing this group backup
Names []string
}
type CloudBackupRestoreResponse struct {
// RestoreVolumeID is the volumeID to which the backup is being restored
RestoreVolumeID string
// Name of the task performing this restore
Name string
}
type CloudBackupGenericRequest struct {
// SrcVolumeID is optional Source VolumeID for the request
SrcVolumeID string
// ClusterID is the optional clusterID for the request
ClusterID string
// CredentialUUID is the credential for cloud to be used for the request
CredentialUUID string
// All if set to true, backups for all clusters in the cloud are processed
All bool
}
type CloudBackupInfo struct {
// ID is the ID of the cloud backup
ID string
// SrcVolumeID is Source volumeID of the backup
SrcVolumeID string
// SrcvolumeName is name of the sourceVolume of the backup
SrcVolumeName string
// Timestamp is the timestamp at which the source volume
// was backed up to cloud
Timestamp time.Time
// Metadata associated with the backup
Metadata map[string]string
// Status indicates the status of the backup
Status string
}
type CloudBackupEnumerateRequest struct {
CloudBackupGenericRequest
}
type CloudBackupEnumerateResponse struct {
// Backups is list of backups in cloud for given volume/cluster/s
Backups []CloudBackupInfo
}
type CloudBackupDeleteRequest struct {
// ID is the ID of the cloud backup
ID string
// CredentialUUID is the credential for cloud to be used for the request
CredentialUUID string
// Force Delete cloudbackup even if there are dependencies
Force bool
}
type CloudBackupDeleteAllRequest struct {
CloudBackupGenericRequest
}
type CloudBackupStatusRequest struct {
// SrcVolumeID optional volumeID to list status of backup/restore
SrcVolumeID string
// Local indicates if only those backups/restores that are
// active on current node must be returned
Local bool
// Name of the backup/restore task. If this is specified, SrcVolumeID is
// ignored
Name string
}
type CloudBackupOpType string
const (
CloudBackupOp = CloudBackupOpType("Backup")
CloudRestoreOp = CloudBackupOpType("Restore")
)
type CloudBackupStatusType string
const (
CloudBackupStatusNotStarted = CloudBackupStatusType("NotStarted")
CloudBackupStatusDone = CloudBackupStatusType("Done")
CloudBackupStatusAborted = CloudBackupStatusType("Aborted")
CloudBackupStatusPaused = CloudBackupStatusType("Paused")
CloudBackupStatusStopped = CloudBackupStatusType("Stopped")
CloudBackupStatusActive = CloudBackupStatusType("Active")
CloudBackupStatusQueued = CloudBackupStatusType("Queued")
CloudBackupStatusFailed = CloudBackupStatusType("Failed")
)
const (
CloudBackupRequestedStatePause = "pause"
CloudBackupRequestedStateResume = "resume"
CloudBackupRequestedStateStop = "stop"
)
type CloudBackupStatus struct {
// ID is the ID for the operation
ID string
// OpType indicates if this is a backup or restore
OpType CloudBackupOpType
// State indicates if the op is currently active/done/failed
Status CloudBackupStatusType
// BytesDone indicates Bytes uploaded/downloaded so far
BytesDone uint64
// BytesTotal is the total number of bytes being transferred
BytesTotal uint64
// EtaSeconds estimated time in seconds for backup/restore completion
EtaSeconds int64
// StartTime indicates Op's start time
StartTime time.Time
// CompletedTime indicates Op's completed time
CompletedTime time.Time
// NodeID is the ID of the node where this Op is active
NodeID string
// SrcVolumeID is either the volume being backed-up or target volume to
// which a cloud backup is being restored
SrcVolumeID string
// Info currently indicates only failure cause in case of failed backup/restore
Info []string
// CredentialUUID used for this backup/restore op
CredentialUUID string
}
type CloudBackupStatusResponse struct {
// statuses is list of currently active/failed/done backup/restores
// map key is the id of the task
Statuses map[string]CloudBackupStatus
}
type CloudBackupCatalogRequest struct {
// ID is Backup ID in the cloud
ID string
// CredentialUUID is the credential for cloud
CredentialUUID string
}
type CloudBackupCatalogResponse struct {
// Contents is listing of backup contents
Contents []string
}
type CloudBackupHistoryRequest struct {
// SrcVolumeID is volumeID for which history of backup/restore
// is being requested
SrcVolumeID string
}
type CloudBackupHistoryItem struct {
// SrcVolumeID is volume ID which was backedup
SrcVolumeID string
// TimeStamp is the time at which either backup completed/failed
Timestamp time.Time
// Status indicates whether backup was completed/failed
Status string
}
type CloudBackupHistoryResponse struct {
// HistoryList is list of past backup/restores in the cluster
HistoryList []CloudBackupHistoryItem
}
type CloudBackupStateChangeRequest struct {
// Name of the backup/restore task for which state change
// is being requested
Name string
// RequestedState is desired state of the op
// can be pause/resume/stop
RequestedState string
}
// CloudBackupScheduleInfo describes a periodic cloud backup schedule.
type CloudBackupScheduleInfo struct {
	// SrcVolumeID is the schedule's source volume
	SrcVolumeID string
	// CredentialUUID is the cloud credential used with this schedule
	CredentialUUID string
	// Schedule is the frequency of backup
	Schedule string
	// MaxBackups are the maximum number of backups retained
	// in cloud. Older backups are deleted
	MaxBackups uint
	// GroupID indicates the group of volumes for this cloudbackup schedule
	GroupID string
	// Labels indicates a volume group for this cloudsnap schedule
	Labels map[string]string
	// Full indicates if scheduled backups must be full always
	Full bool
}
type CloudBackupSchedCreateRequest struct {
CloudBackupScheduleInfo
}
type CloudBackupGroupSchedCreateRequest struct {
// GroupID indicates the group of volumes for which cloudbackup schedule is
// being created
GroupID string
// Labels indicates a volume group for which this group cloudsnap schedule is
// being created. If this is provided GroupId is not needed and vice-versa.
Labels map[string]string
// VolumeIDs are a list of volume IDs to use for the backup request
// If multiple of GroupID, Labels or VolumeIDs are specified, volumes matching all of
// them are backed up to cloud
VolumeIDs []string
// CredentialUUID is cloud credential to be used with this schedule
CredentialUUID string
// Schedule is the frequency of backup
Schedule string
// MaxBackups are the maximum number of backups retained
// in cloud.Older backups are deleted
MaxBackups uint
// Full indicates if scheduled backups must be full always
Full bool
}
type CloudBackupSchedCreateResponse struct {
// UUID is the UUID of the newly created schedule
UUID string
}
type CloudBackupSchedDeleteRequest struct {
// UUID is UUID of the schedule to be deleted
UUID string
}
type CloudBackupSchedEnumerateResponse struct {
// Schedule is map of schedule uuid to scheduleInfo
Schedules map[string]CloudBackupScheduleInfo
}
// Defines the response for CapacityUsage request
type CapacityUsageResponse struct {
CapacityUsageInfo *CapacityUsageInfo
// Describes the err if all of the usage details could not be obtained
Error error
}
// DriverTypeSimpleValueOf parses s into the matching DriverType value.
func DriverTypeSimpleValueOf(s string) (DriverType, error) {
	obj, err := simpleValueOf("driver_type", DriverType_value, s)
	return DriverType(obj), err
}

// SimpleString returns the string format of DriverType
func (x DriverType) SimpleString() string {
	return simpleString("driver_type", DriverType_name, int32(x))
}

// FSTypeSimpleValueOf parses s into the matching FSType value.
func FSTypeSimpleValueOf(s string) (FSType, error) {
	obj, err := simpleValueOf("fs_type", FSType_value, s)
	return FSType(obj), err
}

// SimpleString returns the string format of FSType
func (x FSType) SimpleString() string {
	return simpleString("fs_type", FSType_name, int32(x))
}

// CosTypeSimpleValueOf parses s into the matching CosType value.
// Unlike the other parsers it matches the bare upper-cased name
// (no "cos_type_" prefix) and returns -1 on failure.
func CosTypeSimpleValueOf(s string) (CosType, error) {
	obj, exists := CosType_value[strings.ToUpper(s)]
	if !exists {
		return -1, fmt.Errorf("Invalid cos value: %s", s)
	}
	return CosType(obj), nil
}

// SimpleString returns the string format of CosType
func (x CosType) SimpleString() string {
	return simpleString("cos_type", CosType_name, int32(x))
}

// GraphDriverChangeTypeSimpleValueOf parses s into the matching GraphDriverChangeType value.
func GraphDriverChangeTypeSimpleValueOf(s string) (GraphDriverChangeType, error) {
	obj, err := simpleValueOf("graph_driver_change_type", GraphDriverChangeType_value, s)
	return GraphDriverChangeType(obj), err
}

// SimpleString returns the string format of GraphDriverChangeType
func (x GraphDriverChangeType) SimpleString() string {
	return simpleString("graph_driver_change_type", GraphDriverChangeType_name, int32(x))
}

// VolumeActionParamSimpleValueOf parses s into the matching VolumeActionParam value.
func VolumeActionParamSimpleValueOf(s string) (VolumeActionParam, error) {
	obj, err := simpleValueOf("volume_action_param", VolumeActionParam_value, s)
	return VolumeActionParam(obj), err
}

// SimpleString returns the string format of VolumeActionParam
func (x VolumeActionParam) SimpleString() string {
	return simpleString("volume_action_param", VolumeActionParam_name, int32(x))
}

// VolumeStateSimpleValueOf parses s into the matching VolumeState value.
func VolumeStateSimpleValueOf(s string) (VolumeState, error) {
	obj, err := simpleValueOf("volume_state", VolumeState_value, s)
	return VolumeState(obj), err
}

// SimpleString returns the string format of VolumeState
func (x VolumeState) SimpleString() string {
	return simpleString("volume_state", VolumeState_name, int32(x))
}

// VolumeStatusSimpleValueOf parses s into the matching VolumeStatus value.
func VolumeStatusSimpleValueOf(s string) (VolumeStatus, error) {
	obj, err := simpleValueOf("volume_status", VolumeStatus_value, s)
	return VolumeStatus(obj), err
}

// SimpleString returns the string format of VolumeStatus
func (x VolumeStatus) SimpleString() string {
	return simpleString("volume_status", VolumeStatus_name, int32(x))
}

// IoProfileSimpleValueOf parses s into the matching IoProfile value.
func IoProfileSimpleValueOf(s string) (IoProfile, error) {
	obj, err := simpleValueOf("io_profile", IoProfile_value, s)
	return IoProfile(obj), err
}

// SimpleString returns the string format of IoProfile
func (x IoProfile) SimpleString() string {
	return simpleString("io_profile", IoProfile_name, int32(x))
}
func simpleValueOf(typeString string, valueMap map[string]int32, s string) (int32, error) {
obj, ok := valueMap[strings.ToUpper(fmt.Sprintf("%s_%s", typeString, s))]
if !ok {
return 0, fmt.Errorf("no openstorage.%s for %s", strings.ToUpper(typeString), s)
}
return obj, nil
}
func simpleString(typeString string, nameMap map[int32]string, v int32) string {
s, ok := nameMap[v]
if !ok {
return strconv.Itoa(int(v))
}
return strings.TrimPrefix(strings.ToLower(s), fmt.Sprintf("%s_", strings.ToLower(typeString)))
}
// toSec converts a millisecond count to whole seconds (truncating).
func toSec(ms uint64) uint64 {
	return ms / 1000
}

// WriteThroughput returns the write throughput in bytes/sec over the stats
// interval; 0 when the interval is shorter than one second.
func (v *Stats) WriteThroughput() uint64 {
	intv := toSec(v.IntervalMs)
	if intv == 0 {
		return 0
	}
	return (v.WriteBytes) / intv
}

// ReadThroughput returns the read throughput in bytes/sec over the stats
// interval; 0 when the interval is shorter than one second.
func (v *Stats) ReadThroughput() uint64 {
	intv := toSec(v.IntervalMs)
	if intv == 0 {
		return 0
	}
	return (v.ReadBytes) / intv
}

// Latency returns the average per-operation latency across reads and writes
// (ms x 1000 / ops, i.e. microseconds assuming IoMs is milliseconds).
func (v *Stats) Latency() uint64 {
	ops := v.Writes + v.Reads
	if ops == 0 {
		return 0
	}
	return (uint64)((v.IoMs * 1000) / ops)
}

// ReadLatency returns avg. time required for a read operation to complete
// (microseconds, assuming ReadMs is milliseconds).
func (v *Stats) ReadLatency() uint64 {
	if v.Reads == 0 {
		return 0
	}
	return (uint64)((v.ReadMs * 1000) / v.Reads)
}

// WriteLatency returns avg. time required for a write operation to complete
// (microseconds, assuming WriteMs is milliseconds).
func (v *Stats) WriteLatency() uint64 {
	if v.Writes == 0 {
		return 0
	}
	return (uint64)((v.WriteMs * 1000) / v.Writes)
}

// Iops returns combined read+write operations per second over the stats
// interval; 0 when the interval is shorter than one second.
func (v *Stats) Iops() uint64 {
	intv := toSec(v.IntervalMs)
	if intv == 0 {
		return 0
	}
	return (v.Writes + v.Reads) / intv
}
// Scaled returns true if the volume is scaled (spec scale factor above 1).
func (v *Volume) Scaled() bool {
	return v.Spec.Scale > 1
}
// Contains returns true if mid is a member of volume's replication set.
func (m *Volume) Contains(mid string) bool {
	for _, replicaSet := range m.GetReplicaSets() {
		for _, member := range replicaSet.Nodes {
			if member == mid {
				return true
			}
		}
	}
	return false
}
// Copy makes a deep copy of VolumeSpec.
// NOTE(review): only ReplicaSet is deep-copied here; any other reference
// fields in the spec remain shared with the receiver — confirm intended.
func (s *VolumeSpec) Copy() *VolumeSpec {
	spec := *s
	if s.ReplicaSet != nil {
		spec.ReplicaSet = &ReplicaSet{Nodes: make([]string, len(s.ReplicaSet.Nodes))}
		copy(spec.ReplicaSet.Nodes, s.ReplicaSet.Nodes)
	}
	return &spec
}

// Copy makes a deep copy of Node using reflection-based deepcopy.
func (s *Node) Copy() *Node {
	localCopy := deepcopy.Copy(*s)
	nodeCopy := localCopy.(Node)
	return &nodeCopy
}

// IsClone reports whether v is a writable volume derived from a parent.
func (v Volume) IsClone() bool {
	return v.Source != nil && len(v.Source.Parent) != 0 && !v.Readonly
}

// IsSnapshot reports whether v is a read-only volume derived from a parent.
func (v Volume) IsSnapshot() bool {
	return v.Source != nil && len(v.Source.Parent) != 0 && v.Readonly
}

// DisplayId returns "name (id)" when the volume has a locator, else the raw id.
func (v Volume) DisplayId() string {
	if v.Locator != nil {
		return fmt.Sprintf("%s (%s)", v.Locator.Name, v.Id)
	} else {
		return v.Id
	}
}
// ToStorageNode converts a Node structure to an exported gRPC StorageNode
// struct, copying scalar fields and duplicating the Disks map, NodeLabels
// map, and Pools slice (as pointers into the receiver's Pools backing array).
func (s *Node) ToStorageNode() *StorageNode {
	node := &StorageNode{
		Id:                s.Id,
		SchedulerNodeName: s.SchedulerNodeName,
		Cpu:               s.Cpu,
		MemTotal:          s.MemTotal,
		MemUsed:           s.MemUsed,
		MemFree:           s.MemFree,
		AvgLoad:           int64(s.Avgload),
		Status:            s.Status,
		MgmtIp:            s.MgmtIp,
		DataIp:            s.DataIp,
		Hostname:          s.Hostname,
	}

	node.Disks = make(map[string]*StorageResource)
	for k, v := range s.Disks {
		// Copy the range variable before taking its address: `&v` directly
		// would make every map entry alias the single per-loop variable
		// (and thus hold its final value) on Go versions before 1.22.
		v := v
		node.Disks[k] = &v
	}

	node.NodeLabels = make(map[string]string)
	for k, v := range s.NodeLabels {
		node.NodeLabels[k] = v
	}

	node.Pools = make([]*StoragePool, len(s.Pools))
	for i := range s.Pools {
		// Point at the slice element itself rather than the per-iteration
		// copy, which previously aliased every entry to one variable.
		node.Pools[i] = &s.Pools[i]
	}
	return node
}
// ToStorageCluster converts a Cluster structure to an exported gRPC
// StorageCluster struct. Only Status and the cluster name are carried over.
func (c *Cluster) ToStorageCluster() *StorageCluster {
	cluster := &StorageCluster{
		Status: c.Status,

		// Due to history, the cluster ID is normally the name of the cluster, not the
		// unique identifier
		Name: c.Id,
	}

	return cluster
}
func CloudBackupStatusTypeToSdkCloudBackupStatusType(
t CloudBackupStatusType,
) SdkCloudBackupStatusType {
switch t {
case CloudBackupStatusNotStarted:
return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeNotStarted
case CloudBackupStatusDone:
return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeDone
case CloudBackupStatusAborted:
return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeAborted
case CloudBackupStatusPaused:
return SdkCloudBackupStatusType_SdkCloudBackupStatusTypePaused
case CloudBackupStatusStopped:
return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeStopped
case CloudBackupStatusActive:
return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeActive
case CloudBackupStatusFailed:
return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeFailed
default:
return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeUnknown
}
}
func StringToSdkCloudBackupStatusType(s string) SdkCloudBackupStatusType {
return CloudBackupStatusTypeToSdkCloudBackupStatusType(CloudBackupStatusType(s))
}
func (b *CloudBackupInfo) ToSdkCloudBackupInfo() *SdkCloudBackupInfo {
info := &SdkCloudBackupInfo{
Id: b.ID,
SrcVolumeId: b.SrcVolumeID,
SrcVolumeName: b.SrcVolumeName,
Metadata: b.Metadata,
}
info.Timestamp, _ = ptypes.TimestampProto(b.Timestamp)
info.Status = StringToSdkCloudBackupStatusType(b.Status)
return info
}
func (r *CloudBackupEnumerateResponse) ToSdkCloudBackupEnumerateWithFiltersResponse() *SdkCloudBackupEnumerateWithFiltersResponse {
resp := &SdkCloudBackupEnumerateWithFiltersResponse{
Backups: make([]*SdkCloudBackupInfo, len(r.Backups)),
}
for i, v := range r.Backups {
resp.Backups[i] = v.ToSdkCloudBackupInfo()
}
return resp
}
func CloudBackupOpTypeToSdkCloudBackupOpType(t CloudBackupOpType) SdkCloudBackupOpType {
switch t {
case CloudBackupOp:
return SdkCloudBackupOpType_SdkCloudBackupOpTypeBackupOp
case CloudRestoreOp:
return SdkCloudBackupOpType_SdkCloudBackupOpTypeRestoreOp
default:
return SdkCloudBackupOpType_SdkCloudBackupOpTypeUnknown
}
}
func StringToSdkCloudBackupOpType(s string) SdkCloudBackupOpType {
return CloudBackupOpTypeToSdkCloudBackupOpType(CloudBackupOpType(s))
}
func (s CloudBackupStatus) ToSdkCloudBackupStatus() *SdkCloudBackupStatus {
status := &SdkCloudBackupStatus{
BackupId: s.ID,
Optype: CloudBackupOpTypeToSdkCloudBackupOpType(s.OpType),
Status: CloudBackupStatusTypeToSdkCloudBackupStatusType(s.Status),
BytesDone: s.BytesDone,
NodeId: s.NodeID,
Info: s.Info,
CredentialId: s.CredentialUUID,
SrcVolumeId: s.SrcVolumeID,
EtaSeconds: s.EtaSeconds,
BytesTotal: s.BytesTotal,
}
status.StartTime, _ = ptypes.TimestampProto(s.StartTime)
status.CompletedTime, _ = ptypes.TimestampProto(s.CompletedTime)
return status
}
func (r *CloudBackupStatusResponse) ToSdkCloudBackupStatusResponse() *SdkCloudBackupStatusResponse {
resp := &SdkCloudBackupStatusResponse{
Statuses: make(map[string]*SdkCloudBackupStatus),
}
for k, v := range r.Statuses {
resp.Statuses[k] = v.ToSdkCloudBackupStatus()
}
return resp
}
func (h CloudBackupHistoryItem) ToSdkCloudBackupHistoryItem() *SdkCloudBackupHistoryItem {
item := &SdkCloudBackupHistoryItem{
SrcVolumeId: h.SrcVolumeID,
Status: StringToSdkCloudBackupStatusType(h.Status),
}
item.Timestamp, _ = ptypes.TimestampProto(h.Timestamp)
return item
}
func (r *CloudBackupHistoryResponse) ToSdkCloudBackupHistoryResponse() *SdkCloudBackupHistoryResponse {
resp := &SdkCloudBackupHistoryResponse{
HistoryList: make([]*SdkCloudBackupHistoryItem, len(r.HistoryList)),
}
for i, v := range r.HistoryList {
resp.HistoryList[i] = v.ToSdkCloudBackupHistoryItem()
}
return resp
}
func (l *VolumeLocator) MergeVolumeSpecLabels(s *VolumeSpec) *VolumeLocator {
for k, v := range s.GetVolumeLabels() {
l.VolumeLabels[k] = v
}
return l
}
func (v *Volume) IsPermitted(ctx context.Context, accessType Ownership_AccessType) bool {
return v.GetSpec().IsPermitted(ctx, accessType)
}
func (v *VolumeSpec) IsPermitted(ctx context.Context, accessType Ownership_AccessType) bool {
if v.IsPublic() {
return true
}
// Volume is not public, check permission
if userinfo, ok := auth.NewUserInfoFromContext(ctx); ok {
// Check Access
return v.IsPermittedFromUserInfo(userinfo, accessType)
} else {
// There is no user information in the context so
// authorization is not running
return true
}
}
func (v *VolumeSpec) IsPermittedFromUserInfo(user *auth.UserInfo, accessType Ownership_AccessType) bool {
if v.IsPublic() {
return true
}
if v.GetOwnership() != nil {
return v.GetOwnership().IsPermitted(user, accessType)
}
return true
}
func (v *VolumeSpec) IsPublic() bool {
return v.GetOwnership() == nil || v.GetOwnership().IsPublic()
}
// GetCloneCreatorOwnership returns the appropriate ownership for the
// new snapshot and if an update is required
func (v *VolumeSpec) GetCloneCreatorOwnership(ctx context.Context) (*Ownership, bool) {
o := v.GetOwnership()
// If there is user information, then auth is enabled
if userinfo, ok := auth.NewUserInfoFromContext(ctx); ok {
// Check if the owner is the one who cloned it
if o != nil && o.IsOwner(userinfo) {
return o, false
}
// Not the same owner, we now need new ownership.
// This works for public volumes also.
return OwnershipSetUsernameFromContext(ctx, nil), true
}
return o, false
}
| 1 | 7,833 | Change this to IDs too? | libopenstorage-openstorage | go |
@@ -221,6 +221,11 @@ public class RaidsPlugin extends Plugin
@Getter
private final List<String> layoutWhitelist = new ArrayList<>();
+ @Getter
+ private final ImmutableSet<String> list_of_DC_SCOUT_RAIDS = ImmutableSet.of(
+ "SCPFCCSPCF", "CSPFCCCSSF", "SCFPCSCPCF", "PCSFCPCSCF", "SCCFCPSCSF", "SCPFCCCSSF",
+ "SCPFCPCSCF"
+ );
@Getter
private Raid raid;
| 1 | /*
* Copyright (c) 2018, Kamiel
* Copyright (c) 2019, ganom <https://github.com/Ganom>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.plugins.raids;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.inject.Binder;
import com.google.inject.Provides;
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.text.DecimalFormat;
import java.time.Instant;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.inject.Inject;
import javax.inject.Singleton;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import net.runelite.api.ChatMessageType;
import net.runelite.api.Client;
import net.runelite.api.GameState;
import net.runelite.api.InstanceTemplates;
import net.runelite.api.ItemID;
import net.runelite.api.MenuOpcode;
import net.runelite.api.NullObjectID;
import static net.runelite.api.Perspective.SCENE_SIZE;
import net.runelite.api.Player;
import net.runelite.api.Point;
import net.runelite.api.SpriteID;
import static net.runelite.api.SpriteID.TAB_QUESTS_BROWN_RAIDING_PARTY;
import net.runelite.api.Tile;
import net.runelite.api.VarPlayer;
import net.runelite.api.Varbits;
import net.runelite.api.events.ChatMessage;
import net.runelite.api.events.ClientTick;
import net.runelite.api.events.ConfigChanged;
import net.runelite.api.events.VarbitChanged;
import net.runelite.api.events.WidgetHiddenChanged;
import net.runelite.api.widgets.Widget;
import net.runelite.api.widgets.WidgetInfo;
import net.runelite.client.callback.ClientThread;
import net.runelite.client.chat.ChatColorType;
import net.runelite.client.chat.ChatMessageBuilder;
import net.runelite.client.chat.ChatMessageManager;
import net.runelite.client.chat.QueuedMessage;
import net.runelite.client.config.ConfigManager;
import net.runelite.client.eventbus.EventBus;
import net.runelite.client.events.OverlayMenuClicked;
import net.runelite.client.game.ItemManager;
import net.runelite.client.game.SpriteManager;
import net.runelite.client.plugins.Plugin;
import net.runelite.client.plugins.PluginDescriptor;
import net.runelite.client.plugins.PluginType;
import net.runelite.client.plugins.raids.solver.Layout;
import net.runelite.client.plugins.raids.solver.LayoutSolver;
import net.runelite.client.plugins.raids.solver.RotationSolver;
import net.runelite.client.ui.ClientToolbar;
import net.runelite.client.ui.NavigationButton;
import net.runelite.client.ui.overlay.OverlayManager;
import net.runelite.client.ui.overlay.OverlayMenuEntry;
import net.runelite.client.ui.overlay.WidgetOverlay;
import net.runelite.client.ui.overlay.infobox.InfoBoxManager;
import net.runelite.client.ui.overlay.tooltip.Tooltip;
import net.runelite.client.ui.overlay.tooltip.TooltipManager;
import net.runelite.client.util.ImageUtil;
import net.runelite.api.util.Text;
import org.apache.commons.lang3.StringUtils;
import static org.apache.commons.lang3.StringUtils.containsIgnoreCase;
import net.runelite.client.ws.PartyMember;
import net.runelite.client.ws.PartyService;
import net.runelite.client.ws.WSClient;
import net.runelite.http.api.ws.messages.party.PartyChatMessage;
@PluginDescriptor(
name = "CoX Scouter",
description = "Show helpful information for the Chambers of Xeric raid",
tags = {"combat", "raid", "overlay", "pve", "pvm", "bosses", "cox", "olm", "scout"},
type = PluginType.PVM,
enabledByDefault = false
)
@Singleton
@Slf4j
@Getter(AccessLevel.PACKAGE)
public class RaidsPlugin extends Plugin
{
	// Formats point totals with thousands separators, e.g. "12,345".
	static final DecimalFormat POINTS_FORMAT = new DecimalFormat("#,###");
	// Plane on which the raid lobby sits; used to anchor the room grid scan.
	private static final int LOBBY_PLANE = 3;
	// Exact friends-chat notification texts emitted by the game client.
	private static final String RAID_START_MESSAGE = "The raid has begun!";
	private static final String LEVEL_COMPLETE_MESSAGE = "level complete!";
	private static final String RAID_COMPLETE_MESSAGE = "Congratulations - your raid is complete!";
	// Formats percentages/ratios to at most two decimal places.
	private static final DecimalFormat DECIMAL_FORMAT = new DecimalFormat("###.##");
	// Captures the contents of one "[...]" group in config strings
	// (rotation whitelist entries and recommended-item groups).
	private static final Pattern ROTATION_REGEX = Pattern.compile("\\[(.*?)]");
	// Captures the "H:MM:SS"-style duration from the raid-complete message.
	private static final Pattern RAID_COMPLETE_REGEX = Pattern.compile("Congratulations - your raid is complete! Duration: ([0-9:]+)");
	// Known "layout - rotation" codes classified by crab-puzzle quality and
	// position; consulted by handleCrabs() when scouting.
	private static final ImmutableSet<String> GOOD_CRABS_FIRST = ImmutableSet.of(
		"FSCCP.PCSCF - #WNWSWN#ESEENW", //both good crabs
		"SCSPF.CCSPF - #ESWWNW#ESENES", //both good crabs
		"SPCFC.CSPCF - #WWNEEE#WSWNWS", //both good crabs
		"SCFCP.CSCFS - #ENEESW#ENWWSW", //good crabs
		"SCPFC.CCSSF - #NEESEN#WSWWNE", //good crabs
		"SCFPC.CSPCF - #WSWWNE#WSEENE" //good crabs first rare crabs second
	);
	private static final ImmutableSet<String> GOOD_CRABS_SECOND = ImmutableSet.of(
		"FSCCP.PCSCF - #WNWSWN#ESEENW", //both good crabs
		"SCSPF.CCSPF - #ESWWNW#ESENES", //both good crabs
		"SPCFC.CSPCF - #WWNEEE#WSWNWS", //both good crabs
		"SCPFC.CSPCF - #NEEESW#WWNEEE", //rare crabs first good crabs second
		"SCFCP.CCSPF - #ESEENW#ESWWNW", //bad crabs first good crabs second
		"SCPFC.CSPSF - #WWSEEE#NWSWWN", //bad crabs first good crabs second
		"SFCCS.PCPSF - #ENWWSW#ENESEN", //bad crabs first good crabs second
		"SPCFC.SCCPF - #ESENES#WWWNEE", //bad crabs first good crabs second
		"SPSFP.CCCSF - #NWSWWN#ESEENW", //bad crabs first good crabs second
		"FSCCP.PCSCF - #ENWWWS#NEESEN", //bad crabs first good crabs second
		"FSCCS.PCPSF - #WSEEEN#WSWNWS" //bad crabs first good crabs second
	);
	private static final ImmutableSet<String> RARE_CRABS_FIRST = ImmutableSet.of(
		"SCPFC.CSPCF - #NEEESW#WWNEEE", //rare crabs first good crabs second
		"SCPFC.PCSCF - #WNEEES#NWSWNW", //rare crabs first bad crabs second
		"SCPFC.CCPSF - #NWWWSE#WNEESE" //both rare crabs
	);
	private static final ImmutableSet<String> RARE_CRABS_SECOND = ImmutableSet.of(
		"SCPFC.CCPSF - #NWWWSE#WNEESE", //both rare crabs
		"FSCPC.CSCPF - #WNWWSE#EENWWW", //bad crabs first rare crabs second
		"SCFPC.PCCSF - #WSEENE#WWWSEE", //bad crabs first rare crabs second
		"SCFPC.SCPCF - #NESENE#WSWWNE", //bad crabs first rare crabs second
		"SFCCP.CSCPF - #WNEESE#NWSWWN", //bad crabs first rare crabs second
		"SCFPC.CSPCF - #WSWWNE#WSEENE" //good crabs first rare crabs second
	);
	// Extracts each "Puzzle - Name" token from the full rotation string.
	private static final Pattern PUZZLES = Pattern.compile("Puzzle - (\\w+)");
	// --- Injected services; excluded from the class-level @Getter(PACKAGE) ---
	@Getter(AccessLevel.NONE)
	@Inject
	private ChatMessageManager chatMessageManager;
	@Getter(AccessLevel.NONE)
	@Inject
	private InfoBoxManager infoBoxManager;
	@Getter(AccessLevel.NONE)
	@Inject
	private Client client;
	@Getter(AccessLevel.NONE)
	@Inject
	private RaidsConfig config;
	@Getter(AccessLevel.NONE)
	@Inject
	private OverlayManager overlayManager;
	@Getter(AccessLevel.NONE)
	@Inject
	private RaidsOverlay overlay;
	@Getter(AccessLevel.NONE)
	@Inject
	private RaidsPointsOverlay pointsOverlay;
	@Getter(AccessLevel.NONE)
	@Inject
	private RaidsPartyOverlay partyOverlay;
	@Getter(AccessLevel.NONE)
	@Inject
	private LayoutSolver layoutSolver;
	@Getter(AccessLevel.NONE)
	@Inject
	private SpriteManager spriteManager;
	@Getter(AccessLevel.NONE)
	@Inject
	private ClientThread clientThread;
	@Getter(AccessLevel.NONE)
	@Inject
	private TooltipManager tooltipManager;
	@Getter(AccessLevel.NONE)
	@Inject
	private ClientToolbar clientToolbar;
	@Getter(AccessLevel.NONE)
	@Inject
	private ItemManager itemManager;
	@Getter(AccessLevel.NONE)
	@Inject
	private EventBus eventBus;
	// True once the raid has actually started (party locked in).
	private boolean raidStarted;
	@Inject
	private PartyService party;
	@Inject
	private WSClient ws;
	// --- Parsed config lists (rebuilt by updateLists()) ---
	@Getter
	private final List<String> roomWhitelist = new ArrayList<>();
	@Getter
	private final List<String> roomBlacklist = new ArrayList<>();
	@Getter
	private final List<String> rotationWhitelist = new ArrayList<>();
	@Getter
	private final List<String> layoutWhitelist = new ArrayList<>();
	// Current scouted raid layout; null when not in the chambers.
	@Getter
	private Raid raid;
	// --- Cached config values (mirrored from RaidsConfig by updateConfig()) ---
	private boolean inRaidChambers;
	private boolean enhanceScouterTitle;
	private boolean hideBackground;
	private boolean raidsTimer;
	private boolean pointsMessage;
	private boolean ptsHr;
	private boolean scoutOverlay;
	private boolean scoutOverlayAtBank;
	private boolean scoutOverlayInRaid;
	private boolean displayFloorBreak;
	private boolean showRecommendedItems;
	private boolean alwaysShowWorldAndCC;
	private boolean colorTightrope;
	private boolean crabHandler;
	private boolean enableRotationWhitelist;
	private boolean enableLayoutWhitelist;
	private boolean showScavsFarms;
	private boolean scavsBeforeIce;
	private boolean scavsBeforeOlm;
	private boolean hideRopeless;
	private boolean hideVanguards;
	private boolean hideUnknownCombat;
	private boolean partyDisplay;
	// Player count visible in the lobby when the raid started.
	private int startPlayerCount;
	// Floor/raid split times in seconds; -1 until recorded.
	private int upperTime = -1;
	private int middleTime = -1;
	private int lowerTime = -1;
	private int raidTime = -1;
	private Color goodCrabColor;
	private Color rareCrabColor;
	private Color scavPrepColor;
	private Color tightropeColor;
	private boolean displayLayoutMessage;
	private String layoutMessage;
	private RaidsTimer timer;
	private WidgetOverlay widgetOverlay;
	private NavigationButton navButton;
	private String recommendedItems;
	private String whitelistedRooms;
	private String whitelistedRotations;
	private String whitelistedLayouts;
	private String blacklistedRooms;
	// Tooltip text for the points infobox; null hides the tooltip.
	private String tooltip;
	private String goodCrabs;
	private String layoutFullCode;
	// --- Party-tracking state (partyDisplay feature) ---
	private List<String> partyMembers = new ArrayList<>();
	private List<String> startingPartyMembers = new ArrayList<>();
	private Map<String, List<Integer>> recommendedItemsList = new HashMap<>();
	private Set<String> missingPartyMembers = new HashSet<>();
@Provides
RaidsConfig provideConfig(ConfigManager configManager)
{
return configManager.getConfig(RaidsConfig.class);
}
	@Override
	public void configure(Binder binder)
	{
		// Register the scouting overlay with the injector so it can be @Inject-ed.
		binder.bind(RaidsOverlay.class);
	}
	/**
	 * Plugin start: caches config, registers event handlers and overlays,
	 * kicks off an initial raid scout, and installs the sidebar panel.
	 * Call order matters: config must be cached before overlays consult it.
	 */
	@Override
	protected void startUp() throws Exception
	{
		updateConfig();
		addSubscriptions();
		overlayManager.add(overlay);
		overlayManager.add(pointsOverlay);
		if (this.partyDisplay)
		{
			overlayManager.add(partyOverlay);
		}
		updateLists();
		// Scout on the client thread in case we log in mid-raid.
		clientThread.invokeLater(() -> checkRaidPresence(true));
		widgetOverlay = overlayManager.getWidgetOverlay(WidgetInfo.RAIDS_POINTS_INFOBOX);
		RaidsPanel panel = injector.getInstance(RaidsPanel.class);
		panel.init();
		final BufferedImage icon = ImageUtil.getResourceStreamFromClass(this.getClass(), "instancereloadhelper.png");
		navButton = NavigationButton.builder()
			.tooltip("Raids Reload")
			.icon(icon)
			.priority(8)
			.panel(panel)
			.build();
		clientToolbar.addNavigation(navButton);
	}
	/**
	 * Plugin stop: unregisters handlers, removes overlays/panel/timer,
	 * un-hides the native points infobox we may have hidden, and clears state.
	 */
	@Override
	protected void shutDown() throws Exception
	{
		eventBus.unregister(this);
		overlayManager.remove(overlay);
		overlayManager.remove(pointsOverlay);
		clientToolbar.removeNavigation(navButton);
		if (this.partyDisplay)
		{
			overlayManager.remove(partyOverlay);
		}
		infoBoxManager.removeInfoBox(timer);
		// Restore the vanilla points infobox (onWidgetHiddenChanged hides it in-raid).
		final Widget widget = client.getWidget(WidgetInfo.RAIDS_POINTS_INFOBOX);
		if (widget != null)
		{
			widget.setHidden(false);
		}
		reset();
	}
	// Registers every event handler this plugin listens for on the event bus.
	private void addSubscriptions()
	{
		eventBus.subscribe(ConfigChanged.class, this, this::onConfigChanged);
		eventBus.subscribe(WidgetHiddenChanged.class, this, this::onWidgetHiddenChanged);
		eventBus.subscribe(VarbitChanged.class, this, this::onVarbitChanged);
		eventBus.subscribe(ChatMessage.class, this, this::onChatMessage);
		eventBus.subscribe(ClientTick.class, this, this::onClientTick);
		eventBus.subscribe(OverlayMenuClicked.class, this, this::onOverlayMenuClicked);
	}
private void onConfigChanged(ConfigChanged event)
{
if (!event.getGroup().equals("raids"))
{
return;
}
updateConfig();
updateLists();
if (event.getKey().equals("raidsTimer"))
{
updateInfoBoxState();
return;
}
if (event.getKey().equals("partyDisplay"))
{
if (this.partyDisplay)
{
overlayManager.add(partyOverlay);
}
else
{
overlayManager.remove(partyOverlay);
}
}
clientThread.invokeLater(() -> checkRaidPresence(true));
}
private void onWidgetHiddenChanged(WidgetHiddenChanged event)
{
if (!inRaidChambers || event.isHidden())
{
return;
}
Widget widget = event.getWidget();
if (widget == client.getWidget(WidgetInfo.RAIDS_POINTS_INFOBOX))
{
widget.setHidden(true);
}
}
	// Varbits drive both raid presence (IN_RAID) and party size, so re-check
	// both on every change; checkRaidPresence(false) only acts on transitions.
	private void onVarbitChanged(VarbitChanged event)
	{
		checkRaidPresence(false);
		if (this.partyDisplay)
		{
			updatePartyMembers(false);
		}
	}
private void onChatMessage(ChatMessage event)
{
if (inRaidChambers && event.getType() == ChatMessageType.FRIENDSCHATNOTIFICATION)
{
String message = Text.removeTags(event.getMessage());
Matcher matcher;
if (message.startsWith(RAID_START_MESSAGE))
{
if (this.raidsTimer)
{
timer = new RaidsTimer(this, Instant.now());
spriteManager.getSpriteAsync(TAB_QUESTS_BROWN_RAIDING_PARTY, 0, timer);
infoBoxManager.addInfoBox(timer);
}
if (this.partyDisplay)
{
// Base this on visible players since party size shows people outside the lobby
// and they did not get to come on the raid
List<Player> players = client.getPlayers();
startPlayerCount = players.size();
partyMembers.clear();
startingPartyMembers.clear();
missingPartyMembers.clear();
startingPartyMembers.addAll(Lists.transform(players, Player::getName));
partyMembers.addAll(startingPartyMembers);
}
}
if (timer != null && message.contains(LEVEL_COMPLETE_MESSAGE))
{
timer.timeFloor();
}
if (message.startsWith(RAID_COMPLETE_MESSAGE))
{
if (timer != null)
{
timer.timeOlm();
timer.setStopped(true);
}
updateTooltip();
}
matcher = RAID_COMPLETE_REGEX.matcher(message);
if (matcher.find())
{
raidTime = timeToSeconds(matcher.group(1));
int timesec = timeToSeconds(matcher.group(1));
updateTooltip();
if (this.pointsMessage)
{
int totalPoints = client.getVar(Varbits.TOTAL_POINTS);
int personalPoints = client.getVar(Varbits.PERSONAL_POINTS);
int partySize = client.getVar(Varbits.RAID_PARTY_SIZE);
double percentage = personalPoints / (totalPoints / 100.0);
String chatMessage = new ChatMessageBuilder()
.append(ChatColorType.NORMAL)
.append("Total points: ")
.append(ChatColorType.HIGHLIGHT)
.append(POINTS_FORMAT.format(totalPoints))
.append(ChatColorType.NORMAL)
.append(", Personal points: ")
.append(ChatColorType.HIGHLIGHT)
.append(POINTS_FORMAT.format(personalPoints))
.append(ChatColorType.NORMAL)
.append(" (")
.append(ChatColorType.HIGHLIGHT)
.append(DECIMAL_FORMAT.format(percentage))
.append(ChatColorType.NORMAL)
.append("%)")
.build();
chatMessageManager.queue(QueuedMessage.builder()
.type(ChatMessageType.FRIENDSCHATNOTIFICATION)
.runeLiteFormattedMessage(chatMessage)
.build());
if (this.ptsHr)
{
String ptssolo;
{
ptssolo = POINTS_FORMAT.format(((float) personalPoints / (float) timesec) * 3600);
}
String ptsteam;
{
ptsteam = POINTS_FORMAT.format(((float) totalPoints / (float) timesec) * 3600);
}
String ptssplit;
{
ptssplit = POINTS_FORMAT.format(((totalPoints / (float) timesec) * 3600) / (partySize));
}
String chatMessage2 = new ChatMessageBuilder()
.append(ChatColorType.NORMAL)
.append("Solo Pts/Hr: ")
.append(ChatColorType.HIGHLIGHT)
.append(ptssolo)
.append(ChatColorType.NORMAL)
.append("Team Pts/Hr: ")
.append(ChatColorType.HIGHLIGHT)
.append(ptsteam)
.build();
chatMessageManager.queue(QueuedMessage.builder()
.type(ChatMessageType.FRIENDSCHATNOTIFICATION)
.runeLiteFormattedMessage(chatMessage2)
.build());
String chatMessage3 = new ChatMessageBuilder()
.append(ChatColorType.NORMAL)
.append("Split Pts/Hr: ")
.append(ChatColorType.HIGHLIGHT)
.append(ptssplit)
.build();
chatMessageManager.queue(QueuedMessage.builder()
.type(ChatMessageType.FRIENDSCHATNOTIFICATION)
.runeLiteFormattedMessage(chatMessage3)
.build());
}
}
}
}
}
private void onClientTick(ClientTick event)
{
if (!this.raidsTimer
|| !client.getGameState().equals(GameState.LOGGED_IN)
|| tooltip == null)
{
return;
}
final Point mousePosition = client.getMouseCanvasPosition();
if (widgetOverlay.getBounds().contains(mousePosition.getX(), mousePosition.getY()))
{
tooltipManager.add(new Tooltip(tooltip));
}
}
private void onOverlayMenuClicked(OverlayMenuClicked event)
{
OverlayMenuEntry entry = event.getEntry();
if (entry.getMenuOpcode() == MenuOpcode.RUNELITE_OVERLAY &&
entry.getTarget().equals("Raids party overlay"))
{
switch (entry.getOption())
{
case RaidsPartyOverlay.PARTY_OVERLAY_RESET:
startingPartyMembers.clear();
updatePartyMembers(true);
missingPartyMembers.clear();
break;
case RaidsPartyOverlay.PARTY_OVERLAY_REFRESH:
updatePartyMembers(true);
break;
default:
break;
}
}
}
	/**
	 * Refreshes the tracked party roster from the raiding-party widget and
	 * computes which starting members have since left.
	 *
	 * @param force re-read the widget and re-seed the starting roster even if
	 *              the party size still matches the starting size
	 */
	private void updatePartyMembers(boolean force)
	{
		int partySize = client.getVar(Varbits.RAID_PARTY_SIZE);
		if (partySize <= 0)
		{
			return;
		}
		if (startingPartyMembers.size() == partySize && !force)
		{
			// Skip update if the part is as big as when we started
			missingPartyMembers.clear(); // Clear missing members in case someone came back
			return;
		}
		// Only update while in raid
		if (client.getVar(VarPlayer.IN_RAID_PARTY) == -1 || force)
		{
			Widget[] widgets;
			try
			{
				// NOTE(review): hard-coded child indices into the raiding-party
				// widget tree — breaks silently if the interface layout changes.
				widgets = client.getWidget(WidgetInfo.RAIDING_PARTY).getStaticChildren()[2].getStaticChildren()[3].getDynamicChildren();
			}
			catch (NullPointerException e)
			{
				return; // Raid widget not loaded
			}
			partyMembers.clear();
			for (Widget widget : widgets)
			{
				if (widget == null || widget.getText() == null)
				{
					continue;
				}
				// Extract the player name from between the widget name's tags.
				String name = widget.getName();
				if (name.length() > 1)
				{
					partyMembers.add(name.substring(name.indexOf('>') + 1, name.indexOf('<', 1)));
				}
			}
			// If we don't have any starting members, update starting members
			if (startingPartyMembers.size() == 0 || force)
			{
				missingPartyMembers.clear();
				startingPartyMembers.clear();
				startingPartyMembers.addAll(partyMembers);
			}
			else
			{
				// Check if anyone left
				if (startingPartyMembers.size() > partyMembers.size())
				{
					missingPartyMembers.clear();
					missingPartyMembers.addAll(startingPartyMembers);
					missingPartyMembers.removeAll(partyMembers);
				}
			}
		}
	}
	/**
	 * Detects entering/leaving the Chambers of Xeric and (re)scouts the raid.
	 * On entry it builds the raid from the scene, solves the layout and
	 * rotation, enables the overlay, broadcasts the layout, and classifies
	 * any crab puzzles.
	 *
	 * @param force rescan even if the IN_RAID varbit did not change
	 */
	void checkRaidPresence(boolean force)
	{
		if (client.getGameState() != GameState.LOGGED_IN)
		{
			return;
		}
		boolean setting = client.getVar(Varbits.IN_RAID) == 1;
		if (force || inRaidChambers != setting)
		{
			inRaidChambers = setting;
			updateInfoBoxState();
			if (inRaidChambers)
			{
				raid = buildRaid();
				if (raid == null)
				{
					log.debug("Failed to build raid");
					return;
				}
				Layout layout = layoutSolver.findLayout(raid.toCode());
				if (layout == null)
				{
					log.debug("Could not find layout match");
					return;
				}
				layoutFullCode = layout.getCode();
				raid.updateLayout(layout);
				RotationSolver.solve(raid.getCombatRooms());
				setOverlayStatus(true);
				sendRaidLayoutMessage();
				// Collect the "Puzzle - X" tokens to classify crab rooms by position.
				Matcher puzzleMatch = PUZZLES.matcher(raid.getFullRotationString());
				final List<String> puzzles = new ArrayList<>();
				while (puzzleMatch.find())
				{
					puzzles.add(puzzleMatch.group());
				}
				if (raid.getFullRotationString().contains("Crabs"))
				{
					switch (puzzles.size())
					{
						case 1:
							goodCrabs = handleCrabs(puzzles.get(0));
							break;
						case 2:
							goodCrabs = handleCrabs(puzzles.get(0), puzzles.get(1));
							break;
						case 3:
							goodCrabs = handleCrabs(puzzles.get(0), puzzles.get(1), puzzles.get(2));
							break;
					}
				}
			}
			else if (!this.scoutOverlayAtBank)
			{
				setOverlayStatus(false);
			}
		}
		// If we left party raid was started or we left raid
		if (client.getVar(VarPlayer.IN_RAID_PARTY) == -1 && (!inRaidChambers || !this.scoutOverlayInRaid))
		{
			setOverlayStatus(false);
			raidStarted = false;
		}
	}
	/**
	 * Broadcasts the scouted layout: to the RuneLite party websocket when in a
	 * party, otherwise to the local chat box. Also warns when the rotation is
	 * a sellable "record" raid.
	 */
	private void sendRaidLayoutMessage()
	{
		if (!this.displayLayoutMessage)
		{
			return;
		}
		final String layout = getRaid().getLayout().toCodeString();
		final String rooms = getRaid().toRoomString();
		final String raidData = "[" + layout + "]: " + rooms;
		layoutMessage = new ChatMessageBuilder()
			.append(ChatColorType.HIGHLIGHT)
			.append("Layout: ")
			.append(ChatColorType.NORMAL)
			.append(raidData)
			.build();
		final PartyMember localMember = party.getLocalMember();
		if (party.getMembers().isEmpty() || localMember == null)
		{
			// Not in a party: print locally instead of broadcasting.
			chatMessageManager.queue(QueuedMessage.builder()
				.type(ChatMessageType.FRIENDSCHATNOTIFICATION)
				.runeLiteFormattedMessage(layoutMessage)
				.build());
		}
		else
		{
			final PartyChatMessage message = new PartyChatMessage(layoutMessage);
			message.setMemberId(localMember.getMemberId());
			ws.send(message);
		}
		if (recordRaid() != null)
		{
			chatMessageManager.queue(QueuedMessage.builder()
				.type(ChatMessageType.FRIENDSCHATNOTIFICATION)
				.runeLiteFormattedMessage(new ChatMessageBuilder()
					.append(ChatColorType.HIGHLIGHT)
					.append("You have scouted a record raid, whilst this is a very good raid to do you will probably end up profiting more by selling this raid to a team looking for it.")
					.build())
				.build());
			chatMessageManager.queue(QueuedMessage.builder()
				.type(ChatMessageType.FRIENDSCHATNOTIFICATION)
				.runeLiteFormattedMessage(new ChatMessageBuilder()
					.append(ChatColorType.HIGHLIGHT)
					.append("The following are some places you can sell this raid: Scout Trading in We do Raids discord, and Buying Cox Rotations in Oblivion discord.")
					.build())
				.build());
		}
	}
private void updateInfoBoxState()
{
if (timer == null)
{
return;
}
if (inRaidChambers && this.raidsTimer)
{
if (!infoBoxManager.getInfoBoxes().contains(timer))
{
infoBoxManager.addInfoBox(timer);
}
}
else
{
infoBoxManager.removeInfoBox(timer);
}
if (!inRaidChambers)
{
timer = null;
}
}
	// Re-parses every config-backed list/map from its raw config string.
	private void updateLists()
	{
		updateList(roomWhitelist, this.whitelistedRooms);
		updateList(roomBlacklist, this.blacklistedRooms);
		updateList(rotationWhitelist, this.whitelistedRotations);
		updateList(layoutWhitelist, this.whitelistedLayouts);
		updateMap(recommendedItemsList, this.recommendedItems);
	}
private void updateMap(Map<String, List<Integer>> map, String input)
{
map.clear();
Matcher m = ROTATION_REGEX.matcher(input);
while (m.find())
{
String everything = m.group(1).toLowerCase();
int split = everything.indexOf(',');
if (split < 0)
{
continue;
}
String key = everything.substring(0, split);
if (key.length() < 1)
{
continue;
}
List<String> itemNames = Text.fromCSV(everything.substring(split));
map.computeIfAbsent(key, k -> new ArrayList<>());
for (String itemName : itemNames)
{
if (itemName.equals(""))
{
continue;
}
if (itemName.equals("ice barrage"))
{
map.get(key).add(SpriteID.SPELL_ICE_BARRAGE);
}
else if (itemName.startsWith("salve"))
{
map.get(key).add(ItemID.SALVE_AMULETEI);
}
else if (itemManager.search(itemName).size() > 0)
{
map.get(key).add(itemManager.search(itemName).get(0).getId());
}
else
{
log.info("RaidsPlugin: Could not find an item ID for item: " + itemName);
}
}
}
}
private void updateList(List<String> list, String input)
{
list.clear();
if (list == this.rotationWhitelist)
{
Matcher m = ROTATION_REGEX.matcher(input);
while (m.find())
{
String rotation = m.group(1).toLowerCase();
if (!list.contains(rotation))
{
list.add(rotation);
}
}
}
else
{
list.addAll(Text.fromCSV(input.toLowerCase()));
}
}
int getRotationMatches()
{
String rotation = raid.getRotationString().toLowerCase();
List<String> bosses = Text.fromCSV(rotation);
if (rotationWhitelist.contains(rotation))
{
return bosses.size();
}
for (String whitelisted : rotationWhitelist)
{
int matches = 0;
List<String> whitelistedBosses = Text.fromCSV(whitelisted);
for (int i = 0; i < whitelistedBosses.size(); i++)
{
if (i < bosses.size() && whitelistedBosses.get(i).equals(bosses.get(i)))
{
matches++;
}
else
{
matches = 0;
break;
}
}
if (matches >= 2)
{
return matches;
}
}
return 0;
}
private Point findLobbyBase()
{
Tile[][] tiles = client.getScene().getTiles()[LOBBY_PLANE];
for (int x = 0; x < SCENE_SIZE; x++)
{
for (int y = 0; y < SCENE_SIZE; y++)
{
if (tiles[x][y] == null || tiles[x][y].getWallObject() == null)
{
continue;
}
if (tiles[x][y].getWallObject().getId() == NullObjectID.NULL_12231)
{
return tiles[x][y].getSceneLocation();
}
}
}
return null;
}
	/**
	 * Reconstructs the raid's room grid by walking the scene in
	 * ROOM_MAX_SIZE steps from the lobby anchor, across planes 3 and 2
	 * (upper and lower levels). Returns null when the lobby anchor is
	 * not in the scene.
	 */
	private Raid buildRaid()
	{
		Point gridBase = findLobbyBase();
		if (gridBase == null)
		{
			return null;
		}
		Raid raid = new Raid();
		Tile[][] tiles;
		int position, x, y, offsetX;
		int startX = -2;
		for (int plane = 3; plane > 1; plane--)
		{
			tiles = client.getScene().getTiles()[plane];
			// Whether the room east of the anchor exists decides the grid offset.
			if (tiles[gridBase.getX() + RaidRoom.ROOM_MAX_SIZE][gridBase.getY()] == null)
			{
				position = 1;
			}
			else
			{
				position = 0;
			}
			for (int i = 1; i > -2; i--)
			{
				y = gridBase.getY() + (i * RaidRoom.ROOM_MAX_SIZE);
				for (int j = startX; j < 4; j++)
				{
					x = gridBase.getX() + (j * RaidRoom.ROOM_MAX_SIZE);
					offsetX = 0;
					if (x > SCENE_SIZE && position > 1 && position < 4)
					{
						position++;
					}
					if (x < 0)
					{
						offsetX = Math.abs(x) + 1; //add 1 because the tile at x=0 will always be null
					}
					if (x < SCENE_SIZE && y >= 0 && y < SCENE_SIZE)
					{
						if (tiles[x + offsetX][y] == null)
						{
							if (position == 4)
							{
								position++;
								break;
							}
							continue;
						}
						if (position == 0 && startX != j)
						{
							startX = j;
						}
						Tile base = tiles[offsetX > 0 ? 1 : x][y];
						RaidRoom room = determineRoom(base);
						// Rooms 0-7 are the upper level (plane 3), 8-15 the lower (plane 2).
						raid.setRoom(room, position + Math.abs((plane - 3) * 8));
						position++;
					}
				}
			}
		}
		return raid;
	}
	/**
	 * Identifies the raid room containing {@code base} by looking up the
	 * instance template of its 8x8 map chunk. Unrecognized chunks are
	 * returned as EMPTY rooms.
	 */
	private RaidRoom determineRoom(Tile base)
	{
		RaidRoom room = new RaidRoom(base, RaidRoom.Type.EMPTY);
		// Map chunks are 8x8 tiles; the chunk data encodes the source template.
		int chunkData = client.getInstanceTemplateChunks()[base.getPlane()][(base.getSceneLocation().getX()) / 8][base.getSceneLocation().getY() / 8];
		InstanceTemplates template = InstanceTemplates.findMatch(chunkData);
		if (template == null)
		{
			return room;
		}
		switch (template)
		{
			case RAIDS_LOBBY:
			case RAIDS_START:
				room.setType(RaidRoom.Type.START);
				break;
			case RAIDS_END:
				room.setType(RaidRoom.Type.END);
				break;
			case RAIDS_SCAVENGERS:
			case RAIDS_SCAVENGERS2:
				room.setType(RaidRoom.Type.SCAVENGERS);
				break;
			case RAIDS_SHAMANS:
				room.setType(RaidRoom.Type.COMBAT);
				room.setBoss(RaidRoom.Boss.SHAMANS);
				break;
			case RAIDS_VASA:
				room.setType(RaidRoom.Type.COMBAT);
				room.setBoss(RaidRoom.Boss.VASA);
				break;
			case RAIDS_VANGUARDS:
				room.setType(RaidRoom.Type.COMBAT);
				room.setBoss(RaidRoom.Boss.VANGUARDS);
				break;
			case RAIDS_ICE_DEMON:
				room.setType(RaidRoom.Type.PUZZLE);
				room.setPuzzle(RaidRoom.Puzzle.ICE_DEMON);
				break;
			case RAIDS_THIEVING:
				room.setType(RaidRoom.Type.PUZZLE);
				room.setPuzzle(RaidRoom.Puzzle.THIEVING);
				break;
			case RAIDS_FARMING:
			case RAIDS_FARMING2:
				room.setType(RaidRoom.Type.FARMING);
				break;
			case RAIDS_MUTTADILES:
				room.setType(RaidRoom.Type.COMBAT);
				room.setBoss(RaidRoom.Boss.MUTTADILES);
				break;
			case RAIDS_MYSTICS:
				room.setType(RaidRoom.Type.COMBAT);
				room.setBoss(RaidRoom.Boss.MYSTICS);
				break;
			case RAIDS_TEKTON:
				room.setType(RaidRoom.Type.COMBAT);
				room.setBoss(RaidRoom.Boss.TEKTON);
				break;
			case RAIDS_TIGHTROPE:
				room.setType(RaidRoom.Type.PUZZLE);
				room.setPuzzle(RaidRoom.Puzzle.TIGHTROPE);
				break;
			case RAIDS_GUARDIANS:
				room.setType(RaidRoom.Type.COMBAT);
				room.setBoss(RaidRoom.Boss.GUARDIANS);
				break;
			case RAIDS_CRABS:
				room.setType(RaidRoom.Type.PUZZLE);
				room.setPuzzle(RaidRoom.Puzzle.CRABS);
				break;
			case RAIDS_VESPULA:
				room.setType(RaidRoom.Type.COMBAT);
				room.setBoss(RaidRoom.Boss.VESPULA);
				break;
		}
		return room;
	}
/** Reset all raid-tracking fields to their initial values. */
public void reset()
{
	raid = null;
	upperTime = -1;
	middleTime = -1;
	lowerTime = -1;
	raidTime = -1;
	tooltip = null;
	inRaidChambers = false;
	widgetOverlay = null;
	raidStarted = false;
	timer = null;
}
/**
 * Convert a "mm:ss" or "hh:mm:ss" string into a number of seconds.
 *
 * @param s the colon-separated time string
 * @return total seconds, or -1 when the format is unrecognised
 */
private int timeToSeconds(String s)
{
	String[] parts = s.split(":");
	switch (parts.length)
	{
		case 2:
			return Integer.parseInt(parts[0]) * 60 + Integer.parseInt(parts[1]);
		case 3:
			return Integer.parseInt(parts[0]) * 3600 + Integer.parseInt(parts[1]) * 60 + Integer.parseInt(parts[2]);
		default:
			return -1;
	}
}
/**
 * Format a number of seconds as "m:ss" or "h:mm:ss".
 *
 * Fix: the hour component was followed by ";" instead of ":", producing
 * strings like "1;02:03"; it now uses the same ":" separator as the rest
 * of the string. Also drops the redundant float floor on what was already
 * integer division.
 *
 * @param seconds non-negative duration in seconds
 * @return the formatted time string
 */
private String secondsToTime(int seconds)
{
	StringBuilder builder = new StringBuilder();
	if (seconds >= 3600)
	{
		// Integer division already truncates; no Math.floor needed.
		builder.append(seconds / 3600).append(":");
	}
	seconds %= 3600;
	if (builder.length() == 0)
	{
		// No hour component: minutes are not zero-padded.
		builder.append(seconds / 60);
	}
	else
	{
		builder.append(StringUtils.leftPad(String.valueOf(seconds / 60), 2, '0'));
	}
	builder.append(":");
	seconds %= 60;
	builder.append(StringUtils.leftPad(String.valueOf(seconds), 2, '0'));
	return builder.toString();
}
/**
 * Rebuild the timer tooltip from the recorded floor split times. Split
 * times are cumulative, so per-floor durations are differences between
 * consecutive splits; -1 marks a split not yet recorded, at which point
 * the tooltip is truncated.
 */
private void updateTooltip()
{
	StringBuilder builder = new StringBuilder();
	if (upperTime == -1)
	{
		// No splits recorded yet: show nothing.
		tooltip = null;
		return;
	}
	builder.append("Upper level: ").append(secondsToTime(upperTime));
	if (middleTime == -1)
	{
		// Some layouts have no middle floor; the lower split then follows
		// the upper one directly.
		if (lowerTime == -1)
		{
			tooltip = builder.toString();
			return;
		}
		else
		{
			builder.append("</br>Lower level: ").append(secondsToTime(lowerTime - upperTime));
		}
	}
	else
	{
		builder.append("</br>Middle level: ").append(secondsToTime(middleTime - upperTime));
		if (lowerTime == -1)
		{
			tooltip = builder.toString();
			return;
		}
		else
		{
			builder.append("</br>Lower level: ").append(secondsToTime(lowerTime - middleTime));
		}
	}
	if (raidTime == -1)
	{
		tooltip = builder.toString();
		return;
	}
	builder.append("</br>Olm: ").append(secondsToTime(raidTime - lowerTime));
	tooltip = builder.toString();
}
/**
 * Classify a crabs room appearing in the first rotation group using the
 * layout-code whitelists.
 *
 * @param firstGroup room names of the first rotation group
 * @return "Good Crabs", "Rare Crabs", or null when not classifiable
 */
private String handleCrabs(String firstGroup)
{
	boolean crabsInFirst = firstGroup.contains("Crabs");
	if (crabsInFirst && GOOD_CRABS_FIRST.contains(layoutFullCode))
	{
		return "Good Crabs";
	}
	if (crabsInFirst && RARE_CRABS_FIRST.contains(layoutFullCode))
	{
		return "Rare Crabs";
	}
	return null;
}
/**
 * Classify a crabs room appearing in either of the first two rotation
 * groups using the layout-code whitelists; "Good" matches take priority
 * over "Rare" ones.
 *
 * @param firstGroup  room names of the first rotation group
 * @param secondGroup room names of the second rotation group
 * @return "Good Crabs", "Rare Crabs", or null when not classifiable
 */
private String handleCrabs(String firstGroup, String secondGroup)
{
	boolean crabsInFirst = firstGroup.contains("Crabs");
	boolean crabsInSecond = secondGroup.contains("Crabs");
	if (crabsInFirst && GOOD_CRABS_FIRST.contains(layoutFullCode))
	{
		return "Good Crabs";
	}
	if (crabsInSecond && GOOD_CRABS_SECOND.contains(layoutFullCode))
	{
		return "Good Crabs";
	}
	if (crabsInFirst && RARE_CRABS_FIRST.contains(layoutFullCode))
	{
		return "Rare Crabs";
	}
	if (crabsInSecond && RARE_CRABS_SECOND.contains(layoutFullCode))
	{
		return "Rare Crabs";
	}
	return null;
}
/**
 * Classify a crabs room across three rotation groups: first-group crabs
 * are "Good", second- or third-group crabs are "Rare".
 *
 * @return "Good Crabs", "Rare Crabs", or null when no group has crabs
 */
private String handleCrabs(String firstGroup, String secondGroup, String thirdGroup)
{
	if (firstGroup.contains("Crabs"))
	{
		return "Good Crabs";
	}
	if (secondGroup.contains("Crabs") || thirdGroup.contains("Crabs"))
	{
		return "Rare Crabs";
	}
	return null;
}
/**
 * Return the stored crab quality for the specific "vasa,tekton,vespula"
 * rotation whose full rotation contains both crabs and tightrope;
 * null for every other raid or when no crab quality was determined.
 */
String recordRaid()
{
	if (raid.getRotationString().equalsIgnoreCase("vasa,tekton,vespula")
		&& containsIgnoreCase(raid.getFullRotationString(), "crabs")
		&& containsIgnoreCase(raid.getFullRotationString(), "tightrope")
		&& goodCrabs != null)
	{
		return goodCrabs;
	}
	return null;
}
/** Show or hide the scout overlay. */
private void setOverlayStatus(boolean bool)
{
	overlay.setScoutOverlayShown(bool);
}
/**
 * Cache every config value into plugin fields so hot paths read plain
 * fields instead of going through the config proxy on each access.
 */
private void updateConfig()
{
	this.enhanceScouterTitle = config.enhanceScouterTitle();
	this.hideBackground = config.hideBackground();
	this.raidsTimer = config.raidsTimer();
	this.pointsMessage = config.pointsMessage();
	this.ptsHr = config.ptsHr();
	this.scoutOverlay = config.scoutOverlay();
	this.scoutOverlayAtBank = config.scoutOverlayAtBank();
	this.scoutOverlayInRaid = config.scoutOverlayInRaid();
	this.displayFloorBreak = config.displayFloorBreak();
	this.showRecommendedItems = config.showRecommendedItems();
	this.recommendedItems = config.recommendedItems();
	this.alwaysShowWorldAndCC = config.alwaysShowWorldAndCC();
	this.displayLayoutMessage = config.displayLayoutMessage();
	this.colorTightrope = config.colorTightrope();
	this.tightropeColor = config.tightropeColor();
	this.crabHandler = config.crabHandler();
	this.goodCrabColor = config.goodCrabColor();
	this.rareCrabColor = config.rareCrabColor();
	this.enableRotationWhitelist = config.enableRotationWhitelist();
	this.whitelistedRotations = config.whitelistedRotations();
	this.enableLayoutWhitelist = config.enableLayoutWhitelist();
	this.whitelistedLayouts = config.whitelistedLayouts();
	this.showScavsFarms = config.showScavsFarms();
	this.scavsBeforeIce = config.scavsBeforeIce();
	this.scavsBeforeOlm = config.scavsBeforeOlm();
	this.scavPrepColor = config.scavPrepColor();
	this.whitelistedRooms = config.whitelistedRooms();
	this.blacklistedRooms = config.blacklistedRooms();
	this.hideRopeless = config.hideRopeless();
	this.hideVanguards = config.hideVanguards();
	this.hideUnknownCombat = config.hideUnknownCombat();
	this.partyDisplay = config.partyDisplay();
}
}
| 1 | 15,511 | private static final, and it should be located in raidsoverlay, as it's not needed in the plugin. also the name could be a bit better, DC_SCOUT_RAIDS or similiar. | open-osrs-runelite | java |
@@ -173,7 +173,7 @@ STATIC fpga_result parse_perf_attributes(struct udev_device *dev,
globfree(&pglob);
return FPGA_EXCEPTION;
}
- if (fscanf(file, "%s", attr_value) != 1) {
+ if (fscanf(file, "%127s", attr_value) != 1) {
OPAE_ERR("Failed to read %s", pglob.gl_pathv[i]);
goto out;
} | 1 | // Copyright(c) 2021, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "fpgaperf_counter.h"
#include <errno.h>
#include <glob.h>
#include <regex.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <libudev.h>
#include <linux/perf_event.h>
#include <opae/fpga.h>
#include <opae/log.h>
#include <opae/properties.h>
#include <opae/utils.h>
#include "opae_int.h"
#define PCI_DEV_ADDRS "/sys/bus/pci/devices/*%x*:*%x*:*%x*.*%x*/"
#define DFL_PERF_FME PCI_DEV_ADDRS "fpga_region/region*/dfl-fme.*"
#define DFL_PERF_SYSFS "/sys/bus/event_source/devices/dfl_fme"
#define PERF_EVENT "event=(0x[0-9a-fA-F]{2}),"
#define PERF_EVTYPE "evtype=(0x[0-9a-fA-F]{2}),"
#define PERF_PORTID "portid=(0x[0-9a-fA-F]{2})"
#define PERF_EVENT_PATTERN PERF_EVENT PERF_EVTYPE PERF_PORTID
#define PERF_CONFIG_PATTERN "config:([0-9]{1,})-([0-9]{2,})"
#define PARSE_MATCH_INT(_p, _m, _v, _b) \
do { \
errno = 0; \
_v = strtoul(_p + _m, NULL, _b); \
if (errno) { \
OPAE_MSG("error parsing int"); \
} \
} while (0)
/* Layout of a read(2) result from a perf event opened with
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID: 'nr' entries follow, each
 * carrying a counter value and its kernel-assigned id. */
struct read_format {
	uint64_t nr;          /* number of entries in values[] */
	struct {
		uint64_t value;   /* counter value */
		uint64_t id;      /* id obtained via PERF_EVENT_IOC_ID */
	} values[];
};
/*
 * Check perf handle object for validity and lock its mutex.
 * If fpga_perf_check_and_lock() returns FPGA_OK, assume the mutex to be
 * locked; every other return value leaves the mutex unlocked.
 */
STATIC fpga_result fpga_perf_check_and_lock(fpga_perf_counter *fpga_perf)
{
	int res = 0;
	if (!fpga_perf)
		return FPGA_INVALID_PARAM;
	if (opae_mutex_lock(res, &fpga_perf->lock)) {
		OPAE_ERR("Failed to lock perf mutex");
		return FPGA_EXCEPTION;
	}
	/* Validate the magic only after taking the lock; an invalid handle
	 * releases the lock again before returning. */
	if (fpga_perf->magic != FPGA_PERF_MAGIC) {
		opae_mutex_unlock(res, &fpga_perf->lock);
		return FPGA_INVALID_PARAM;
	}
	return FPGA_OK;
}
/* Parse the sysfs perf attributes of a dfl-fme device.
 *
 * attr selects what is parsed:
 *   "format" - config bit-field descriptors ("config:<lo>-<hi>"); the low
 *              bit of each field is stored as that format's shift value.
 *   "events" - event descriptors ("event=0x..,evtype=0x..,portid=0x..");
 *              each field is shifted per the formats parsed earlier and
 *              OR-ed into the event's config value.
 *
 * Fixes: bound fscanf() to the size of attr_value (previously an
 * unbounded "%s" could overflow the 128-byte buffer) and release every
 * successfully compiled regex with regfree() (previously leaked once
 * per parsed file).
 *
 * Returns FPGA_OK on success; FPGA_INVALID_PARAM, FPGA_NO_MEMORY or
 * FPGA_EXCEPTION on failure.
 */
STATIC fpga_result parse_perf_attributes(struct udev_device *dev,
	fpga_perf_counter *fpga_perf, const char *attr)
{
	regex_t re;
	char err[128] = { 0 };
	int reg_res = 0;
	uint64_t loop = 0;
	uint64_t inner_loop = 0;
	uint64_t value = 0;
	char attr_path[DFL_PERF_STR_MAX] = { 0, };
	char attr_value[128] = { 0, };
	int gres = 0;
	size_t i = 0;
	FILE *file = NULL;
	glob_t pglob;
	regmatch_t f_matches[3] = { {0} };
	regmatch_t e_matches[4] = { {0} };

	if (!dev || !fpga_perf) {
		OPAE_ERR("Invalid input parameters");
		return FPGA_INVALID_PARAM;
	}
	if (snprintf(attr_path, sizeof(attr_path), "%s/%s/*",
		udev_device_get_syspath(dev), attr) < 0) {
		OPAE_ERR("snprintf buffer overflow");
		return FPGA_EXCEPTION;
	}
	gres = glob(attr_path, GLOB_NOSORT, NULL, &pglob);
	if (gres || !pglob.gl_pathc) {
		OPAE_ERR("Failed pattern match %s", attr_path);
		globfree(&pglob);
		return FPGA_EXCEPTION;
	}
	/* Allocate the destination array once; subsequent calls reuse it. */
	if (strcmp(attr, "format") == 0) {
		fpga_perf->num_format = pglob.gl_pathc;
		if (!fpga_perf->format_type) {
			fpga_perf->format_type = calloc(fpga_perf->num_format,
				sizeof(perf_format_type));
			if (!fpga_perf->format_type) {
				fpga_perf->num_format = 0;
				OPAE_ERR("Failed to allocate Memory");
				globfree(&pglob);
				return FPGA_NO_MEMORY;
			}
		}
	} else {
		fpga_perf->num_perf_events = pglob.gl_pathc;
		if (!fpga_perf->perf_events) {
			fpga_perf->perf_events = calloc(fpga_perf->num_perf_events,
				sizeof(perf_events_type));
			if (!fpga_perf->perf_events) {
				fpga_perf->num_perf_events = 0;
				OPAE_ERR("Failed to allocate Memory");
				globfree(&pglob);
				return FPGA_NO_MEMORY;
			}
		}
	}
	for (i = 0; i < pglob.gl_pathc; i++) {
		file = fopen(pglob.gl_pathv[i], "r");
		if (!file) {
			OPAE_ERR("fopen(%s) failed\n", pglob.gl_pathv[i]);
			globfree(&pglob);
			return FPGA_EXCEPTION;
		}
		/* Field width keeps the read within sizeof(attr_value) - 1. */
		if (fscanf(file, "%127s", attr_value) != 1) {
			OPAE_ERR("Failed to read %s", pglob.gl_pathv[i]);
			goto out;
		}
		if (strcmp(attr, "format") == 0) {
			reg_res = regcomp(&re, PERF_CONFIG_PATTERN,
				REG_EXTENDED | REG_ICASE);
			if (reg_res) {
				OPAE_ERR("Error compiling regex");
				goto out;
			}
			reg_res = regexec(&re, attr_value, 4, f_matches, 0);
			if (reg_res) {
				regerror(reg_res, &re, err, sizeof(err));
				OPAE_MSG("Error executing regex: %s", err);
				regfree(&re);
			} else {
				/* Match offsets stay valid after regfree(). */
				regfree(&re);
				PARSE_MATCH_INT(attr_value, f_matches[1].rm_so, value, 10);
				fpga_perf->format_type[loop].shift = value;
				/* Format name is the file name following "<attr>/". */
				if (snprintf(fpga_perf->format_type[loop].format_name,
					sizeof(fpga_perf->format_type[loop].format_name),
					"%s", (strstr(pglob.gl_pathv[i], attr)
					+ strlen(attr)+1)) < 0) {
					OPAE_ERR("snprintf buffer overflow");
					goto out;
				}
				loop++;
			}
		} else {
			reg_res = regcomp(&re, PERF_EVENT_PATTERN,
				REG_EXTENDED | REG_ICASE);
			if (reg_res) {
				OPAE_ERR("Error compiling regex");
				goto out;
			}
			reg_res = regexec(&re, attr_value, 4, e_matches, 0);
			if (reg_res) {
				regerror(reg_res, &re, err, sizeof(err));
				OPAE_MSG("Error executing regex: %s", err);
				regfree(&re);
			} else {
				uint64_t config = 0;
				uint64_t event = 0;
				regfree(&re);
				if (snprintf(fpga_perf->perf_events[inner_loop].event_name,
					sizeof(fpga_perf->perf_events[inner_loop].event_name),
					"%s", (strstr(pglob.gl_pathv[i], attr)
					+ strlen(attr) + 1)) < 0) {
					OPAE_ERR("snprintf buffer overflow");
					goto out;
				}
				/* Shift each captured field into place per the parsed
				 * format descriptors. */
				for (loop = 0; loop < fpga_perf->num_format; loop++) {
					PARSE_MATCH_INT(attr_value,
						e_matches[loop + 1].rm_so, event, 16);
					config |= event << fpga_perf->format_type[loop].shift;
				}
				fpga_perf->perf_events[inner_loop].config = config;
				inner_loop++;
			}
		}
		fclose(file);
	}
	globfree(&pglob);
	return FPGA_OK;
out:
	fclose(file);
	globfree(&pglob);
	return FPGA_EXCEPTION;
}
/* Parse formats/events for the dfl_fme sysfs device at perf_sysfs_path,
 * open one perf_event fd per parsed event (the first opened event acts
 * as the group leader) and reset the whole counter group. */
STATIC fpga_result fpga_perf_events(char* perf_sysfs_path, fpga_perf_counter *fpga_perf)
{
	fpga_result ret = FPGA_OK;
	struct udev *udev = NULL;
	struct udev_device *dev = NULL;
	int fd = 0;
	int grpfd = 0;
	uint64_t loop = 0;
	struct perf_event_attr pea;

	if (!perf_sysfs_path || !fpga_perf) {
		OPAE_ERR("Invalid input parameters");
		return FPGA_INVALID_PARAM;
	}
	/* create udev object */
	udev = udev_new();
	if (!udev) {
		OPAE_ERR("Cannot create udev context");
		return FPGA_EXCEPTION;
	}
	dev = udev_device_new_from_syspath(udev, perf_sysfs_path);
	if (!dev) {
		OPAE_ERR("Failed to get device");
		udev_unref(udev);
		return FPGA_EXCEPTION;
	}
	/* cpumask picks the CPU events are opened on; type is the PMU id
	 * used in perf_event_attr.type. */
	const char * ptr = udev_device_get_sysattr_value(dev, "cpumask");
	if (ptr)
		PARSE_MATCH_INT(ptr, 0, fpga_perf->cpumask, 10);
	ptr = udev_device_get_sysattr_value(dev, "type");
	if (ptr)
		PARSE_MATCH_INT(ptr, 0, fpga_perf->type, 10);
	/* parse the format value */
	ret = parse_perf_attributes(dev, fpga_perf, "format");
	if (ret != FPGA_OK)
		goto out;
	/* parse the event value */
	ret = parse_perf_attributes(dev, fpga_perf, "events");
	if (ret != FPGA_OK)
		goto out;
	/* initialize the pea structure to 0 */
	memset(&pea, 0, sizeof(struct perf_event_attr));
	for (loop = 0; loop < fpga_perf->num_perf_events; loop++) {
		/* Until the first event is opened, perf_events[0].fd is 0
		 * (calloc'd), so grpfd is -1 and that event becomes the
		 * group leader; later events join its group. */
		if (fpga_perf->perf_events[0].fd <= 0)
			grpfd = -1;
		else
			grpfd = fpga_perf->perf_events[0].fd;
		/* NOTE(review): events whose config is 0 are skipped —
		 * confirm 0 is never a valid event encoding. */
		if (!fpga_perf->perf_events[loop].config)
			continue;
		pea.type = fpga_perf->type;
		pea.size = sizeof(struct perf_event_attr);
		pea.config = fpga_perf->perf_events[loop].config;
		pea.disabled = 1;
		pea.inherit = 1;
		pea.sample_type = PERF_SAMPLE_IDENTIFIER;
		pea.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;
		fd = syscall(__NR_perf_event_open, &pea, -1, fpga_perf->cpumask, grpfd, 0);
		if (fd == -1) {
			OPAE_ERR("Error opening leader %llx\n", pea.config);
			ret = FPGA_EXCEPTION;
			goto out;
		} else {
			fpga_perf->perf_events[loop].fd = fd;
			/* The id is matched against read_format entries later. */
			if (ioctl(fpga_perf->perf_events[loop].fd, PERF_EVENT_IOC_ID,
				&fpga_perf->perf_events[loop].id) == -1) {
				OPAE_ERR("PERF_EVENT_IOC_ID ioctl failed: %s",
					strerror(errno));
				ret = FPGA_EXCEPTION;
				goto out;
			}
		}
	}
	/* Reset all counters of the group via the leader. */
	if (ioctl(fpga_perf->perf_events[0].fd, PERF_EVENT_IOC_RESET,
		PERF_IOC_FLAG_GROUP) == -1) {
		OPAE_ERR("PERF_EVENT_IOC_RESET ioctl failed: %s", strerror(errno));
		ret = FPGA_EXCEPTION;
		goto out;
	}
out:
	udev_device_unref(dev);
	udev_unref(udev);
	return ret;
}
/* Read segment/bus/device/function of the FPGA identified by token.
 *
 * Fix: the properties object obtained from fpgaGetProperties() was
 * previously leaked on every path; it is now always destroyed before
 * returning.
 */
STATIC fpga_result get_fpga_sbdf(fpga_token token,
	uint16_t *segment,
	uint8_t *bus,
	uint8_t *device,
	uint8_t *function)
{
	fpga_result res = FPGA_OK;
	fpga_result dres = FPGA_OK;
	fpga_properties props = NULL;

	if (!segment || !bus ||
		!device || !function) {
		OPAE_ERR("Invalid input parameters");
		return FPGA_INVALID_PARAM;
	}

	res = fpgaGetProperties(token, &props);
	if (res != FPGA_OK) {
		OPAE_ERR("Failed to get properties");
		return res;
	}

	res = fpgaPropertiesGetBus(props, bus);
	if (res != FPGA_OK) {
		OPAE_ERR("Failed to get bus");
		goto out;
	}

	res = fpgaPropertiesGetSegment(props, segment);
	if (res != FPGA_OK) {
		OPAE_ERR("Failed to get Segment");
		goto out;
	}

	res = fpgaPropertiesGetDevice(props, device);
	if (res != FPGA_OK) {
		OPAE_ERR("Failed to get Device");
		goto out;
	}

	res = fpgaPropertiesGetFunction(props, function);
	if (res != FPGA_OK)
		OPAE_ERR("Failed to get Function");

out:
	/* Always release the properties object to avoid a leak. */
	dres = fpgaDestroyProperties(&props);
	if (dres != FPGA_OK)
		OPAE_ERR("Failed to destroy properties");
	return res != FPGA_OK ? res : dres;
}
/* Initialise the perf handle: set the magic number, then create the
 * handle mutex as PTHREAD_MUTEX_RECURSIVE. The mutex attribute object
 * is destroyed on every path. */
STATIC fpga_result fpga_perf_mutex_init(fpga_perf_counter *fpga_perf)
{
	pthread_mutexattr_t mattr;
	fpga_result ret = FPGA_EXCEPTION;

	if (!fpga_perf) {
		OPAE_ERR("Invalid input parameters");
		return FPGA_INVALID_PARAM;
	}
	fpga_perf->magic = FPGA_PERF_MAGIC;
	if (pthread_mutexattr_init(&mattr)) {
		OPAE_ERR("pthread_mutexattr_init() failed");
		return FPGA_EXCEPTION;
	}
	if (pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE)) {
		OPAE_ERR("pthread_mutexattr_settype() failed");
	} else if (pthread_mutex_init(&fpga_perf->lock, &mattr)) {
		OPAE_ERR("pthread_mutex_init() failed");
	} else {
		ret = FPGA_OK;
	}
	pthread_mutexattr_destroy(&mattr);
	return ret;
}
/* Reset the magic number and destroy the mutex created by
 * fpga_perf_mutex_init(). The handle must not be used afterwards. */
STATIC fpga_result fpga_perf_mutex_destroy(fpga_perf_counter *fpga_perf)
{
	fpga_result ret = FPGA_OK;
	int res = 0;

	if (!fpga_perf) {
		OPAE_ERR("Invalid input parameters");
		return FPGA_INVALID_PARAM;
	}
	ret = fpga_perf_check_and_lock(fpga_perf);
	if (ret) {
		OPAE_ERR("Failed to lock perf mutex");
		return ret;
	}
	/* Clearing the magic invalidates the handle for any later caller. */
	fpga_perf->magic = 0;
	ret = opae_mutex_unlock(res, &fpga_perf->lock);
	if (ret) {
		OPAE_ERR("Failed to unlock perf mutex");
		return ret;
	}
	/* NOTE(review): pthread_mutex_destroy() returns an errno-style int
	 * that is returned here as an fpga_result — confirm the intended
	 * error mapping. */
	ret = pthread_mutex_destroy(&fpga_perf->lock);
	if (ret) {
		OPAE_ERR("Failed to destroy pthread mutex destroy");
		return ret;
	}
	return FPGA_OK;
}
/* Locate the dfl_fme perf event_source device for the FPGA identified by
 * token, initialise the handle mutex, parse the device's formats/events
 * and open its perf counters. Expects exactly one matching fme path. */
fpga_result fpgaPerfCounterGet(fpga_token token, fpga_perf_counter *fpga_perf)
{
	fpga_result ret = FPGA_OK;
	int res = 0;
	char sysfs_path[DFL_PERF_STR_MAX] = { 0 };
	char sysfs_perf[DFL_PERF_STR_MAX] = { 0 };
	int gres = 0;
	uint32_t fpga_id = -1;
	char *endptr = NULL;
	glob_t pglob;
	uint8_t bus = (uint8_t)-1;
	uint16_t segment = (uint16_t)-1;
	uint8_t device = (uint8_t)-1;
	uint8_t function = (uint8_t)-1;

	if (!token || !fpga_perf) {
		OPAE_ERR("Invalid input parameters");
		return FPGA_INVALID_PARAM;
	}
	memset(fpga_perf, 0, sizeof(fpga_perf_counter));
	ret = get_fpga_sbdf(token, &segment, &bus, &device, &function);
	if (ret != FPGA_OK) {
		OPAE_ERR("Failed to get sbdf");
		return ret;
	}
	ret = fpga_perf_mutex_init(fpga_perf);
	if (ret != FPGA_OK) {
		OPAE_ERR("Failed to initialize the mutex");
		return ret;
	}
	/* when we bind with new device id we will get updated function value */
	/* not able to read the sysfs path using that */
	if(function)
		function = 0;
	if (snprintf(sysfs_path, sizeof(sysfs_path),
		DFL_PERF_FME,
		segment, bus, device, function) < 0) {
		OPAE_ERR("snprintf buffer overflow");
		return FPGA_EXCEPTION;
	}
	gres = glob(sysfs_path, GLOB_NOSORT, NULL, &pglob);
	if (gres) {
		OPAE_ERR("Failed pattern match %s: %s", sysfs_path, strerror(errno));
		globfree(&pglob);
		return FPGA_NOT_FOUND;
	}
	if (pglob.gl_pathc == 1) {
		/* Extract the numeric fme instance id from ".../dfl-fme.N". */
		char *ptr = strstr(pglob.gl_pathv[0], "fme");
		if (!ptr) {
			ret = FPGA_INVALID_PARAM;
			goto out;
		}
		errno = 0;
		fpga_id = strtoul(ptr + 4, &endptr, 10);
		/* The matching perf PMU lives under
		 * /sys/bus/event_source/devices/dfl_fme<id>. */
		if (snprintf(sysfs_perf, sizeof(sysfs_perf),
			DFL_PERF_SYSFS"%d", fpga_id) < 0) {
			OPAE_ERR("snprintf buffer overflow");
			ret = FPGA_EXCEPTION;
			goto out;
		}
		if (fpga_perf_check_and_lock(fpga_perf)) {
			OPAE_ERR("Failed to lock perf mutex");
			ret = FPGA_EXCEPTION;
			goto out;
		}
		if (snprintf(fpga_perf->dfl_fme_name, sizeof(fpga_perf->dfl_fme_name),
			"dfl_fme%d", fpga_id) < 0) {
			OPAE_ERR("snprintf buffer overflow");
			opae_mutex_unlock(res, &fpga_perf->lock);
			ret = FPGA_EXCEPTION;
			goto out;
		}
		ret = fpga_perf_events(sysfs_perf, fpga_perf);
		if (ret != FPGA_OK) {
			OPAE_ERR("Failed to parse fpga perf event");
			opae_mutex_unlock(res, &fpga_perf->lock);
			goto out;
		}
		if (opae_mutex_unlock(res, &fpga_perf->lock)) {
			OPAE_ERR("Failed to unlock perf mutex");
			ret = FPGA_EXCEPTION;
			goto out;
		}
	} else {
		/* Zero or multiple matches: the device is ambiguous/absent. */
		ret = FPGA_NOT_FOUND;
		goto out;
	}
out:
	globfree(&pglob);
	return ret;
}
/* Enable the perf event group and snapshot the current value of every
 * counter into perf_events[].start_value. */
fpga_result fpgaPerfCounterStartRecord(fpga_perf_counter *fpga_perf)
{
	uint64_t loop = 0;
	uint64_t inner_loop = 0;
	int res = 0;
	char buf[DFL_PERF_STR_MAX] = { 0 };
	struct read_format *rdft = (struct read_format *) buf;

	if (!fpga_perf) {
		OPAE_ERR("Invalid input parameters");
		return FPGA_INVALID_PARAM;
	}
	if (fpga_perf_check_and_lock(fpga_perf)) {
		OPAE_ERR("Failed to lock perf mutex");
		return FPGA_EXCEPTION;
	}
	/* Enabling via the group leader enables all group members. */
	if (ioctl(fpga_perf->perf_events[0].fd, PERF_EVENT_IOC_ENABLE,
		PERF_IOC_FLAG_GROUP) == -1) {
		OPAE_ERR("PERF_EVENT_IOC_ENABLE ioctl failed: %s",
			strerror(errno));
		goto out;
	}
	/* A single read on the leader returns all group counters. */
	if (read(fpga_perf->perf_events[0].fd, rdft, sizeof(buf)) == -1) {
		OPAE_ERR("read fpga perf counter failed");
		goto out;
	}
	/* Match each returned entry to its event by the kernel-assigned id. */
	for (loop = 0; loop < (uint64_t)rdft->nr; loop++) {
		for (inner_loop = 0; inner_loop < fpga_perf->num_perf_events;
			inner_loop++) {
			if (rdft->values[loop].id == fpga_perf->perf_events[inner_loop].id)
				fpga_perf->perf_events[inner_loop].start_value = rdft->values[loop].value;
		}
	}
	if (opae_mutex_unlock(res, &fpga_perf->lock)) {
		OPAE_ERR("Failed to unlock perf mutex");
		return FPGA_EXCEPTION;
	}
	return FPGA_OK;
out:
	opae_mutex_unlock(res, &fpga_perf->lock);
	return FPGA_EXCEPTION;
}
/* Disable the perf event group and snapshot the final value of every
 * counter into perf_events[].stop_value.
 *
 * Fix: the error message on a failed disable previously reported
 * "PERF_EVENT_IOC_ENABLE"; it now names the ioctl actually issued. */
fpga_result fpgaPerfCounterStopRecord(fpga_perf_counter *fpga_perf)
{
	char buf[DFL_PERF_STR_MAX] = { 0 };
	uint64_t loop = 0;
	uint64_t inner_loop = 0;
	int res = 0;
	struct read_format *rdft = (struct read_format *) buf;

	if (!fpga_perf) {
		OPAE_ERR("Invalid input parameters");
		return FPGA_INVALID_PARAM;
	}
	if (fpga_perf_check_and_lock(fpga_perf)) {
		OPAE_ERR("Failed to lock perf mutex");
		return FPGA_EXCEPTION;
	}
	/* Disabling via the group leader disables all group members. */
	if (ioctl(fpga_perf->perf_events[0].fd, PERF_EVENT_IOC_DISABLE,
		PERF_IOC_FLAG_GROUP) == -1) {
		OPAE_ERR("PERF_EVENT_IOC_DISABLE ioctl failed: %s",
			strerror(errno));
		goto out;
	}
	/* A single read on the leader returns all group counters. */
	if (read(fpga_perf->perf_events[0].fd, rdft, sizeof(buf)) == -1) {
		OPAE_ERR("read fpga perf counter failed");
		goto out;
	}
	/* Match each returned entry to its event by the kernel-assigned id. */
	for (loop = 0; loop < (uint64_t)rdft->nr; loop++) {
		for (inner_loop = 0; inner_loop < fpga_perf->num_perf_events;
			inner_loop++) {
			if (rdft->values[loop].id == fpga_perf->perf_events[inner_loop].id)
				fpga_perf->perf_events[inner_loop].stop_value = rdft->values[loop].value;
		}
	}
	if (opae_mutex_unlock(res, &fpga_perf->lock)) {
		OPAE_ERR("Failed to unlock perf mutex");
		return FPGA_EXCEPTION;
	}
	return FPGA_OK;
out:
	opae_mutex_unlock(res, &fpga_perf->lock);
	return FPGA_EXCEPTION;
}
/* Print a header row of event names followed by a row of counter deltas
 * (stop_value - start_value) to the given stream. */
fpga_result fpgaPerfCounterPrint(FILE *f, fpga_perf_counter *fpga_perf)
{
	uint64_t loop = 0;
	int res = 0;

	if (!fpga_perf || !f) {
		OPAE_ERR("Invalid input parameters");
		return FPGA_INVALID_PARAM;
	}
	if (fpga_perf_check_and_lock(fpga_perf)) {
		OPAE_ERR("Failed to lock perf mutex");
		return FPGA_EXCEPTION;
	}
	fprintf(f, "\n");
	for (loop = 0; loop < fpga_perf->num_perf_events; loop++)
		fprintf(f, "%s\t", fpga_perf->perf_events[loop].event_name);
	fprintf(f, "\n");
	/* NOTE(review): the header row prints every event name, but this row
	 * skips events whose config is 0, so columns can be misaligned when
	 * such events exist — confirm whether that is intended. */
	for (loop = 0; loop < fpga_perf->num_perf_events; loop++) {
		if (!fpga_perf->perf_events[loop].config)
			continue;
		fprintf(f, "%ld\t\t", (fpga_perf->perf_events[loop].stop_value
			- fpga_perf->perf_events[loop].start_value));
	}
	fprintf(f, "\n");
	if (opae_mutex_unlock(res, &fpga_perf->lock)) {
		OPAE_ERR("Failed to unlock perf mutex");
		return FPGA_EXCEPTION;
	}
	return FPGA_OK;
}
/* Release all resources held by the perf handle: close the opened perf
 * event descriptors, free the parsed format/event arrays and destroy
 * the handle mutex.
 *
 * Fix: the perf event fds opened in fpga_perf_events() were previously
 * leaked; they are now closed before the events array is freed. */
fpga_result fpgaPerfCounterDestroy(fpga_perf_counter *fpga_perf)
{
	int res = 0;
	uint64_t loop = 0;

	if (!fpga_perf) {
		OPAE_ERR("Invalid input parameters");
		return FPGA_INVALID_PARAM;
	}
	if (fpga_perf_check_and_lock(fpga_perf)) {
		OPAE_ERR("Failed to lock perf mutex");
		return FPGA_EXCEPTION;
	}
	if (fpga_perf->format_type) {
		free(fpga_perf->format_type);
		fpga_perf->format_type = NULL;
	}
	if (fpga_perf->perf_events) {
		/* Close every opened perf event descriptor. */
		for (loop = 0; loop < fpga_perf->num_perf_events; loop++) {
			if (fpga_perf->perf_events[loop].fd > 0)
				close(fpga_perf->perf_events[loop].fd);
		}
		free(fpga_perf->perf_events);
		fpga_perf->perf_events = NULL;
	}
	if (opae_mutex_unlock(res, &fpga_perf->lock)) {
		OPAE_ERR("Failed to unlock perf mutex");
		return FPGA_EXCEPTION;
	}
	if (fpga_perf_mutex_destroy(fpga_perf) != FPGA_OK) {
		OPAE_ERR("Failed to destroy the mutex");
		return FPGA_EXCEPTION;
	}
	return FPGA_OK;
}
| 1 | 21,181 | attr_value is 128 bytes? | OPAE-opae-sdk | c |
@@ -54,7 +54,9 @@ class HTTPRequest(Request):
msg = "Option 'url' is mandatory for request but not found in %s" % config
self.url = self.config.get("url", TaurusConfigError(msg))
self.label = self.config.get("label", self.url)
- self.method = self.config.get("method", "GET").upper()
+ self.method = self.config.get("method", "GET")
+ if not has_variable_pattern(self.method):
+ self.method = self.method.upper()
# TODO: add method to join dicts/lists from scenario/request level?
self.headers = self.config.get("headers", {}) | 1 | """
Copyright 2017 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import traceback
import mimetypes
import re
from bzt import TaurusConfigError, TaurusInternalException
from bzt.utils import ensure_is_dict, dehumanize_time
# Matches a Taurus/JMeter-style variable reference such as "${varname}".
# Fix: use a raw string so the \$ and \} regex escapes are not subject to
# Python string-escape interpretation (avoids DeprecationWarning for
# invalid escape sequences).
VARIABLE_PATTERN = re.compile(r"\$\{.+\}")


def has_variable_pattern(val):
    """Return True if *val* contains a ``${...}`` variable reference."""
    return bool(VARIABLE_PATTERN.search(val))
class Request(object):
    """Base class for every scenario request/block element."""
    NAME = "request"

    def __init__(self, config, scenario=None):
        self.config = config
        self.scenario = scenario

    def priority_option(self, name, default=None):
        """Look up *name* on the request first, then on the scenario,
        falling back to *default* when neither defines it."""
        value = self.config.get(name, None)
        if value is None:
            value = self.scenario.get(name, None)
        if value is None and default is not None:
            value = default
        return value
class HTTPRequest(Request):
    """A single HTTP request taken from a scenario.

    Fix: the HTTP method is only upper-cased when it is a literal value;
    a method such as ``${method}`` is left untouched so the variable can
    be resolved at run time instead of being mangled to ``${METHOD}``.
    """
    NAME = "request"

    def __init__(self, config, scenario, engine):
        self.engine = engine
        self.log = self.engine.log.getChild(self.__class__.__name__)
        super(HTTPRequest, self).__init__(config, scenario)
        msg = "Option 'url' is mandatory for request but not found in %s" % config
        self.url = self.config.get("url", TaurusConfigError(msg))
        self.label = self.config.get("label", self.url)
        self.method = self.config.get("method", "GET")
        # Only normalize literal methods; "${...}" references resolve later.
        if not has_variable_pattern(self.method):
            self.method = self.method.upper()
        # TODO: add method to join dicts/lists from scenario/request level?
        self.headers = self.config.get("headers", {})
        self.keepalive = self.config.get('keepalive', None)
        self.timeout = self.config.get('timeout', None)
        self.think_time = self.config.get('think-time', None)
        self.follow_redirects = self.config.get('follow-redirects', None)
        self.body = self.__get_body()

    def __get_body(self):
        # Inline 'body' takes precedence over 'body-file' when both are set.
        body = self.config.get('body', None)
        body_file = self.config.get('body-file', None)
        if body_file:
            if body:
                self.log.warning('body and body-file fields are found, only first will take effect')
            else:
                if self.method in ("PUT", "POST") and has_variable_pattern(body_file):
                    # File name contains a variable: it can only be resolved
                    # at run time, so leave the body unset here.
                    return
                body_file_path = self.engine.find_file(body_file)
                with open(body_file_path) as fhd:
                    body = fhd.read()
        return body
class HierarchicHTTPRequest(HTTPRequest):
    """HTTPRequest that additionally validates and resolves 'upload-files'."""

    def __init__(self, config, scenario, engine):
        super(HierarchicHTTPRequest, self).__init__(config, scenario, engine)
        self.upload_files = self.config.get("upload-files", [])
        # PUT carries a single raw file body; drop any extra entries.
        if self.method == "PUT" and len(self.upload_files) > 1:
            self.upload_files = self.upload_files[:1]
        for file_dict in self.upload_files:
            param = file_dict.get("param", None)
            if self.method == "PUT":
                # No form field name is used for PUT uploads.
                file_dict["param"] = ""
            if self.method == "POST" and not param:
                raise TaurusConfigError("Items from upload-files must specify parameter name")
            path_exc = TaurusConfigError("Items from upload-files must specify path to file")
            path = str(file_dict.get("path", path_exc))
            if not has_variable_pattern(path):  # exclude variables
                path = self.engine.find_file(path)  # prepare full path for jmx
            else:
                msg = "Path '%s' contains variable and can't be expanded. Don't use relative paths in 'upload-files'!"
                self.log.warning(msg % path)
            file_dict["path"] = path
            # Guess MIME type from the file name; default to generic binary.
            mime = mimetypes.guess_type(file_dict["path"])[0] or "application/octet-stream"
            file_dict.get("mime-type", mime, force_set=True)
        self.content_encoding = self.config.get('content-encoding', None)
class IfBlock(Request):
    """Conditional block: holds requests for both branches of a condition."""
    NAME = "if"

    def __init__(self, condition, then_clause, else_clause, config):
        super(IfBlock, self).__init__(config)
        self.condition = condition
        self.then_clause = then_clause
        self.else_clause = else_clause

    def __repr__(self):
        fmt = "IfBlock(condition=%s, then=%s, else=%s)"
        return fmt % (self.condition,
                      [repr(item) for item in self.then_clause],
                      [repr(item) for item in self.else_clause])
class OnceBlock(Request):
    """Block whose nested requests are meant to run a single time."""
    NAME = "once"

    def __init__(self, requests, config):
        super(OnceBlock, self).__init__(config)
        self.requests = requests

    def __repr__(self):
        return "OnceBlock(requests=%s)" % [repr(child) for child in self.requests]
class LoopBlock(Request):
    """Block repeating its nested requests a fixed number of times."""
    NAME = "loop"

    def __init__(self, loops, requests, config):
        super(LoopBlock, self).__init__(config)
        self.loops = loops
        self.requests = requests

    def __repr__(self):
        return "LoopBlock(loops=%s, requests=%s)" % (
            self.loops, [repr(child) for child in self.requests])
class WhileBlock(Request):
    """Block repeating its nested requests while a condition holds."""
    NAME = "while"

    def __init__(self, condition, requests, config):
        super(WhileBlock, self).__init__(config)
        self.condition = condition
        self.requests = requests

    def __repr__(self):
        return "WhileBlock(condition=%s, requests=%s)" % (
            self.condition, [repr(child) for child in self.requests])
class ForEachBlock(Request):
    """Block iterating *loop_var* over the collection named by *input_var*."""
    NAME = "foreach"

    def __init__(self, input_var, loop_var, requests, config):
        super(ForEachBlock, self).__init__(config)
        self.input_var = input_var
        self.loop_var = loop_var
        self.requests = requests

    def __repr__(self):
        fmt = "ForEachBlock(input=%s, loop_var=%s, requests=%s)"
        return fmt % (self.input_var, self.loop_var,
                      [repr(child) for child in self.requests])
class TransactionBlock(Request):
    """Named group of requests reported as one transaction."""
    NAME = "transaction"

    def __init__(self, name, requests, include_timers, config, scenario):
        super(TransactionBlock, self).__init__(config, scenario)
        self.name = name
        self.requests = requests
        self.include_timers = include_timers

    def __repr__(self):
        fmt = "TransactionBlock(name=%s, requests=%s, include-timers=%r)"
        return fmt % (self.name,
                      [repr(child) for child in self.requests],
                      self.include_timers)
class IncludeScenarioBlock(Request):
    """Reference to another scenario to be inlined by name."""
    NAME = "include-scenario"

    def __init__(self, scenario_name, config):
        super(IncludeScenarioBlock, self).__init__(config)
        self.scenario_name = scenario_name

    def __repr__(self):
        return "IncludeScenarioBlock(scenario_name=%r)" % self.scenario_name
class RequestsParser(object):
    """Turns raw scenario `requests` config entries into typed Request objects.

    Dispatch is keyed on which block keyword is present in an entry
    (if/once/loop/while/foreach/transaction/include-scenario/action/
    set-variables); entries with none of those become HTTP requests.
    """
    def __init__(self, scenario, engine):
        self.engine = engine
        self.scenario = scenario
    def __parse_request(self, req):
        # Parse one config entry into the matching Request subclass.
        # NOTE(review): `req.get(key, SomeError(...))` appears to rely on the
        # project's dict implementation raising the exception passed as the
        # default when the key is missing — confirm against BetterDict.get.
        if 'if' in req:
            condition = req.get("if")
            # TODO: apply some checks to `condition`?
            then_clause = req.get("then", TaurusConfigError("'then' clause is mandatory for 'if' blocks"))
            then_requests = self.__parse_requests(then_clause)
            else_clause = req.get("else", [])
            else_requests = self.__parse_requests(else_clause)
            return IfBlock(condition, then_requests, else_requests, req)
        elif 'once' in req:
            do_block = req.get("once", TaurusConfigError("operation list is mandatory for 'once' blocks"))
            do_requests = self.__parse_requests(do_block)
            return OnceBlock(do_requests, req)
        elif 'loop' in req:
            loops = req.get("loop")
            do_block = req.get("do", TaurusConfigError("'do' option is mandatory for 'loop' blocks"))
            do_requests = self.__parse_requests(do_block)
            return LoopBlock(loops, do_requests, req)
        elif 'while' in req:
            condition = req.get("while")
            do_block = req.get("do", TaurusConfigError("'do' option is mandatory for 'while' blocks"))
            do_requests = self.__parse_requests(do_block)
            return WhileBlock(condition, do_requests, req)
        elif 'foreach' in req:
            iteration_str = req.get("foreach")
            # expected form: "<elementName> in <collection>"
            match = re.match(r'(.+) in (.+)', iteration_str)
            if not match:
                msg = "'foreach' value should be in format '<elementName> in <collection>' but '%s' found"
                raise TaurusConfigError(msg % iteration_str)
            loop_var, input_var = match.groups()
            do_block = req.get("do", TaurusConfigError("'do' field is mandatory for 'foreach' blocks"))
            do_requests = self.__parse_requests(do_block)
            return ForEachBlock(input_var, loop_var, do_requests, req)
        elif 'transaction' in req:
            name = req.get('transaction')
            do_block = req.get('do', TaurusConfigError("'do' field is mandatory for transaction blocks"))
            do_requests = self.__parse_requests(do_block)
            include_timers = req.get('include-timers')
            return TransactionBlock(name, do_requests, include_timers, req, self.scenario)
        elif 'include-scenario' in req:
            name = req.get('include-scenario')
            return IncludeScenarioBlock(name, req)
        elif 'action' in req:
            action = req.get('action')
            if action not in ('pause', 'stop', 'stop-now', 'continue'):
                raise TaurusConfigError("Action should be either 'pause', 'stop', 'stop-now' or 'continue'")
            target = req.get('target', 'current-thread')
            if target not in ('current-thread', 'all-threads'):
                msg = "Target for action should be either 'current-thread' or 'all-threads' but '%s' found"
                raise TaurusConfigError(msg % target)
            duration = req.get('pause-duration', None)
            if duration is not None:
                # accepts human-readable values such as "1m30s"
                duration = dehumanize_time(duration)
            return ActionBlock(action, target, duration, req)
        elif 'set-variables' in req:
            mapping = req.get('set-variables')
            return SetVariables(mapping, req)
        else:
            return HierarchicHTTPRequest(req, self.scenario, self.engine)
    def __parse_requests(self, raw_requests, require_url=True):
        # Parse a list of entries; ensure_is_dict presumably coerces plain
        # string entries into {"url": <string>} dicts — confirm against its impl.
        requests = []
        for key in range(len(raw_requests)): # pylint: disable=consider-using-enumerate
            req = ensure_is_dict(raw_requests, key, "url")
            if not require_url and "url" not in req:
                req["url"] = None
            try:
                requests.append(self.__parse_request(req))
            except BaseException as exc:
                logging.debug("%s\n%s" % (exc, traceback.format_exc()))
                raise TaurusConfigError("Wrong request:\n %s" % req)
        return requests
    def extract_requests(self, require_url=True):
        # Entry point: parse the scenario's `requests` list (may be empty).
        requests = self.scenario.get("requests", [])
        return self.__parse_requests(requests, require_url=require_url)
class ActionBlock(Request):
    """Non-HTTP control action (pause/stop/stop-now/continue) in a scenario."""
    def __init__(self, action, target, duration, config):
        super(ActionBlock, self).__init__(config)
        self.action = action
        self.target = target
        self.duration = duration
class SetVariables(Request):
    """Pseudo-request that assigns scenario variables from a key/value mapping."""
    def __init__(self, mapping, config):
        super(SetVariables, self).__init__(config)
        self.mapping = mapping
class RequestVisitor(object):
    """Base visitor that dispatches nodes to visit_<lowercased class name> methods."""

    def __init__(self):
        self.path = []

    def clear_path_cache(self):
        self.path = []

    def record_path(self, path):
        self.path.append(path)

    def visit(self, node):
        class_name = node.__class__.__name__.lower()
        handler = getattr(self, 'visit_' + class_name, None)
        if handler is None:
            raise TaurusInternalException("Visitor for class %s not found" % class_name)
        return handler(node)
class ResourceFilesCollector(RequestVisitor):
    """Walks a parsed request tree and gathers every referenced file path."""

    def __init__(self, executor):
        """
        :param executor: JMeterExecutor
        """
        super(ResourceFilesCollector, self).__init__()
        self.executor = executor

    def _collect_from(self, requests):
        # shared traversal used by every container-block visitor
        collected = []
        for item in requests:
            collected.extend(self.visit(item))
        return collected

    def visit_hierarchichttprequest(self, request):
        files = []
        body_file = request.config.get('body-file')
        if body_file and not has_variable_pattern(body_file):
            files.append(body_file)
        uploads = request.config.get('upload-files', [])
        files.extend([x['path'] for x in uploads if not has_variable_pattern(x['path'])])
        if 'jsr223' in request.config:
            jsrs = request.config.get('jsr223')
            if isinstance(jsrs, dict):
                jsrs = [jsrs]
            for jsr in jsrs:
                if 'script-file' in jsr:
                    files.append(jsr.get('script-file'))
        return files

    def visit_ifblock(self, block):
        return self._collect_from(block.then_clause) + self._collect_from(block.else_clause)

    def visit_loopblock(self, block):
        return self._collect_from(block.requests)

    def visit_whileblock(self, block):
        return self._collect_from(block.requests)

    def visit_foreachblock(self, block):
        return self._collect_from(block.requests)

    def visit_transactionblock(self, block):
        return self._collect_from(block.requests)

    def visit_includescenarioblock(self, block):
        scenario_name = block.scenario_name
        if scenario_name in self.path:
            msg = "Mutual recursion detected in include-scenario blocks (scenario %s)"
            raise TaurusConfigError(msg % scenario_name)
        self.record_path(scenario_name)
        scenario = self.executor.get_scenario(name=block.scenario_name)
        return self.executor.res_files_from_scenario(scenario)

    def visit_actionblock(self, _):
        return []

    def visit_setvariables(self, _):
        return []
| 1 | 15,093 | if it's unpredictable - why uppercase it at all? | Blazemeter-taurus | py |
@@ -25,6 +25,7 @@ type KubeCloudInstTool struct {
func (cu *KubeCloudInstTool) InstallTools() error {
cu.SetOSInterface(GetOSInterface())
cu.SetKubeEdgeVersion(cu.ToolVersion)
+ fmt.Println("beforeinstallkubeedge")
err := cu.InstallKubeEdge()
if err != nil { | 1 | package util
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
"time"
"github.com/kubeedge/kubeedge/keadm/app/cmd/common"
)
//KubeCloudInstTool embeds the Common struct.
//It implements the ToolsInstaller interface for the cloud (edgecontroller) side.
type KubeCloudInstTool struct {
	Common
}
//InstallTools downloads KubeEdge for the specified version
//and makes the required configuration changes and initiates edgecontroller.
func (cu *KubeCloudInstTool) InstallTools() error {
	cu.SetOSInterface(GetOSInterface())
	cu.SetKubeEdgeVersion(cu.ToolVersion)

	if err := cu.InstallKubeEdge(); err != nil {
		return err
	}

	if err := cu.generateCertificates(); err != nil {
		return err
	}

	if err := cu.tarCertificates(); err != nil {
		return err
	}

	if err := cu.StartK8Scluster(); err != nil {
		return err
	}

	if err := cu.updateManifests(); err != nil {
		return err
	}

	//This makes sure the path is created, if it already exists also it is fine
	if err := os.MkdirAll(KubeEdgeCloudConfPath, os.ModePerm); err != nil {
		// BUGFIX: the message previously reported KubeEdgeConfPath while the
		// directory actually being created is KubeEdgeCloudConfPath.
		return fmt.Errorf("not able to create %s folder path", KubeEdgeCloudConfPath)
	}

	//Create controller.yaml
	if err := common.WriteControllerYamlFile(KubeEdgeControllerYaml, cu.KubeConfig); err != nil {
		return err
	}

	//Create logger.yaml
	if err := common.WriteCloudLoggingYamlFile(KubeEdgeControllerLoggingYaml); err != nil {
		return err
	}

	//Create modules.yaml
	if err := common.WriteCloudModulesYamlFile(KubeEdgeControllerModulesYaml); err != nil {
		return err
	}

	// brief settling delay before launching the controller
	time.Sleep(1 * time.Second)

	if err := cu.RunEdgeController(); err != nil {
		return err
	}
	fmt.Println("Edgecontroller started")

	return nil
}
//generateCertificates - certificates ca,cert will be generated in /etc/kubeedge/
func (cu *KubeCloudInstTool) generateCertificates() error {
	//Create certgen.sh
	if err := ioutil.WriteFile(KubeEdgeCloudCertGenPath, CertGenSh, 0775); err != nil {
		return err
	}

	cmd := &Command{Cmd: exec.Command("bash", "-x", KubeEdgeCloudCertGenPath, "genCertAndKey", "edge")}
	err := cmd.ExecuteCmdShowOutput()
	stdout := cmd.GetStdOutput()
	errout := cmd.GetStdErr()
	if err != nil || errout != "" {
		// BUGFIX: previously the underlying error and stderr were discarded,
		// making failures impossible to diagnose.
		return fmt.Errorf("certificates not installed: err: %v, stderr: %s", err, errout)
	}
	fmt.Println(stdout)
	fmt.Println("Certificates got generated at:", KubeEdgePath, "ca and", KubeEdgePath, "certs")
	return nil
}
//tarCertificates - certs will be tared at /etc/kubeedge/kubeedge/certificates/certs
func (cu *KubeCloudInstTool) tarCertificates() error {
	// archive name without its extension is the directory to tar
	tarCmd := fmt.Sprintf("tar -cvzf %s %s", KubeEdgeEdgeCertsTarFileName, strings.Split(KubeEdgeEdgeCertsTarFileName, ".")[0])
	cmd := &Command{Cmd: exec.Command("sh", "-c", tarCmd)}
	cmd.Cmd.Dir = KubeEdgePath
	err := cmd.ExecuteCmdShowOutput()
	stdout := cmd.GetStdOutput()
	errout := cmd.GetStdErr()
	if err != nil || errout != "" {
		// BUGFIX: keep the underlying cause instead of a fixed message
		return fmt.Errorf("error in tarring the certificates: err: %v, stderr: %s", err, errout)
	}
	fmt.Println(stdout)
	fmt.Println("Certificates got tared at:", KubeEdgePath, "path, Please copy it to desired edge node (at", KubeEdgePath, "path)")
	return nil
}
//updateManifests - Kubernetes Manifests file will be updated by necessary parameters
func (cu *KubeCloudInstTool) updateManifests() error {
	input, err := ioutil.ReadFile(KubeCloudApiserverYamlPath)
	if err != nil {
		fmt.Println(err)
		return err
	}

	// re-enable the apiserver insecure port that edgecontroller relies on
	output := bytes.Replace(input, []byte("insecure-port=0"), []byte("insecure-port=8080"), -1)

	if err = ioutil.WriteFile(KubeCloudApiserverYamlPath, output, 0666); err != nil {
		fmt.Println(err)
		return err
	}

	lines, err := file2lines(KubeCloudApiserverYamlPath)
	if err != nil {
		return err
	}

	// splice KubeCloudReplaceString in right before line KubeCloudReplaceIndex
	fileContent := ""
	for i, line := range lines {
		if i == KubeCloudReplaceIndex {
			fileContent += KubeCloudReplaceString
		}
		fileContent += line
		fileContent += "\n"
	}

	// BUGFIX: this WriteFile error was previously silently dropped
	if err = ioutil.WriteFile(KubeCloudApiserverYamlPath, []byte(fileContent), 0644); err != nil {
		return err
	}
	return nil
}
// file2lines opens the file at filePath and returns its contents split into lines.
func file2lines(filePath string) ([]string, error) {
	fh, err := os.Open(filePath)
	if err != nil {
		return nil, err
	}
	defer fh.Close()

	return linesFromReader(fh)
}
// linesFromReader splits everything readable from r into a slice of lines.
func linesFromReader(r io.Reader) ([]string, error) {
	scanner := bufio.NewScanner(r)
	var collected []string
	for scanner.Scan() {
		collected = append(collected, scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return collected, nil
}
//RunEdgeController starts edgecontroller process
func (cu *KubeCloudInstTool) RunEdgeController() error {
	// install the binary into /usr/local/bin
	filetoCopy := fmt.Sprintf("cp %s/kubeedge/cloud/%s /usr/local/bin/.", KubeEdgePath, KubeCloudBinaryName)
	cmd := &Command{Cmd: exec.Command("sh", "-c", filetoCopy)}
	err := cmd.ExecuteCmdShowOutput()
	errout := cmd.GetStdErr()
	if err != nil || errout != "" {
		// BUGFIX: removed stray debug print; include the underlying error
		return fmt.Errorf("failed to copy edgecontroller binary: err: %v, stderr: %s", err, errout)
	}

	// launch the controller in the background, logging to <path>/kubeedge/cloud/<name>.log
	binExec := fmt.Sprintf("chmod +x /usr/local/bin/%s && %s > %s/kubeedge/cloud/%s.log 2>&1 &", KubeCloudBinaryName, KubeCloudBinaryName, KubeEdgePath, KubeCloudBinaryName)
	cmd = &Command{Cmd: exec.Command("sh", "-c", binExec)}
	cmd.Cmd.Env = os.Environ()
	env := fmt.Sprintf("GOARCHAIUS_CONFIG_PATH=%skubeedge/cloud", KubeEdgePath)
	cmd.Cmd.Env = append(cmd.Cmd.Env, env)
	err = cmd.ExecuteCmdShowOutput()
	errout = cmd.GetStdErr()
	if err != nil || errout != "" {
		return fmt.Errorf("failed to start edgecontroller: err: %v, stderr: %s", err, errout)
	}
	fmt.Println(cmd.GetStdOutput())
	fmt.Println("KubeEdge controller is running, For logs visit", KubeEdgePath+"kubeedge/cloud/")
	return nil
}
//TearDown method will remove the edge node from api-server and stop edgecontroller process
func (cu *KubeCloudInstTool) TearDown() error {
	cu.SetOSInterface(GetOSInterface())

	//Stops kubeadm
	// plain string: fmt.Sprintf with no format arguments was a `go vet` finding
	binExec := "echo 'y' | kubeadm reset && rm -rf ~/.kube"
	cmd := &Command{Cmd: exec.Command("sh", "-c", binExec)}
	err := cmd.ExecuteCmdShowOutput()
	errout := cmd.GetStdErr()
	if err != nil || errout != "" {
		return fmt.Errorf("kubeadm reset failed %s", errout)
	}

	//Kill edgecontroller process
	// NOTE(review): the return value (if any) of KillKubeEdgeBinary is
	// intentionally left unchanged here — confirm its signature before checking it.
	cu.KillKubeEdgeBinary(KubeCloudBinaryName)
	return nil
}
| 1 | 11,935 | Please remove all these debug prints. It doesn't look good. | kubeedge-kubeedge | go |
@@ -74,6 +74,7 @@ type VMContext interface {
BlockHeight() *types.BlockHeight
IsFromAccountActor() bool
Charge(cost types.GasUnits) error
+ SampleChainRandomness(sampleHeight *types.BlockHeight) ([]byte, error)
CreateNewActor(addr address.Address, code cid.Cid, initalizationParams interface{}) error
| 1 | package exec
import (
"context"
"gx/ipfs/QmNf3wujpV2Y7Lnj2hy2UrmuX8bhMDStRHbnSLh7Ypf36h/go-hamt-ipld"
"gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
"github.com/filecoin-project/go-filecoin/abi"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/types"
"github.com/filecoin-project/go-filecoin/vm/errors"
)
// Error represents a storage related error
// (a plain string type that satisfies the built-in error interface).
type Error string
func (e Error) Error() string { return string(e) }
const (
	// ErrDecode indicates that a chunk an actor tried to write could not be decoded
	ErrDecode = 33
	// ErrDanglingPointer indicates that an actor attempted to commit a pointer to a non-existent chunk
	ErrDanglingPointer = 34
	// ErrStaleHead indicates that an actor attempted to commit over a stale chunk
	ErrStaleHead = 35
	// ErrInsufficientGas indicates that an actor did not have sufficient gas to run a message
	ErrInsufficientGas = 36
)
// Errors map error codes to revert errors this actor may return
// NOTE(review): ErrInsufficientGas is declared above but has no entry in this
// map — confirm whether its revert error is intentionally produced elsewhere.
var Errors = map[uint8]error{
	ErrDecode: errors.NewCodedRevertError(ErrDecode, "State could not be decoded"),
	ErrDanglingPointer: errors.NewCodedRevertError(ErrDanglingPointer, "State contains pointer to non-existent chunk"),
	ErrStaleHead: errors.NewCodedRevertError(ErrStaleHead, "Expected head is stale"),
}
// Exports describe the public methods of an actor.
type Exports map[string]*FunctionSignature
// Has checks if the given method is an exported method.
func (e Exports) Has(method string) bool {
	// map lookup only; the signature value itself is not inspected
	_, ok := e[method]
	return ok
}
// TODO fritz require actors to define their exit codes and associate
// an error string with them.
// ExecutableActor is the interface all builtin actors have to implement.
type ExecutableActor interface {
	// Exports lists the publicly callable methods of the actor.
	Exports() Exports
	// InitializeState sets up the actor's state from initializerData
	// using the given storage — see implementations for the exact contract.
	InitializeState(storage Storage, initializerData interface{}) error
}
// ExportedFunc is the signature an exported method of an actor is expected to have.
type ExportedFunc func(ctx VMContext) ([]byte, uint8, error)
// FunctionSignature describes the signature of a single function.
// TODO: convert signatures into non go types, but rather low level agreed up types
type FunctionSignature struct {
	// Params is a list of the types of the parameters the function expects.
	Params []abi.Type
	// Return is the type of the return value of the function.
	// NOTE(review): despite the singular doc, Return is a slice, so multiple
	// return values appear to be allowed.
	Return []abi.Type
}
// VMContext defines the ABI interface exposed to actors.
type VMContext interface {
	Message() *types.Message
	Storage() Storage
	// Send invokes `method` on the actor at `to` with the given value and
	// params; returns raw return values and an exit code.
	Send(to address.Address, method string, value *types.AttoFIL, params []interface{}) ([][]byte, uint8, error)
	AddressForNewActor() (address.Address, error)
	BlockHeight() *types.BlockHeight
	IsFromAccountActor() bool
	// Charge deducts gas for the current message execution.
	Charge(cost types.GasUnits) error
	CreateNewActor(addr address.Address, code cid.Cid, initalizationParams interface{}) error
	// TODO: Remove these when Storage above is completely implemented
	ReadStorage() ([]byte, error)
	WriteStorage(interface{}) error
}
// Storage defines the storage module exposed to actors.
type Storage interface {
	// TODO: Forgot that Put() can fail in the spec, need to update.
	Put(interface{}) (cid.Cid, error)
	Get(cid.Cid) ([]byte, error)
	// Commit takes two cids — NOTE(review): argument roles (new head vs
	// expected old head) are not evident here; see the implementation.
	Commit(cid.Cid, cid.Cid) error
	Head() cid.Cid
}
// Lookup defines an internal interface for actor storage.
type Lookup interface {
	Find(ctx context.Context, k string) (interface{}, error)
	Set(ctx context.Context, k string, v interface{}) error
	Commit(ctx context.Context) (cid.Cid, error)
	Delete(ctx context.Context, k string) error
	IsEmpty() bool
	Values(ctx context.Context) ([]*hamt.KV, error)
}
| 1 | 17,637 | Consumers of this interface should not be required to provide `sampleHeight`. This should be an expected consensus parameter. | filecoin-project-venus | go |
@@ -58,8 +58,14 @@ func GetBackoffForNextSchedule(cronSchedule string, startTime time.Time, closeTi
if err != nil {
return NoBackoff
}
+
+ if closeTime.Before(startTime) {
+ closeTime = startTime
+ }
+
startUTCTime := startTime.In(time.UTC)
closeUTCTime := closeTime.In(time.UTC)
+
nextScheduleTime := schedule.Next(startUTCTime)
// Calculate the next schedule start time which is nearest to the close time
for nextScheduleTime.Before(closeUTCTime) { | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package backoff
import (
"time"
"github.com/robfig/cron"
"go.temporal.io/api/serviceerror"
"go.temporal.io/server/common/convert"
)
// NoBackoff is used to represent backoff when no cron backoff is needed
const NoBackoff = time.Duration(-1)
// ValidateSchedule validates a cron schedule spec.
// An empty spec is considered valid (meaning: no cron schedule).
func ValidateSchedule(cronSchedule string) error {
	if cronSchedule == "" {
		return nil
	}
	if _, err := cron.ParseStandard(cronSchedule); err != nil {
		return serviceerror.NewInvalidArgument("Invalid CronSchedule.")
	}
	return nil
}
// GetBackoffForNextSchedule calculates the backoff time for the next run given
// a cronSchedule, workflow start time and workflow close time.
// Returns NoBackoff for an empty or unparseable schedule.
func GetBackoffForNextSchedule(cronSchedule string, startTime time.Time, closeTime time.Time) time.Duration {
	if len(cronSchedule) == 0 {
		return NoBackoff
	}
	schedule, err := cron.ParseStandard(cronSchedule)
	if err != nil {
		return NoBackoff
	}
	startUTCTime := startTime.In(time.UTC)
	closeUTCTime := closeTime.In(time.UTC)
	nextScheduleTime := schedule.Next(startUTCTime)
	// Calculate the next schedule start time which is nearest to the close time
	// NOTE(review): if closeTime is before startTime this loop never runs and
	// the backoff is measured from closeTime to the first fire time after
	// startTime — confirm callers never pass closeTime < startTime.
	for nextScheduleTime.Before(closeUTCTime) {
		nextScheduleTime = schedule.Next(nextScheduleTime)
	}
	backoffInterval := nextScheduleTime.Sub(closeUTCTime)
	// round the interval up to whole seconds
	roundedInterval := time.Second * time.Duration(convert.Int64Ceil(backoffInterval.Seconds()))
	return roundedInterval
}
// GetBackoffForNextScheduleInSeconds calculates the backoff time in seconds for the
// next run given a cronSchedule and current time; 0 means no backoff applies.
func GetBackoffForNextScheduleInSeconds(cronSchedule string, startTime time.Time, closeTime time.Time) int32 {
	duration := GetBackoffForNextSchedule(cronSchedule, startTime, closeTime)
	if duration == NoBackoff {
		return 0
	}
	return convert.Int32Ceil(duration.Seconds())
}
| 1 | 9,895 | I don't believe this is the right fix. If startTime comes after closeTime, then it means some other event triggered invocation of this code path like workflow timeout. In this case we should still try to fire the cron on previous value so we should just return start the delta between startTime and closeTime immediately. | temporalio-temporal | go |
@@ -348,6 +348,10 @@ type CloudBackupGenericRequest struct {
CredentialUUID string
// All if set to true, backups for all clusters in the cloud are processed
All bool
+ // StatusFilter indicates backups based on status
+ StatusFilter CloudBackupStatusType
+ // TagFilter indicates backups based on tag
+ TagFilter string
}
type CloudBackupInfo struct { | 1 | package api
import (
"context"
"fmt"
"math"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/ptypes"
"github.com/libopenstorage/openstorage/pkg/auth"
"github.com/mohae/deepcopy"
)
// Strings for VolumeSpec
const (
Name = "name"
Token = "token"
TokenSecret = "token_secret"
SpecNodes = "nodes"
SpecParent = "parent"
SpecEphemeral = "ephemeral"
SpecShared = "shared"
SpecJournal = "journal"
SpecSharedv4 = "sharedv4"
SpecCascaded = "cascaded"
SpecSticky = "sticky"
SpecSecure = "secure"
SpecCompressed = "compressed"
SpecSize = "size"
SpecScale = "scale"
SpecFilesystem = "fs"
SpecBlockSize = "block_size"
SpecQueueDepth = "queue_depth"
SpecHaLevel = "repl"
SpecPriority = "io_priority"
SpecSnapshotInterval = "snap_interval"
SpecSnapshotSchedule = "snap_schedule"
SpecAggregationLevel = "aggregation_level"
SpecDedupe = "dedupe"
SpecPassphrase = "secret_key"
SpecAutoAggregationValue = "auto"
SpecGroup = "group"
SpecGroupEnforce = "fg"
SpecZones = "zones"
SpecRacks = "racks"
SpecRack = "rack"
SpecRegions = "regions"
SpecLabels = "labels"
SpecPriorityAlias = "priority_io"
SpecIoProfile = "io_profile"
SpecAsyncIo = "async_io"
SpecEarlyAck = "early_ack"
// SpecBestEffortLocationProvisioning default is false. If set provisioning request will succeed
// even if specified data location parameters could not be satisfied.
SpecBestEffortLocationProvisioning = "best_effort_location_provisioning"
// SpecForceUnsuppportedFsType is of type boolean and if true it sets
// the VolumeSpec.force_unsupported_fs_type. When set to true it asks
// the driver to use an unsupported value of VolumeSpec.format if possible
SpecForceUnsupportedFsType = "force_unsupported_fs_type"
SpecNodiscard = "nodiscard"
StoragePolicy = "storagepolicy"
)
// OptionKey specifies a set of recognized query params.
const (
// OptName query parameter used to lookup volume by name.
OptName = "Name"
// OptVolumeID query parameter used to lookup volume by ID.
OptVolumeID = "VolumeID"
// OptSnapID query parameter used to lookup snapshot by ID.
OptSnapID = "SnapID"
// OptLabel query parameter used to lookup volume by set of labels.
OptLabel = "Label"
// OptConfigLabel query parameter used to lookup volume by set of labels.
OptConfigLabel = "ConfigLabel"
// OptCumulative query parameter used to request cumulative stats.
OptCumulative = "Cumulative"
// OptTimeout query parameter used to indicate timeout seconds
OptTimeoutSec = "TimeoutSec"
// OptQuiesceID query parameter use for quiesce
OptQuiesceID = "QuiesceID"
// OptCredUUID is the UUID of the credential
OptCredUUID = "CredUUID"
// OptCredName indicates unique name of credential
OptCredName = "CredName"
// OptCredType indicates type of credential
OptCredType = "CredType"
// OptCredEncrKey is the key used to encrypt data
OptCredEncrKey = "CredEncrypt"
// OptCredRegion indicates the region for s3
OptCredRegion = "CredRegion"
// OptCredDisableSSL indicated if SSL should be disabled
OptCredDisableSSL = "CredDisableSSL"
// OptCredEndpoint indicate the cloud endpoint
OptCredEndpoint = "CredEndpoint"
	// OptCredAccessKey is the access key for s3
OptCredAccessKey = "CredAccessKey"
// OptCredSecretKey for s3
OptCredSecretKey = "CredSecretKey"
// OptCredBucket is the optional bucket name
OptCredBucket = "CredBucket"
// OptCredGoogleProjectID projectID for google cloud
OptCredGoogleProjectID = "CredProjectID"
// OptCredGoogleJsonKey for google cloud
OptCredGoogleJsonKey = "CredJsonKey"
// OptCredAzureAccountName is the account name for
// azure as the cloud provider
OptCredAzureAccountName = "CredAccountName"
	// OptCredAzureAccountKey is the account key for
	// azure as the cloud provider
OptCredAzureAccountKey = "CredAccountKey"
// Credential ownership key in params
OptCredOwnership = "CredOwnership"
// OptCloudBackupID is the backID in the cloud
OptCloudBackupID = "CloudBackID"
// OptSrcVolID is the source volume ID of the backup
OptSrcVolID = "SrcVolID"
// OptBkupOpState is the desired operational state
// (stop/pause/resume) of backup/restore
OptBkupOpState = "OpState"
// OptBackupSchedUUID is the UUID of the backup-schedule
OptBackupSchedUUID = "BkupSchedUUID"
// OptVolumeSubFolder query parameter used to catalog a particular path inside a volume
OptCatalogSubFolder = "subfolder"
// OptCatalogMaxDepth query parameter used to limit the depth we return
OptCatalogMaxDepth = "depth"
)
// Api clientserver Constants
const (
OsdVolumePath = "osd-volumes"
OsdSnapshotPath = "osd-snapshot"
OsdCredsPath = "osd-creds"
OsdBackupPath = "osd-backup"
OsdMigratePath = "osd-migrate"
OsdMigrateStartPath = OsdMigratePath + "/start"
OsdMigrateCancelPath = OsdMigratePath + "/cancel"
OsdMigrateStatusPath = OsdMigratePath + "/status"
TimeLayout = "Jan 2 15:04:05 UTC 2006"
)
const (
// AutoAggregation value indicates driver to select aggregation level.
AutoAggregation = math.MaxUint32
)
// Node describes the state of a node.
// It includes the current physical state (CPU, memory, storage, network usage) as
// well as the containers running on the system.
//
// swagger:model
type Node struct {
// Id of the node.
Id string
// SchedulerNodeName is name of the node in scheduler context. It can be
// empty if unable to get the name from the scheduler.
SchedulerNodeName string
// Cpu usage of the node.
Cpu float64 // percentage.
// Total Memory of the node
MemTotal uint64
// Used Memory of the node
MemUsed uint64
// Free Memory of the node
MemFree uint64
// Average load (percentage)
Avgload int
// Node Status see (Status object)
Status Status
// GenNumber of the node
GenNumber uint64
// List of disks on this node.
Disks map[string]StorageResource
// List of storage pools this node supports
Pools []StoragePool
// Management IP
MgmtIp string
// Data IP
DataIp string
// Timestamp
Timestamp time.Time
// Start time of this node
StartTime time.Time
// Hostname of this node
Hostname string
// Node data for this node (EX: Public IP, Provider, City..)
NodeData map[string]interface{}
// User defined labels for node. Key Value pairs
NodeLabels map[string]string
// GossipPort is the port used by the gossip protocol
GossipPort string
}
// FluentDConfig describes ip and port of a fluentdhost.
// DEPRECATED
//
// swagger:model
type FluentDConfig struct {
IP string `json:"ip"`
Port string `json:"port"`
}
// TunnelConfig describes key, cert and endpoint of a reverse proxy tunnel
// DEPRECATED
//
// swagger:model
type TunnelConfig struct {
Key string `json:"key"`
Cert string `json:"cert"`
Endpoint string `json:"tunnel_endpoint"`
}
// Cluster represents the state of the cluster.
//
// swagger:model
type Cluster struct {
Status Status
// Id of the cluster.
//
// required: true
Id string
// Id of the node on which this cluster object is initialized
NodeId string
// array of all the nodes in the cluster.
Nodes []Node
// Logging url for the cluster.
LoggingURL string
// Management url for the cluster
ManagementURL string
// FluentD Host for the cluster
FluentDConfig FluentDConfig
// TunnelConfig for the cluster [key, cert, endpoint]
TunnelConfig TunnelConfig
}
// CredCreateRequest is the input for CredCreate command
type CredCreateRequest struct {
// InputParams is map describing cloud provide
InputParams map[string]string
}
// CredCreateResponse is returned for CredCreate command
type CredCreateResponse struct {
// UUID of the credential that was just created
UUID string
}
// StatPoint represents the basic structure of a single Stat reported
// TODO: This is the first step to introduce stats in openstorage.
// Follow up task is to introduce an API for logging stats
type StatPoint struct {
// Name of the Stat
Name string
// Tags for the Stat
Tags map[string]string
// Fields and values of the stat
Fields map[string]interface{}
// Timestamp in Unix format
Timestamp int64
}
type CloudBackupCreateRequest struct {
// VolumeID of the volume for which cloudbackup is requested
VolumeID string
// CredentialUUID is cloud credential to be used for backup
CredentialUUID string
// Full indicates if full backup is desired even though incremental is possible
Full bool
// Name is optional unique id to be used for this backup
// If not specified backup creates this by default
Name string
// Labels are list of key value pairs to tag the cloud backup. These labels
// are stored in the metadata associated with the backup.
Labels map[string]string
	// FullBackupFrequency indicates the number of incremental backups after which
	// a full backup must be created. This is to override the default value for
	// manual/user triggered backups and is not applicable for scheduled backups.
	// Value of 0 retains the default behavior.
FullBackupFrequency uint32
}
type CloudBackupCreateResponse struct {
// Name of the task performing this backup
Name string
}
type CloudBackupGroupCreateRequest struct {
// GroupID indicates backup request for a volumegroup with this group id
GroupID string
// Labels indicates backup request for a volume group with these labels
Labels map[string]string
// VolumeIDs are a list of volume IDs to use for the backup request
// If multiple of GroupID, Labels or VolumeIDs are specified, volumes matching all of
// them are backed up to cloud
VolumeIDs []string
// CredentialUUID is cloud credential to be used for backup
CredentialUUID string
// Full indicates if full backup is desired even though incremental is possible
Full bool
}
type CloudBackupRestoreRequest struct {
// ID is the backup ID being restored
ID string
// RestoreVolumeName is optional volume Name of the new volume to be created
// in the cluster for restoring the cloudbackup
RestoreVolumeName string
// CredentialUUID is the credential to be used for restore operation
CredentialUUID string
// NodeID is the optional NodeID for provisioning restore
	// volume (RestoreVolumeName should not be specified)
NodeID string
// Name is optional unique id to be used for this restore op
// restore creates this by default
Name string
}
type CloudBackupGroupCreateResponse struct {
// ID for this group of backups
GroupCloudBackupID string
// Names of the tasks performing this group backup
Names []string
}
type CloudBackupRestoreResponse struct {
// RestoreVolumeID is the volumeID to which the backup is being restored
RestoreVolumeID string
// Name of the task performing this restore
Name string
}
type CloudBackupGenericRequest struct {
// SrcVolumeID is optional Source VolumeID for the request
SrcVolumeID string
// ClusterID is the optional clusterID for the request
ClusterID string
// CredentialUUID is the credential for cloud to be used for the request
CredentialUUID string
// All if set to true, backups for all clusters in the cloud are processed
All bool
}
type CloudBackupInfo struct {
// ID is the ID of the cloud backup
ID string
// SrcVolumeID is Source volumeID of the backup
SrcVolumeID string
	// SrcVolumeName is the name of the source volume of the backup
SrcVolumeName string
// Timestamp is the timestamp at which the source volume
// was backed up to cloud
Timestamp time.Time
// Metadata associated with the backup
Metadata map[string]string
// Status indicates the status of the backup
Status string
}
type CloudBackupEnumerateRequest struct {
CloudBackupGenericRequest
}
type CloudBackupEnumerateResponse struct {
// Backups is list of backups in cloud for given volume/cluster/s
Backups []CloudBackupInfo
}
type CloudBackupDeleteRequest struct {
// ID is the ID of the cloud backup
ID string
// CredentialUUID is the credential for cloud to be used for the request
CredentialUUID string
// Force Delete cloudbackup even if there are dependencies
Force bool
}
type CloudBackupDeleteAllRequest struct {
CloudBackupGenericRequest
}
type CloudBackupStatusRequest struct {
// SrcVolumeID optional volumeID to list status of backup/restore
SrcVolumeID string
// Local indicates if only those backups/restores that are
// active on current node must be returned
Local bool
// ID of the backup/restore task. If this is specified, SrcVolumeID is
// ignored. This could be GroupCloudBackupId too, and in that case multiple
// statuses belonging to the groupCloudBackupID is returned.
ID string
}
type CloudBackupStatusRequestOld struct {
// Old field for task ID
Name string
// New structure
CloudBackupStatusRequest
}
// CloudBackupOpType identifies the kind of cloud backup operation.
type CloudBackupOpType string

const (
	CloudBackupOp  = CloudBackupOpType("Backup")
	CloudRestoreOp = CloudBackupOpType("Restore")
)

// CloudBackupStatusType identifies the state of a backup/restore task.
type CloudBackupStatusType string

const (
	CloudBackupStatusNotStarted = CloudBackupStatusType("NotStarted")
	CloudBackupStatusDone       = CloudBackupStatusType("Done")
	CloudBackupStatusAborted    = CloudBackupStatusType("Aborted")
	CloudBackupStatusPaused     = CloudBackupStatusType("Paused")
	CloudBackupStatusStopped    = CloudBackupStatusType("Stopped")
	CloudBackupStatusActive     = CloudBackupStatusType("Active")
	CloudBackupStatusQueued     = CloudBackupStatusType("Queued")
	CloudBackupStatusFailed     = CloudBackupStatusType("Failed")
)

// State transitions accepted by CloudBackupStateChangeRequest.
const (
	CloudBackupRequestedStatePause  = "pause"
	CloudBackupRequestedStateResume = "resume"
	CloudBackupRequestedStateStop   = "stop"
)
// CloudBackupStatus describes the progress of a single backup/restore task.
type CloudBackupStatus struct {
	// ID is the ID for the operation
	ID string
	// OpType indicates if this is a backup or restore
	OpType CloudBackupOpType
	// Status indicates if the op is currently active/done/failed
	Status CloudBackupStatusType
	// BytesDone indicates Bytes uploaded/downloaded so far
	BytesDone uint64
	// BytesTotal is the total number of bytes being transferred
	BytesTotal uint64
	// EtaSeconds estimated time in seconds for backup/restore completion
	EtaSeconds int64
	// StartTime indicates Op's start time
	StartTime time.Time
	// CompletedTime indicates Op's completed time
	CompletedTime time.Time
	// NodeID is the ID of the node where this Op is active
	NodeID string
	// SrcVolumeID is either the volume being backed-up or target volume to
	// which a cloud backup is being restored
	SrcVolumeID string
	// Info currently indicates only failure cause in case of failed backup/restore
	Info []string
	// CredentialUUID used for this backup/restore op
	CredentialUUID string
	// GroupCloudBackupID is valid for backups that were started as part of group
	// cloudbackup request
	GroupCloudBackupID string
}

// CloudBackupStatusResponse is the response to a status request.
type CloudBackupStatusResponse struct {
	// Statuses is list of currently active/failed/done backup/restores
	// map key is the id of the task
	Statuses map[string]CloudBackupStatus
}

// CloudBackupCatalogRequest requests the content listing of a backup.
type CloudBackupCatalogRequest struct {
	// ID is Backup ID in the cloud
	ID string
	// CredentialUUID is the credential for cloud
	CredentialUUID string
}

// CloudBackupCatalogResponse is the response to a catalog request.
type CloudBackupCatalogResponse struct {
	// Contents is listing of backup contents
	Contents []string
}

// CloudBackupHistoryRequest requests the backup/restore history of a volume.
type CloudBackupHistoryRequest struct {
	// SrcVolumeID is volumeID for which history of backup/restore
	// is being requested
	SrcVolumeID string
}

// CloudBackupHistoryItem is a single entry of past backup/restore activity.
type CloudBackupHistoryItem struct {
	// SrcVolumeID is volume ID which was backedup
	SrcVolumeID string
	// Timestamp is the time at which either backup completed/failed
	Timestamp time.Time
	// Status indicates whether backup was completed/failed
	Status string
}

// CloudBackupHistoryResponse is the response to a history request.
type CloudBackupHistoryResponse struct {
	// HistoryList is list of past backup/restores in the cluster
	HistoryList []CloudBackupHistoryItem
}
// CloudBackupStateChangeRequest pauses, resumes or stops a running task.
type CloudBackupStateChangeRequest struct {
	// Name of the backup/restore task for which state change
	// is being requested
	Name string
	// RequestedState is desired state of the op
	// can be pause/resume/stop
	RequestedState string
}

// CloudBackupScheduleInfo describes a periodic cloud backup schedule.
type CloudBackupScheduleInfo struct {
	// SrcVolumeID is the schedule's source volume
	SrcVolumeID string
	// CredentialUUID is the cloud credential used with this schedule
	CredentialUUID string
	// Schedule is the frequency of backup
	Schedule string
	// MaxBackups are the maximum number of backups retained
	// in cloud. Older backups are deleted
	MaxBackups uint
	// GroupID indicates the group of volumes for this cloudbackup schedule
	GroupID string
	// Labels indicates a volume group for this cloudsnap schedule
	Labels map[string]string
	// Full indicates if scheduled backups must be full always
	Full bool
	// RetentionDays is the number of days that the scheduled backups will be kept
	// and after these number of days it will be deleted
	RetentionDays uint32
}

// CloudBackupSchedCreateRequest creates a cloud backup schedule.
type CloudBackupSchedCreateRequest struct {
	CloudBackupScheduleInfo
}

// CloudBackupGroupSchedCreateRequest creates a cloud backup schedule for a
// group of volumes.
type CloudBackupGroupSchedCreateRequest struct {
	// GroupID indicates the group of volumes for which cloudbackup schedule is
	// being created
	GroupID string
	// Labels indicates a volume group for which this group cloudsnap schedule is
	// being created. If this is provided GroupId is not needed and vice-versa.
	Labels map[string]string
	// VolumeIDs are a list of volume IDs to use for the backup request
	// If multiple of GroupID, Labels or VolumeIDs are specified, volumes matching all of
	// them are backed up to cloud
	VolumeIDs []string
	// CredentialUUID is cloud credential to be used with this schedule
	CredentialUUID string
	// Schedule is the frequency of backup
	Schedule string
	// MaxBackups are the maximum number of backups retained
	// in cloud. Older backups are deleted
	MaxBackups uint
	// Full indicates if scheduled backups must be full always
	Full bool
}

// CloudBackupSchedCreateResponse is the response to a schedule create
// request.
type CloudBackupSchedCreateResponse struct {
	// UUID is the UUID of the newly created schedule
	UUID string
}

// CloudBackupSchedDeleteRequest deletes a cloud backup schedule.
type CloudBackupSchedDeleteRequest struct {
	// UUID is UUID of the schedule to be deleted
	UUID string
}

// CloudBackupSchedEnumerateResponse lists all configured backup schedules.
type CloudBackupSchedEnumerateResponse struct {
	// Schedules is map of schedule uuid to scheduleInfo
	Schedules map[string]CloudBackupScheduleInfo
}

// CapacityUsageResponse defines the response for a CapacityUsage request.
type CapacityUsageResponse struct {
	CapacityUsageInfo *CapacityUsageInfo
	// Error describes the failure when all of the usage details could not be
	// obtained
	Error error
}
// DriverTypeSimpleValueOf returns the DriverType matching the given simple
// string.
func DriverTypeSimpleValueOf(s string) (DriverType, error) {
	obj, err := simpleValueOf("driver_type", DriverType_value, s)
	return DriverType(obj), err
}

// SimpleString returns the string format of DriverType
func (x DriverType) SimpleString() string {
	return simpleString("driver_type", DriverType_name, int32(x))
}

// FSTypeSimpleValueOf returns the FSType matching the given simple string.
func FSTypeSimpleValueOf(s string) (FSType, error) {
	obj, err := simpleValueOf("fs_type", FSType_value, s)
	return FSType(obj), err
}

// SimpleString returns the string format of FSType
func (x FSType) SimpleString() string {
	return simpleString("fs_type", FSType_name, int32(x))
}

// CosTypeSimpleValueOf returns the CosType matching the given string.
// Unlike the other converters it looks up the upper-cased raw enum name
// directly rather than a "<type>_<name>" key.
func CosTypeSimpleValueOf(s string) (CosType, error) {
	obj, exists := CosType_value[strings.ToUpper(s)]
	if !exists {
		return -1, fmt.Errorf("Invalid cos value: %s", s)
	}
	return CosType(obj), nil
}

// SimpleString returns the string format of CosType
func (x CosType) SimpleString() string {
	return simpleString("cos_type", CosType_name, int32(x))
}

// GraphDriverChangeTypeSimpleValueOf returns the GraphDriverChangeType
// matching the given simple string.
func GraphDriverChangeTypeSimpleValueOf(s string) (GraphDriverChangeType, error) {
	obj, err := simpleValueOf("graph_driver_change_type", GraphDriverChangeType_value, s)
	return GraphDriverChangeType(obj), err
}

// SimpleString returns the string format of GraphDriverChangeType
func (x GraphDriverChangeType) SimpleString() string {
	return simpleString("graph_driver_change_type", GraphDriverChangeType_name, int32(x))
}

// VolumeActionParamSimpleValueOf returns the VolumeActionParam matching the
// given simple string.
func VolumeActionParamSimpleValueOf(s string) (VolumeActionParam, error) {
	obj, err := simpleValueOf("volume_action_param", VolumeActionParam_value, s)
	return VolumeActionParam(obj), err
}

// SimpleString returns the string format of VolumeActionParam
func (x VolumeActionParam) SimpleString() string {
	return simpleString("volume_action_param", VolumeActionParam_name, int32(x))
}

// VolumeStateSimpleValueOf returns the VolumeState matching the given
// simple string.
func VolumeStateSimpleValueOf(s string) (VolumeState, error) {
	obj, err := simpleValueOf("volume_state", VolumeState_value, s)
	return VolumeState(obj), err
}

// SimpleString returns the string format of VolumeState
func (x VolumeState) SimpleString() string {
	return simpleString("volume_state", VolumeState_name, int32(x))
}

// VolumeStatusSimpleValueOf returns the VolumeStatus matching the given
// simple string.
func VolumeStatusSimpleValueOf(s string) (VolumeStatus, error) {
	obj, err := simpleValueOf("volume_status", VolumeStatus_value, s)
	return VolumeStatus(obj), err
}

// SimpleString returns the string format of VolumeStatus
func (x VolumeStatus) SimpleString() string {
	return simpleString("volume_status", VolumeStatus_name, int32(x))
}

// IoProfileSimpleValueOf returns the IoProfile matching the given simple
// string.
func IoProfileSimpleValueOf(s string) (IoProfile, error) {
	obj, err := simpleValueOf("io_profile", IoProfile_value, s)
	return IoProfile(obj), err
}

// SimpleString returns the string format of IoProfile
func (x IoProfile) SimpleString() string {
	return simpleString("io_profile", IoProfile_name, int32(x))
}
// simpleValueOf maps a lower-case short name back to its enum value by
// reconstructing the proto-generated "<TYPE>_<NAME>" key and looking it up
// in the generated value map.
func simpleValueOf(typeString string, valueMap map[string]int32, s string) (int32, error) {
	key := strings.ToUpper(fmt.Sprintf("%s_%s", typeString, s))
	if value, found := valueMap[key]; found {
		return value, nil
	}
	return 0, fmt.Errorf("no openstorage.%s for %s", strings.ToUpper(typeString), s)
}

// simpleString maps an enum value to its lower-case short name by stripping
// the "<type>_" prefix from the proto-generated name. Values missing from
// the name map are rendered as their decimal representation.
func simpleString(typeString string, nameMap map[int32]string, v int32) string {
	name, found := nameMap[v]
	if !found {
		return strconv.Itoa(int(v))
	}
	prefix := fmt.Sprintf("%s_", strings.ToLower(typeString))
	return strings.TrimPrefix(strings.ToLower(name), prefix)
}

// toSec converts milliseconds to whole seconds, truncating any remainder.
func toSec(ms uint64) uint64 {
	return ms / 1000
}
// WriteThroughput returns the write throughput in bytes per second over the
// sampling interval; zero when the interval is under one second.
func (v *Stats) WriteThroughput() uint64 {
	intv := toSec(v.IntervalMs)
	if intv == 0 {
		return 0
	}
	return (v.WriteBytes) / intv
}

// ReadThroughput returns the read throughput in bytes per second over the
// sampling interval; zero when the interval is under one second.
func (v *Stats) ReadThroughput() uint64 {
	intv := toSec(v.IntervalMs)
	if intv == 0 {
		return 0
	}
	return (v.ReadBytes) / intv
}

// Latency returns the average time in microseconds per I/O operation
// (reads and writes combined); zero when no I/O has been recorded.
func (v *Stats) Latency() uint64 {
	ops := v.Writes + v.Reads
	if ops == 0 {
		return 0
	}
	return (uint64)((v.IoMs * 1000) / ops)
}

// ReadLatency returns the average time in microseconds required for a read
// operation to complete; zero when no reads have been recorded.
func (v *Stats) ReadLatency() uint64 {
	if v.Reads == 0 {
		return 0
	}
	return (uint64)((v.ReadMs * 1000) / v.Reads)
}

// WriteLatency returns the average time in microseconds required for a
// write operation to complete; zero when no writes have been recorded.
func (v *Stats) WriteLatency() uint64 {
	if v.Writes == 0 {
		return 0
	}
	return (uint64)((v.WriteMs * 1000) / v.Writes)
}

// Iops returns the number of I/O operations per second over the sampling
// interval; zero when the interval is under one second.
func (v *Stats) Iops() uint64 {
	intv := toSec(v.IntervalMs)
	if intv == 0 {
		return 0
	}
	return (v.Writes + v.Reads) / intv
}

// Scaled returns true if the volume is scaled.
func (v *Volume) Scaled() bool {
	return v.Spec.Scale > 1
}
// Contains reports whether node mid is a member of any of the volume's
// replication sets.
func (m *Volume) Contains(mid string) bool {
	for _, replicaSet := range m.GetReplicaSets() {
		for _, member := range replicaSet.Nodes {
			if member == mid {
				return true
			}
		}
	}
	return false
}
// Copy makes a deep copy of VolumeSpec
func (s *VolumeSpec) Copy() *VolumeSpec {
	// Shallow-copy the struct, then give the replica set node list its own
	// backing array so the copy does not share it with the original.
	// NOTE(review): other reference-typed fields of VolumeSpec (if any)
	// remain shared with the original — confirm that is acceptable to
	// callers.
	spec := *s
	if s.ReplicaSet != nil {
		spec.ReplicaSet = &ReplicaSet{Nodes: make([]string, len(s.ReplicaSet.Nodes))}
		copy(spec.ReplicaSet.Nodes, s.ReplicaSet.Nodes)
	}
	return &spec
}

// Copy makes a deep copy of Node
func (s *Node) Copy() *Node {
	localCopy := deepcopy.Copy(*s)
	nodeCopy := localCopy.(Node)
	return &nodeCopy
}

// IsClone returns true when the volume is a writable child of another
// volume.
func (v Volume) IsClone() bool {
	return v.Source != nil && len(v.Source.Parent) != 0 && !v.Readonly
}

// IsSnapshot returns true when the volume is a read-only child of another
// volume.
func (v Volume) IsSnapshot() bool {
	return v.Source != nil && len(v.Source.Parent) != 0 && v.Readonly
}

// DisplayId returns a human-friendly "name (id)" label for the volume,
// falling back to the raw id when no locator is present.
func (v Volume) DisplayId() string {
	if v.Locator != nil {
		return fmt.Sprintf("%s (%s)", v.Locator.Name, v.Id)
	} else {
		return v.Id
	}
}
// ToStorageNode converts a Node structure to an exported gRPC StorageNode struct
func (s *Node) ToStorageNode() *StorageNode {
	node := &StorageNode{
		Id:                s.Id,
		SchedulerNodeName: s.SchedulerNodeName,
		Cpu:               s.Cpu,
		MemTotal:          s.MemTotal,
		MemUsed:           s.MemUsed,
		MemFree:           s.MemFree,
		AvgLoad:           int64(s.Avgload),
		Status:            s.Status,
		MgmtIp:            s.MgmtIp,
		DataIp:            s.DataIp,
		Hostname:          s.Hostname,
	}

	node.Disks = make(map[string]*StorageResource)
	for k := range s.Disks {
		// Copy into a fresh local before taking its address. Taking the
		// address of the range variable (&v) would make every map entry
		// point at the same variable, i.e. every disk would alias the last
		// one iterated.
		disk := s.Disks[k]
		node.Disks[k] = &disk
	}

	node.NodeLabels = make(map[string]string)
	for k, v := range s.NodeLabels {
		node.NodeLabels[k] = v
	}

	node.Pools = make([]*StoragePool, len(s.Pools))
	for i := range s.Pools {
		// Same aliasing hazard as above: &v on the range variable would
		// leave every slice slot pointing at the final pool.
		pool := s.Pools[i]
		node.Pools[i] = &pool
	}
	return node
}
// ToStorageCluster converts a Cluster structure to an exported gRPC StorageCluster struct
func (c *Cluster) ToStorageCluster() *StorageCluster {
	cluster := &StorageCluster{
		Status: c.Status,
		// Due to history, the cluster ID is normally the name of the cluster, not the
		// unique identifier
		Name: c.Id,
	}
	return cluster
}
// CloudBackupStatusTypeToSdkCloudBackupStatusType converts an internal
// backup status to its SDK equivalent; unrecognized values map to Unknown.
func CloudBackupStatusTypeToSdkCloudBackupStatusType(
	t CloudBackupStatusType,
) SdkCloudBackupStatusType {
	switch t {
	case CloudBackupStatusNotStarted:
		return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeNotStarted
	case CloudBackupStatusDone:
		return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeDone
	case CloudBackupStatusAborted:
		return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeAborted
	case CloudBackupStatusPaused:
		return SdkCloudBackupStatusType_SdkCloudBackupStatusTypePaused
	case CloudBackupStatusStopped:
		return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeStopped
	case CloudBackupStatusActive:
		return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeActive
	case CloudBackupStatusFailed:
		return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeFailed
	case CloudBackupStatusQueued:
		return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeQueued
	default:
		return SdkCloudBackupStatusType_SdkCloudBackupStatusTypeUnknown
	}
}

// SdkCloudBackupStatusTypeToCloudBackupStatusString converts an SDK status
// type back to the internal status string.
// NOTE(review): unrecognized values fall through to "Failed" here, while
// the forward conversion above uses "Unknown" — confirm this asymmetry is
// intentional.
func SdkCloudBackupStatusTypeToCloudBackupStatusString(
	t SdkCloudBackupStatusType,
) string {
	switch t {
	case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeNotStarted:
		return string(CloudBackupStatusNotStarted)
	case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeDone:
		return string(CloudBackupStatusDone)
	case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeAborted:
		return string(CloudBackupStatusAborted)
	case SdkCloudBackupStatusType_SdkCloudBackupStatusTypePaused:
		return string(CloudBackupStatusPaused)
	case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeStopped:
		return string(CloudBackupStatusStopped)
	case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeActive:
		return string(CloudBackupStatusActive)
	case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeFailed:
		return string(CloudBackupStatusFailed)
	case SdkCloudBackupStatusType_SdkCloudBackupStatusTypeQueued:
		return string(CloudBackupStatusQueued)
	default:
		return string(CloudBackupStatusFailed)
	}
}

// StringToSdkCloudBackupStatusType converts a raw status string to the SDK
// status type.
func StringToSdkCloudBackupStatusType(s string) SdkCloudBackupStatusType {
	return CloudBackupStatusTypeToSdkCloudBackupStatusType(CloudBackupStatusType(s))
}

// ToSdkCloudBackupInfo converts a CloudBackupInfo to its SDK
// representation. Timestamp conversion errors are deliberately ignored.
func (b *CloudBackupInfo) ToSdkCloudBackupInfo() *SdkCloudBackupInfo {
	info := &SdkCloudBackupInfo{
		Id:            b.ID,
		SrcVolumeId:   b.SrcVolumeID,
		SrcVolumeName: b.SrcVolumeName,
		Metadata:      b.Metadata,
	}
	info.Timestamp, _ = ptypes.TimestampProto(b.Timestamp)
	info.Status = StringToSdkCloudBackupStatusType(b.Status)
	return info
}

// ToSdkCloudBackupEnumerateWithFiltersResponse converts an enumerate
// response to its SDK representation.
func (r *CloudBackupEnumerateResponse) ToSdkCloudBackupEnumerateWithFiltersResponse() *SdkCloudBackupEnumerateWithFiltersResponse {
	resp := &SdkCloudBackupEnumerateWithFiltersResponse{
		Backups: make([]*SdkCloudBackupInfo, len(r.Backups)),
	}
	for i, v := range r.Backups {
		resp.Backups[i] = v.ToSdkCloudBackupInfo()
	}
	return resp
}
// CloudBackupOpTypeToSdkCloudBackupOpType converts an internal op type to
// its SDK equivalent; unrecognized values map to Unknown.
func CloudBackupOpTypeToSdkCloudBackupOpType(t CloudBackupOpType) SdkCloudBackupOpType {
	switch t {
	case CloudBackupOp:
		return SdkCloudBackupOpType_SdkCloudBackupOpTypeBackupOp
	case CloudRestoreOp:
		return SdkCloudBackupOpType_SdkCloudBackupOpTypeRestoreOp
	default:
		return SdkCloudBackupOpType_SdkCloudBackupOpTypeUnknown
	}
}

// StringToSdkCloudBackupOpType converts a raw op-type string to the SDK op
// type.
func StringToSdkCloudBackupOpType(s string) SdkCloudBackupOpType {
	return CloudBackupOpTypeToSdkCloudBackupOpType(CloudBackupOpType(s))
}

// SdkCloudBackupOpTypeToCloudBackupOpType converts an SDK op type back to
// the internal op type.
func SdkCloudBackupOpTypeToCloudBackupOpType(t SdkCloudBackupOpType) CloudBackupOpType {
	switch t {
	case SdkCloudBackupOpType_SdkCloudBackupOpTypeBackupOp:
		return CloudBackupOp
	case SdkCloudBackupOpType_SdkCloudBackupOpTypeRestoreOp:
		return CloudRestoreOp
	default:
		return CloudBackupOpType("Unknown")
	}
}

// ToSdkCloudBackupStatus converts a CloudBackupStatus to its SDK
// representation. Timestamp conversion errors are deliberately ignored.
func (s CloudBackupStatus) ToSdkCloudBackupStatus() *SdkCloudBackupStatus {
	status := &SdkCloudBackupStatus{
		BackupId:     s.ID,
		Optype:       CloudBackupOpTypeToSdkCloudBackupOpType(s.OpType),
		Status:       CloudBackupStatusTypeToSdkCloudBackupStatusType(s.Status),
		BytesDone:    s.BytesDone,
		NodeId:       s.NodeID,
		Info:         s.Info,
		CredentialId: s.CredentialUUID,
		SrcVolumeId:  s.SrcVolumeID,
		EtaSeconds:   s.EtaSeconds,
		BytesTotal:   s.BytesTotal,
	}
	status.StartTime, _ = ptypes.TimestampProto(s.StartTime)
	status.CompletedTime, _ = ptypes.TimestampProto(s.CompletedTime)
	return status
}

// ToSdkCloudBackupStatusResponse converts a status response to its SDK
// representation, keyed by task id like the source map.
func (r *CloudBackupStatusResponse) ToSdkCloudBackupStatusResponse() *SdkCloudBackupStatusResponse {
	resp := &SdkCloudBackupStatusResponse{
		Statuses: make(map[string]*SdkCloudBackupStatus),
	}
	for k, v := range r.Statuses {
		resp.Statuses[k] = v.ToSdkCloudBackupStatus()
	}
	return resp
}

// ToSdkCloudBackupHistoryItem converts a history item to its SDK
// representation. Timestamp conversion errors are deliberately ignored.
func (h CloudBackupHistoryItem) ToSdkCloudBackupHistoryItem() *SdkCloudBackupHistoryItem {
	item := &SdkCloudBackupHistoryItem{
		SrcVolumeId: h.SrcVolumeID,
		Status:      StringToSdkCloudBackupStatusType(h.Status),
	}
	item.Timestamp, _ = ptypes.TimestampProto(h.Timestamp)
	return item
}

// ToSdkCloudBackupHistoryResponse converts a history response to its SDK
// representation.
func (r *CloudBackupHistoryResponse) ToSdkCloudBackupHistoryResponse() *SdkCloudBackupHistoryResponse {
	resp := &SdkCloudBackupHistoryResponse{
		HistoryList: make([]*SdkCloudBackupHistoryItem, len(r.HistoryList)),
	}
	for i, v := range r.HistoryList {
		resp.HistoryList[i] = v.ToSdkCloudBackupHistoryItem()
	}
	return resp
}
// MergeVolumeSpecLabels merges the spec's volume labels into the locator's
// labels and returns the locator. Locator labels with the same key are
// overwritten by the spec's values.
func (l *VolumeLocator) MergeVolumeSpecLabels(s *VolumeSpec) *VolumeLocator {
	specLabels := s.GetVolumeLabels()
	// Lazily initialize the destination map: assigning into a nil map
	// panics, and a locator created without labels would otherwise crash
	// here.
	if l.VolumeLabels == nil && len(specLabels) > 0 {
		l.VolumeLabels = make(map[string]string, len(specLabels))
	}
	for k, v := range specLabels {
		l.VolumeLabels[k] = v
	}
	return l
}
// IsPermitted returns true when the user in ctx may access the volume with
// the given access type.
func (v *Volume) IsPermitted(ctx context.Context, accessType Ownership_AccessType) bool {
	return v.GetSpec().IsPermitted(ctx, accessType)
}

// IsPermitted returns true when the user in ctx may access the spec with
// the given access type.
func (v *VolumeSpec) IsPermitted(ctx context.Context, accessType Ownership_AccessType) bool {
	return v.GetOwnership().IsPermittedByContext(ctx, accessType)
}

// IsPermittedFromUserInfo returns true when the given user may access the
// spec with the given access type. Public specs are always permitted.
func (v *VolumeSpec) IsPermittedFromUserInfo(user *auth.UserInfo, accessType Ownership_AccessType) bool {
	if v.IsPublic() {
		return true
	}
	if v.GetOwnership() != nil {
		return v.GetOwnership().IsPermitted(user, accessType)
	}
	return true
}

// IsPublic returns true when the spec carries no ownership information or
// its ownership is marked public.
func (v *VolumeSpec) IsPublic() bool {
	return v.GetOwnership() == nil || v.GetOwnership().IsPublic()
}

// GetCloneCreatorOwnership returns the appropriate ownership for the
// new snapshot and if an update is required
func (v *VolumeSpec) GetCloneCreatorOwnership(ctx context.Context) (*Ownership, bool) {
	o := v.GetOwnership()
	// If there is user information, then auth is enabled
	if userinfo, ok := auth.NewUserInfoFromContext(ctx); ok {
		// Check if the owner is the one who cloned it
		if o != nil && o.IsOwner(userinfo) {
			return o, false
		}
		// Not the same owner, we now need new ownership.
		// This works for public volumes also.
		return OwnershipSetUsernameFromContext(ctx, nil), true
	}
	return o, false
}

// IsPermitted checks access permission of SdkStoragePolicy objects.
func (s *SdkStoragePolicy) IsPermitted(ctx context.Context, accessType Ownership_AccessType) bool {
	if s.IsPublic() {
		return true
	}
	// Storage Policy is not public, check permission
	if userinfo, ok := auth.NewUserInfoFromContext(ctx); ok {
		// Check Access
		return s.IsPermittedFromUserInfo(userinfo, accessType)
	} else {
		// There is no user information in the context so
		// authorization is not running
		return true
	}
}

// IsPermittedFromUserInfo returns true when the given user may access the
// storage policy with the given access type. Public policies are always
// permitted.
func (s *SdkStoragePolicy) IsPermittedFromUserInfo(user *auth.UserInfo, accessType Ownership_AccessType) bool {
	if s.IsPublic() {
		return true
	}
	if s.GetOwnership() != nil {
		return s.GetOwnership().IsPermitted(user, accessType)
	}
	return true
}

// IsPublic returns true when the policy carries no ownership information or
// its ownership is marked public.
func (s *SdkStoragePolicy) IsPublic() bool {
	return s.GetOwnership() == nil || s.GetOwnership().IsPublic()
}
// CloudBackupRequestedStateToSdkCloudBackupRequestedState converts a
// requested-state string (pause/resume/stop) to its SDK enum; anything else
// maps to Unknown.
func CloudBackupRequestedStateToSdkCloudBackupRequestedState(
	t string,
) SdkCloudBackupRequestedState {
	switch t {
	case CloudBackupRequestedStateStop:
		return SdkCloudBackupRequestedState_SdkCloudBackupRequestedStateStop
	case CloudBackupRequestedStatePause:
		return SdkCloudBackupRequestedState_SdkCloudBackupRequestedStatePause
	case CloudBackupRequestedStateResume:
		return SdkCloudBackupRequestedState_SdkCloudBackupRequestedStateResume
	default:
		return SdkCloudBackupRequestedState_SdkCloudBackupRequestedStateUnknown
	}
}

// Helpers for volume state action

// IsAttach reports whether the action requests an attach.
func (m *VolumeStateAction) IsAttach() bool {
	return m.GetAttach() == VolumeActionParam_VOLUME_ACTION_PARAM_ON
}

// IsDetach reports whether the action requests a detach.
func (m *VolumeStateAction) IsDetach() bool {
	return m.GetAttach() == VolumeActionParam_VOLUME_ACTION_PARAM_OFF
}

// IsMount reports whether the action requests a mount.
func (m *VolumeStateAction) IsMount() bool {
	return m.GetMount() == VolumeActionParam_VOLUME_ACTION_PARAM_ON
}

// IsUnMount reports whether the action requests an unmount.
func (m *VolumeStateAction) IsUnMount() bool {
	return m.GetMount() == VolumeActionParam_VOLUME_ACTION_PARAM_OFF
}
| 1 | 8,166 | Not sure if we need tag here, it is an implementation detail in portworx that isn't exposed in openstorage | libopenstorage-openstorage | go |
@@ -50,9 +50,15 @@ class Summon extends SolrDefault
* returned as an array of chunks, increasing from least specific to most
* specific.
*
+ * @param bool $extended Whether to return a keyed array with the following
+ * keys:
+ * - heading: the actual subject heading chunks
+ * - type: heading type
+ * - source: source vocabulary
+ *
* @return array
*/
- public function getAllSubjectHeadings()
+ public function getAllSubjectHeadings($extended = false)
{
$retval = [];
$topic = isset($this->fields['SubjectTerms']) ? | 1 | <?php
/**
* Model for Summon records.
*
* PHP version 5
*
* Copyright (C) Villanova University 2010.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package RecordDrivers
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:record_drivers Wiki
*/
namespace VuFind\RecordDriver;
/**
* Model for Summon records.
*
* @category VuFind
* @package RecordDrivers
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:record_drivers Wiki
*/
class Summon extends SolrDefault
{
/**
 * Date converter (lazily constructed by getDateConverter() when not
 * injected via setDateConverter())
 *
 * @var \VuFind\Date\Converter
 */
protected $dateConverter = null;
/**
 * Get all subject headings associated with this record. Each heading is
 * returned as an array of chunks, increasing from least specific to most
 * specific.
 *
 * @param bool $extended Whether to return a keyed array with the following
 * keys:
 * - heading: the actual subject heading chunks
 * - type: heading type
 * - source: source vocabulary
 *
 * @return array
 */
public function getAllSubjectHeadings($extended = false)
{
    // Collect every flavor of subject data found in the Summon response:
    $headings = [];
    $fieldList = [
        'SubjectTerms', 'TemporalSubjectTerms', 'GeographicLocations',
        'Keywords'
    ];
    foreach ($fieldList as $field) {
        if (isset($this->fields[$field])) {
            foreach ($this->fields[$field] as $term) {
                $headings[] = trim($term);
            }
        }
    }
    // Summon does not label heading type or source vocabulary, so in
    // extended mode those keys are returned empty.
    $callback = function ($heading) use ($extended) {
        return $extended
            ? ['heading' => [$heading], 'type' => '', 'source' => '']
            : [$heading];
    };
    return array_map($callback, $headings);
}
/**
 * Get notes on bibliography content.
 *
 * @return array
 */
public function getBibliographyNotes()
{
    return isset($this->fields['Notes']) ?
        $this->fields['Notes'] : [];
}

/**
 * Get the call numbers associated with the record (empty array if none).
 *
 * @return array
 */
public function getCallNumbers()
{
    // Summon calls this LCCNum even though it may be Dewey
    return isset($this->fields['LCCCallnum'])
        && !empty($this->fields['LCCCallnum'])
        ? [$this->fields['LCCCallnum']] : [];
}

/**
 * Return the first valid DOI found in the record (false if none).
 *
 * @return mixed
 */
public function getCleanDOI()
{
    return (isset($this->fields['DOI'][0]) && !empty($this->fields['DOI'][0]))
        ? $this->fields['DOI'][0] : false;
}

/**
 * Get the edition of the current record.
 *
 * @return string
 */
public function getEdition()
{
    return isset($this->fields['Edition']) ?
        $this->fields['Edition'][0] : '';
}

/**
 * Get an array of all the formats associated with the record.
 *
 * @return array
 */
public function getFormats()
{
    return isset($this->fields['ContentType'])
        ? $this->fields['ContentType'] : [];
}

/**
 * Get highlighted author data, if available.
 *
 * @return array
 */
public function getRawAuthorHighlights()
{
    // Don't check for highlighted values if highlighting is disabled.
    return ($this->highlight && isset($this->fields['Author']))
        ? $this->fields['Author'] : [];
}

/**
 * Pick one line from the highlighted text (if any) to use as a snippet.
 *
 * @return mixed False if no snippet found, otherwise associative array
 * with 'snippet' and 'caption' keys.
 */
public function getHighlightedSnippet()
{
    // Trailing periods are trimmed for cleaner display; Summon provides no
    // field captions for its snippets.
    return isset($this->fields['Snippet'][0])
        ? [
            'snippet' => trim($this->fields['Snippet'][0], '.'),
            'caption' => ''
        ]
        : false;
}

/**
 * Get a highlighted title string, if available.
 *
 * @return string
 */
public function getHighlightedTitle()
{
    // Don't check for highlighted values if highlighting is disabled:
    if (!$this->highlight) {
        return '';
    }
    $title = $this->getShortTitle();
    $sub = $this->getSubtitle();
    return empty($sub) ? $title : "{$title}: {$sub}";
}

/**
 * Get an array of all ISBNs associated with the record (may be empty).
 *
 * @return array
 */
public function getISBNs()
{
    if (isset($this->fields['ISBN']) && is_array($this->fields['ISBN'])) {
        return $this->fields['ISBN'];
    }
    return [];
}
/**
 * Get an array of all ISSNs associated with the record (may be empty).
 *
 * @return array
 */
public function getISSNs()
{
    // Combine print (ISSN) and electronic (EISSN) identifiers into a
    // single list, tolerating the absence of either field:
    return array_merge(
        isset($this->fields['ISSN']) ? $this->fields['ISSN'] : [],
        isset($this->fields['EISSN']) ? $this->fields['EISSN'] : []
    );
}
/**
 * Get an array of all the languages associated with the record.
 *
 * @return array
 */
public function getLanguages()
{
    return isset($this->fields['Language']) ?
        $this->fields['Language'] : [];
}

/**
 * Get the OCLC number of the record.
 *
 * @return array
 */
public function getOCLC()
{
    return isset($this->fields['OCLC']) ?
        $this->fields['OCLC'] : [];
}

/**
 * Get the OpenURL parameters to represent this record (useful for the
 * title attribute of a COinS span tag).
 *
 * @param bool $overrideSupportsOpenUrl Flag to override checking
 * supportsOpenUrl() (default is false)
 *
 * @return string OpenURL parameters.
 */
public function getOpenUrl($overrideSupportsOpenUrl = false)
{
    // stop here if this record does not support OpenURLs
    if (!$overrideSupportsOpenUrl && !$this->supportsOpenUrl()) {
        return false;
    }

    // Prefer the OpenURL pre-built by the Summon backend, if present:
    return isset($this->fields['openUrl'])
        ? $this->fields['openUrl']
        : parent::getOpenUrl($overrideSupportsOpenUrl);
}

/**
 * Checks the current record if it's supported for generating OpenURLs.
 *
 * @return bool
 */
public function supportsOpenUrl()
{
    // Summon never uses OpenURLs for anything other than COinS:
    return false;
}

/**
 * Get the item's place of publication.
 *
 * @return array
 */
public function getPlacesOfPublication()
{
    return isset($this->fields['PublicationPlace']) ?
        $this->fields['PublicationPlace'] : [];
}

/**
 * Pass in a date converter
 *
 * @param \VuFind\Date\Converter $dc Date converter
 *
 * @return void
 */
public function setDateConverter(\VuFind\Date\Converter $dc)
{
    $this->dateConverter = $dc;
}

/**
 * Get a date converter
 *
 * @return \VuFind\Date\Converter
 */
protected function getDateConverter()
{
    // No object passed in yet? Build one with default settings:
    if (null === $this->dateConverter) {
        $this->dateConverter = new \VuFind\Date\Converter();
    }
    return $this->dateConverter;
}

/**
 * Get the publication dates of the record. See also getDateSpan().
 *
 * @return array
 */
public function getPublicationDates()
{
    // Prefer the structured XML dates; fall back to the plain-text field
    // when no usable structured data is present.
    if (isset($this->fields['PublicationDate_xml'])
        && is_array($this->fields['PublicationDate_xml'])
    ) {
        $dates = [];
        $converter = $this->getDateConverter();
        foreach ($this->fields['PublicationDate_xml'] as $current) {
            if (isset($current['month']) && isset($current['year'])) {
                // Default to the first of the month when no day is given:
                if (!isset($current['day'])) {
                    $current['day'] = 1;
                }
                $dates[] = $converter->convertToDisplayDate(
                    'm-d-Y',
                    "{$current['month']}-{$current['day']}-{$current['year']}"
                );
            } else if (isset($current['year'])) {
                $dates[] = $current['year'];
            }
        }
        if (!empty($dates)) {
            return $dates;
        }
    }
    return isset($this->fields['PublicationDate']) ?
        $this->fields['PublicationDate'] : [];
}

/**
 * Get the publishers of the record.
 *
 * @return array
 */
public function getPublishers()
{
    return isset($this->fields['Publisher']) ?
        $this->fields['Publisher'] : [];
}
/**
 * Get an array of all primary authors.
 *
 * @return array
 */
public function getPrimaryAuthors()
{
    // Pull the full name out of each structured author entry, skipping
    // entries that lack one:
    $results = [];
    $authorData = isset($this->fields['Author_xml'])
        ? $this->fields['Author_xml'] : [];
    foreach ($authorData as $author) {
        if (isset($author['fullname'])) {
            $results[] = $author['fullname'];
        }
    }
    return $results;
}
/**
 * Get an array of all series names containing the record. Array entries may
 * be either the name string, or an associative array with 'name' and 'number'
 * keys.
 *
 * @return array
 */
public function getSeries()
{
    return isset($this->fields['PublicationSeriesTitle'])
        ? $this->fields['PublicationSeriesTitle'] : [];
}

/**
 * Get the short (pre-subtitle) title of the record.
 *
 * @return string
 */
public function getShortTitle()
{
    return isset($this->fields['Title']) ?
        $this->fields['Title'][0] : '';
}

/**
 * Get the subtitle of the record.
 *
 * @return string
 */
public function getSubtitle()
{
    return isset($this->fields['Subtitle']) ?
        $this->fields['Subtitle'][0] : '';
}

/**
 * Get an array of summary strings for the record.
 *
 * @return array
 */
public function getSummary()
{
    return isset($this->fields['Abstract']) ?
        $this->fields['Abstract'] : [];
}

/**
 * Returns one of three things: a full URL to a thumbnail preview of the record
 * if an image is available in an external system; an array of parameters to
 * send to VuFind's internal cover generator if no fixed URL exists; or false
 * if no thumbnail can be generated.
 *
 * @param string $size Size of thumbnail (small, medium or large -- small is
 * default).
 *
 * @return string|array|bool
 */
public function getThumbnail($size = 'small')
{
    $params = parent::getThumbnail($size);

    // Support thumbnails embedded in the Summon record when no unique identifier
    // is found... (We don't use them in cases where we have an identifier, since
    // we want to allow these to be passed to configured external services).
    if (!isset($params['oclc']) && !isset($params['issn'])
        && !isset($params['isbn']) && !isset($params['upc'])
    ) {
        // Fall through to the medium image when no small one is available:
        if ($size === 'small' && isset($this->fields['thumbnail_s'][0])) {
            return ['proxy' => $this->fields['thumbnail_s'][0]];
        } else if (isset($this->fields['thumbnail_m'][0])) {
            return ['proxy' => $this->fields['thumbnail_m'][0]];
        }
    }
    $formats = $this->getFormats();
    if (!empty($formats)) {
        $params['contenttype'] = $formats[0];
    }
    return $params;
}
/**
 * Get the full title of the record.
 *
 * @return string
 */
public function getTitle()
{
    // Combine short title and subtitle, then strip the highlighting
    // placeholders that Summon may have injected:
    $sub = $this->getSubtitle();
    $full = empty($sub)
        ? $this->getShortTitle()
        : $this->getShortTitle() . ": {$sub}";
    $placeholders = ['{{{{START_HILITE}}}}', '{{{{END_HILITE}}}}'];
    return str_replace($placeholders, '', $full);
}
/**
 * Get an array of lines from the table of contents.
 *
 * @return array
 */
public function getTOC()
{
    return isset($this->fields['TableOfContents'])
        ? $this->fields['TableOfContents'] : [];
}

/**
 * Return an array of associative URL arrays with one or more of the following
 * keys:
 *
 * <li>
 *   <ul>desc: URL description text to display (optional)</ul>
 *   <ul>url: fully-formed URL (required if 'route' is absent)</ul>
 *   <ul>route: VuFind route to build URL with (required if 'url' is absent)</ul>
 *   <ul>routeParams: Parameters for route (optional)</ul>
 *   <ul>queryString: Query params to append after building route (optional)</ul>
 * </li>
 *
 * @return array
 */
public function getURLs()
{
    // A full-text link takes precedence over any other URLs:
    if (isset($this->fields['link'])) {
        return [
            [
                'url' => $this->fields['link'],
                'desc' => $this->translate('Get full text')
            ]
        ];
    }
    $retVal = [];
    if (isset($this->fields['url']) && is_array($this->fields['url'])) {
        // The 'url' field maps description => URL:
        foreach ($this->fields['url'] as $desc => $url) {
            $retVal[] = ['url' => $url, 'desc' => $desc];
        }
    }
    return $retVal;
}

/**
 * Return the unique identifier of this record within the Solr index;
 * useful for retrieving additional information (like tags and user
 * comments) from the external MySQL database.
 *
 * @return string Unique identifier.
 */
public function getUniqueID()
{
    return $this->fields['ID'][0];
}

/**
 * Get the title of the item that contains this record (i.e. MARC 773s of a
 * journal).
 *
 * @return string
 */
public function getContainerTitle()
{
    return isset($this->fields['PublicationTitle'])
        ? $this->fields['PublicationTitle'][0] : '';
}

/**
 * Get the volume of the item that contains this record (i.e. MARC 773v of a
 * journal).
 *
 * @return string
 */
public function getContainerVolume()
{
    return (isset($this->fields['Volume'])) ? $this->fields['Volume'][0] : '';
}

/**
 * Get the issue of the item that contains this record (i.e. MARC 773l of a
 * journal).
 *
 * @return string
 */
public function getContainerIssue()
{
    return (isset($this->fields['Issue'])) ? $this->fields['Issue'][0] : '';
}
/**
* Get the start page of the item that contains this record (i.e. MARC 773q of a
* journal).
*
* @return string
*/
public function getContainerStartPage()
{
return (isset($this->fields['StartPage']))
? $this->fields['StartPage'][0] : '';
}
/**
* Get the end page of the item that contains this record.
*
* @return string
*/
public function getContainerEndPage()
{
if (isset($this->fields['EndPage'])) {
return $this->fields['EndPage'][0];
} else if (isset($this->fields['PageCount'])
&& $this->fields['PageCount'] > 1
&& intval($this->fields['StartPage'][0]) > 0
) {
return $this->fields['StartPage'][0] + $this->fields['PageCount'][0] - 1;
}
return $this->getContainerStartPage();
}
/**
* Get a full, free-form reference to the context of the item that contains this
* record (i.e. volume, year, issue, pages).
*
* @return string
*/
public function getContainerReference()
{
$str = '';
$vol = $this->getContainerVolume();
if (!empty($vol)) {
$str .= $this->translate('citation_volume_abbrev')
. ' ' . $vol;
}
$no = $this->getContainerIssue();
if (!empty($no)) {
if (strlen($str) > 0) {
$str .= '; ';
}
$str .= $this->translate('citation_issue_abbrev')
. ' ' . $no;
}
$start = $this->getContainerStartPage();
if (!empty($start)) {
if (strlen($str) > 0) {
$str .= '; ';
}
$end = $this->getContainerEndPage();
if ($start == $end) {
$str .= $this->translate('citation_singlepage_abbrev')
. ' ' . $start;
} else {
$str .= $this->translate('citation_multipage_abbrev')
. ' ' . $start . ' - ' . $end;
}
}
return $str;
}
}
| 1 | 24,974 | I wonder if it would be cleaner to refactor all of this similar to the SolrMarc driver, so we have a property with Summon field names associated with types, and we iterate through it in a loop... that way we don't have to repeat the loop code four times with different variable names. | vufind-org-vufind | php |
@@ -261,7 +261,7 @@ public class XCJFQuery extends Query {
}
private DocSet getDocSet() throws IOException {
- SolrClientCache solrClientCache = new SolrClientCache();
+ SolrClientCache solrClientCache = searcher.getCore().getCoreContainer().getSolrClientCache();
TupleStream solrStream;
if (zkHost != null || solrUrl == null) {
solrStream = createCloudSolrStream(solrClientCache); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search.join;
import java.io.IOException;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.FixedBitSet;
import org.apache.solr.client.solrj.io.SolrClientCache;
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.eq.FieldEqualitor;
import org.apache.solr.client.solrj.io.stream.CloudSolrStream;
import org.apache.solr.client.solrj.io.stream.SolrStream;
import org.apache.solr.client.solrj.io.stream.StreamContext;
import org.apache.solr.client.solrj.io.stream.TupleStream;
import org.apache.solr.client.solrj.io.stream.UniqueStream;
import org.apache.solr.client.solrj.io.stream.expr.StreamExpression;
import org.apache.solr.client.solrj.io.stream.expr.StreamExpressionNamedParameter;
import org.apache.solr.cloud.CloudDescriptor;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.schema.FieldType;
import org.apache.solr.search.BitDocSet;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.DocSetUtil;
import org.apache.solr.search.Filter;
import org.apache.solr.search.SolrIndexSearcher;
public class XCJFQuery extends Query {
protected final String query;
protected final String zkHost;
protected final String solrUrl;
protected final String collection;
protected final String fromField;
protected final String toField;
protected final boolean routedByJoinKey;
protected final long timestamp;
protected final int ttl;
protected SolrParams otherParams;
protected String otherParamsString;
public XCJFQuery(String query, String zkHost, String solrUrl, String collection, String fromField, String toField,
boolean routedByJoinKey, int ttl, SolrParams otherParams) {
this.query = query;
this.zkHost = zkHost;
this.solrUrl = solrUrl;
this.collection = collection;
this.fromField = fromField;
this.toField = toField;
this.routedByJoinKey = routedByJoinKey;
this.timestamp = System.nanoTime();
this.ttl = ttl;
this.otherParams = otherParams;
// SolrParams doesn't implement equals(), so use this string to compare them
if (otherParams != null) {
this.otherParamsString = otherParams.toString();
}
}
private interface JoinKeyCollector {
void collect(Object value) throws IOException;
DocSet getDocSet() throws IOException;
}
private class TermsJoinKeyCollector implements JoinKeyCollector {
FieldType fieldType;
SolrIndexSearcher searcher;
TermsEnum termsEnum;
BytesRefBuilder bytes;
PostingsEnum postingsEnum;
FixedBitSet bitSet;
public TermsJoinKeyCollector(FieldType fieldType, Terms terms, SolrIndexSearcher searcher) throws IOException {
this.fieldType = fieldType;
this.searcher = searcher;
termsEnum = terms.iterator();
bytes = new BytesRefBuilder();
bitSet = new FixedBitSet(searcher.maxDoc());
}
@Override
public void collect(Object value) throws IOException {
fieldType.readableToIndexed((String) value, bytes);
if (termsEnum.seekExact(bytes.get())) {
postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.NONE);
bitSet.or(postingsEnum);
}
}
@Override
public DocSet getDocSet() throws IOException {
if (searcher.getIndexReader().hasDeletions()) {
bitSet.and(searcher.getLiveDocSet().getBits());
}
return new BitDocSet(bitSet);
}
}
private class PointJoinKeyCollector extends GraphPointsCollector implements JoinKeyCollector {
SolrIndexSearcher searcher;
public PointJoinKeyCollector(SolrIndexSearcher searcher) {
super(searcher.getSchema().getField(toField), null, null);
this.searcher = searcher;
}
@Override
public void collect(Object value) throws IOException {
if (value instanceof Long || value instanceof Integer) {
set.add(((Number) value).longValue());
} else {
throw new UnsupportedOperationException("Unsupported field type for XCJFQuery");
}
}
@Override
public DocSet getDocSet() throws IOException {
Query query = getResultQuery(searcher.getSchema().getField(toField), false);
if (query == null) {
return DocSet.empty();
}
return DocSetUtil.createDocSet(searcher, query, null);
}
}
private class XCJFQueryWeight extends ConstantScoreWeight {
private SolrIndexSearcher searcher;
private ScoreMode scoreMode;
private Filter filter;
public XCJFQueryWeight(SolrIndexSearcher searcher, ScoreMode scoreMode, float score) {
super(XCJFQuery.this, score);
this.scoreMode = scoreMode;
this.searcher = searcher;
}
private String createHashRangeFq() {
if (routedByJoinKey) {
ClusterState clusterState = searcher.getCore().getCoreContainer().getZkController().getClusterState();
CloudDescriptor desc = searcher.getCore().getCoreDescriptor().getCloudDescriptor();
Slice slice = clusterState.getCollection(desc.getCollectionName()).getSlicesMap().get(desc.getShardId());
DocRouter.Range range = slice.getRange();
// In CompositeIdRouter, the routing prefix only affects the top 16 bits
int min = range.min & 0xffff0000;
int max = range.max | 0x0000ffff;
return String.format(Locale.ROOT, "{!hash_range f=%s l=%d u=%d}", fromField, min, max);
} else {
return null;
}
}
private TupleStream createCloudSolrStream(SolrClientCache solrClientCache) throws IOException {
String streamZkHost;
if (zkHost != null) {
streamZkHost = zkHost;
} else {
streamZkHost = searcher.getCore().getCoreContainer().getZkController().getZkServerAddress();
}
ModifiableSolrParams params = new ModifiableSolrParams(otherParams);
params.set(CommonParams.Q, query);
String fq = createHashRangeFq();
if (fq != null) {
params.add(CommonParams.FQ, fq);
}
params.set(CommonParams.FL, fromField);
params.set(CommonParams.SORT, fromField + " asc");
params.set(CommonParams.QT, "/export");
params.set(CommonParams.WT, CommonParams.JAVABIN);
StreamContext streamContext = new StreamContext();
streamContext.setSolrClientCache(solrClientCache);
TupleStream cloudSolrStream = new CloudSolrStream(streamZkHost, collection, params);
TupleStream uniqueStream = new UniqueStream(cloudSolrStream, new FieldEqualitor(fromField));
uniqueStream.setStreamContext(streamContext);
return uniqueStream;
}
private TupleStream createSolrStream() {
StreamExpression searchExpr = new StreamExpression("search")
.withParameter(collection)
.withParameter(new StreamExpressionNamedParameter(CommonParams.Q, query));
String fq = createHashRangeFq();
if (fq != null) {
searchExpr.withParameter(new StreamExpressionNamedParameter(CommonParams.FQ, fq));
}
searchExpr.withParameter(new StreamExpressionNamedParameter(CommonParams.FL, fromField))
.withParameter(new StreamExpressionNamedParameter(CommonParams.SORT, fromField + " asc"))
.withParameter(new StreamExpressionNamedParameter(CommonParams.QT, "/export"));
for (Map.Entry<String,String[]> entry : otherParams) {
for (String value : entry.getValue()) {
searchExpr.withParameter(new StreamExpressionNamedParameter(entry.getKey(), value));
}
}
StreamExpression uniqueExpr = new StreamExpression("unique");
uniqueExpr.withParameter(searchExpr)
.withParameter(new StreamExpressionNamedParameter("over", fromField));
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("expr", uniqueExpr.toString());
params.set(CommonParams.QT, "/stream");
params.set(CommonParams.WT, CommonParams.JAVABIN);
return new SolrStream(solrUrl + "/" + collection, params);
}
private DocSet getDocSet() throws IOException {
SolrClientCache solrClientCache = new SolrClientCache();
TupleStream solrStream;
if (zkHost != null || solrUrl == null) {
solrStream = createCloudSolrStream(solrClientCache);
} else {
solrStream = createSolrStream();
}
FieldType fieldType = searcher.getSchema().getFieldType(toField);
JoinKeyCollector collector;
if (fieldType.isPointField()) {
collector = new PointJoinKeyCollector(searcher);
} else {
Terms terms = searcher.getSlowAtomicReader().terms(toField);
if (terms == null) {
return DocSet.empty();
}
collector = new TermsJoinKeyCollector(fieldType, terms, searcher);
}
try {
solrStream.open();
while (true) {
Tuple tuple = solrStream.read();
if (tuple.EXCEPTION) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, tuple.getException());
}
if (tuple.EOF) {
break;
}
Object value = tuple.get(fromField);
collector.collect(value);
}
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
} finally {
solrStream.close();
solrClientCache.close();
}
return collector.getDocSet();
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
if (filter == null) {
filter = getDocSet().getTopFilter();
}
DocIdSet readerSet = filter.getDocIdSet(context, null);
if (readerSet == null) {
return null;
}
DocIdSetIterator readerSetIterator = readerSet.iterator();
if (readerSetIterator == null) {
return null;
}
return new ConstantScoreScorer(this, score(), scoreMode, readerSetIterator);
}
@Override
public boolean isCacheable(LeafReaderContext ctx) {
return false;
}
}
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new XCJFQueryWeight((SolrIndexSearcher) searcher, scoreMode, boost);
}
@Override
public void visit(QueryVisitor visitor) {
visitor.visitLeaf(this);
}
@Override
public int hashCode() {
final int prime = 31;
int result = classHash();
result = prime * result + Objects.hashCode(query);
result = prime * result + Objects.hashCode(zkHost);
result = prime * result + Objects.hashCode(solrUrl);
result = prime * result + Objects.hashCode(collection);
result = prime * result + Objects.hashCode(fromField);
result = prime * result + Objects.hashCode(toField);
result = prime * result + Objects.hashCode(routedByJoinKey);
result = prime * result + Objects.hashCode(otherParamsString);
// timestamp and ttl should not be included in hash code
return result;
}
@Override
public boolean equals(Object other) {
return sameClassAs(other) &&
equalsTo(getClass().cast(other));
}
private boolean equalsTo(XCJFQuery other) {
return Objects.equals(query, other.query) &&
Objects.equals(zkHost, other.zkHost) &&
Objects.equals(solrUrl, other.solrUrl) &&
Objects.equals(collection, other.collection) &&
Objects.equals(fromField, other.fromField) &&
Objects.equals(toField, other.toField) &&
Objects.equals(routedByJoinKey, other.routedByJoinKey) &&
Objects.equals(otherParamsString, other.otherParamsString) &&
TimeUnit.SECONDS.convert(Math.abs(timestamp - other.timestamp), TimeUnit.NANOSECONDS) < Math.min(ttl, other.ttl);
}
@Override
public String toString(String field) {
return String.format(Locale.ROOT, "{!xcjf collection=%s from=%s to=%s routed=%b ttl=%d}%s",
collection, fromField, toField, routedByJoinKey, ttl, query.toString());
}
}
| 1 | 34,145 | Need the `solrClientCache.close();` further down in the method be removed since a shared cache is now used? | apache-lucene-solr | java |
@@ -38,6 +38,9 @@ type (
ClusterMetadataRow struct {
ImmutableData []byte
ImmutableDataEncoding string
+ Data []byte
+ DataEncoding string
+ Version int64
}
// ClusterMembershipRow represents a row in the cluster_membership table | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package sqlplugin
import (
"time"
enumspb "go.temporal.io/api/enums/v1"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives"
)
type (
// ClusterMetadataRow represents a row in the cluster_metadata table
ClusterMetadataRow struct {
ImmutableData []byte
ImmutableDataEncoding string
}
// ClusterMembershipRow represents a row in the cluster_membership table
ClusterMembershipRow struct {
Role persistence.ServiceType
HostID []byte
RPCAddress string
RPCPort uint16
SessionStart time.Time
LastHeartbeat time.Time
RecordExpiry time.Time
InsertionOrder uint64
}
// ClusterMembershipFilter is used for GetClusterMembership queries
ClusterMembershipFilter struct {
RPCAddressEquals string
HostIDEquals []byte
HostIDGreaterThan []byte
RoleEquals persistence.ServiceType
LastHeartbeatAfter time.Time
RecordExpiryAfter time.Time
SessionStartedAfter time.Time
MaxRecordCount int
}
// PruneClusterMembershipFilter is used for PruneClusterMembership queries
PruneClusterMembershipFilter struct {
PruneRecordsBefore time.Time
MaxRecordsAffected int
}
// NamespaceRow represents a row in namespace table
NamespaceRow struct {
ID primitives.UUID
Name string
Data []byte
DataEncoding string
IsGlobal bool
NotificationVersion int64
}
// NamespaceFilter contains the column names within namespace table that
// can be used to filter results through a WHERE clause. When ID is not
// nil, it will be used for WHERE condition. If ID is nil and Name is non-nil,
// Name will be used for WHERE condition. When both ID and Name are nil,
// no WHERE clause will be used
NamespaceFilter struct {
ID *primitives.UUID
Name *string
GreaterThanID *primitives.UUID
PageSize *int
}
// NamespaceMetadataRow represents a row in namespace_metadata table
NamespaceMetadataRow struct {
NotificationVersion int64
}
// ShardsRow represents a row in shards table
ShardsRow struct {
ShardID int64
RangeID int64
Data []byte
DataEncoding string
}
// ShardsFilter contains the column names within shards table that
// can be used to filter results through a WHERE clause
ShardsFilter struct {
ShardID int64
}
// ExecutionsRow represents a row in executions table
ExecutionsRow struct {
ShardID int
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
NextEventID int64
LastWriteVersion int64
Data []byte
DataEncoding string
State []byte
StateEncoding string
VersionHistories []byte
VersionHistoriesEncoding string
}
// ExecutionsFilter contains the column names within executions table that
// can be used to filter results through a WHERE clause
ExecutionsFilter struct {
ShardID int
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
}
// CurrentExecutionsRow represents a row in current_executions table
CurrentExecutionsRow struct {
ShardID int64
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
CreateRequestID string
State enumsspb.WorkflowExecutionState
Status enumspb.WorkflowExecutionStatus
LastWriteVersion int64
StartVersion int64
}
// CurrentExecutionsFilter contains the column names within current_executions table that
// can be used to filter results through a WHERE clause
CurrentExecutionsFilter struct {
ShardID int64
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
}
// BufferedEventsRow represents a row in buffered_events table
BufferedEventsRow struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
Data []byte
DataEncoding string
}
// BufferedEventsFilter contains the column names within buffered_events table that
// can be used to filter results through a WHERE clause
BufferedEventsFilter struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
}
// TasksRow represents a row in tasks table
TasksRow struct {
RangeHash uint32
TaskQueueID []byte
TaskID int64
Data []byte
DataEncoding string
}
// TasksFilter contains the column names within tasks table that
// can be used to filter results through a WHERE clause
TasksFilter struct {
RangeHash uint32
TaskQueueID []byte
TaskID *int64
MinTaskID *int64
MaxTaskID *int64
TaskIDLessThanEquals *int64
Limit *int
PageSize *int
}
// TaskQueuesRow represents a row in task_queues table
TaskQueuesRow struct {
RangeHash uint32
TaskQueueID []byte
RangeID int64
Data []byte
DataEncoding string
}
// TaskQueuesFilter contains the column names within task_queues table that
// can be used to filter results through a WHERE clause
TaskQueuesFilter struct {
RangeHash uint32
RangeHashGreaterThanEqualTo uint32
RangeHashLessThanEqualTo uint32
TaskQueueID []byte
TaskQueueIDGreaterThan []byte
RangeID *int64
PageSize *int
}
// ReplicationTasksRow represents a row in replication_tasks table
ReplicationTasksRow struct {
ShardID int32
TaskID int64
Data []byte
DataEncoding string
}
// ReplicationTaskDLQRow represents a row in replication_tasks_dlq table
ReplicationTaskDLQRow struct {
SourceClusterName string
ShardID int
TaskID int64
Data []byte
DataEncoding string
}
// ReplicationTasksFilter contains the column names within replication_tasks table that
// can be used to filter results through a WHERE clause
ReplicationTasksFilter struct {
ShardID int32
TaskID int64
InclusiveEndTaskID int64
MinTaskID int64
MaxTaskID int64
PageSize int
}
// ReplicationTasksDLQFilter contains the column names within replication_tasks_dlq table that
// can be used to filter results through a WHERE clause
ReplicationTasksDLQFilter struct {
ReplicationTasksFilter
SourceClusterName string
}
// EventsRow represents a row in events table
EventsRow struct {
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
FirstEventID int64
BatchVersion int64
RangeID int64
TxID int64
Data []byte
DataEncoding string
}
// EventsFilter contains the column names within events table that
// can be used to filter results through a WHERE clause
EventsFilter struct {
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
FirstEventID *int64
NextEventID *int64
PageSize *int
}
// HistoryNodeRow represents a row in history_node table
HistoryNodeRow struct {
ShardID int
TreeID primitives.UUID
BranchID primitives.UUID
NodeID int64
// use pointer so that it's easier to multiple by -1
TxnID *int64
Data []byte
DataEncoding string
}
// HistoryNodeFilter contains the column names within history_node table that
// can be used to filter results through a WHERE clause
HistoryNodeFilter struct {
ShardID int
TreeID primitives.UUID
BranchID primitives.UUID
// Inclusive
MinNodeID *int64
// Exclusive
MaxNodeID *int64
PageSize *int
}
// HistoryTreeRow represents a row in history_tree table
HistoryTreeRow struct {
ShardID int
TreeID primitives.UUID
BranchID primitives.UUID
Data []byte
DataEncoding string
}
// HistoryTreeFilter contains the column names within history_tree table that
// can be used to filter results through a WHERE clause
HistoryTreeFilter struct {
ShardID int
TreeID primitives.UUID
BranchID primitives.UUID
}
// ActivityInfoMapsRow represents a row in activity_info_maps table
ActivityInfoMapsRow struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
ScheduleID int64
Data []byte
DataEncoding string
}
// ActivityInfoMapsFilter contains the column names within activity_info_maps table that
// can be used to filter results through a WHERE clause
ActivityInfoMapsFilter struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
ScheduleID *int64
}
// TimerInfoMapsRow represents a row in timer_info_maps table
TimerInfoMapsRow struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
TimerID string
Data []byte
DataEncoding string
}
// TimerInfoMapsFilter contains the column names within timer_info_maps table that
// can be used to filter results through a WHERE clause
TimerInfoMapsFilter struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
TimerID *string
}
// ChildExecutionInfoMapsRow represents a row in child_execution_info_maps table
ChildExecutionInfoMapsRow struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
InitiatedID int64
Data []byte
DataEncoding string
}
// ChildExecutionInfoMapsFilter contains the column names within child_execution_info_maps table that
// can be used to filter results through a WHERE clause
ChildExecutionInfoMapsFilter struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
InitiatedID *int64
}
// RequestCancelInfoMapsRow represents a row in request_cancel_info_maps table
RequestCancelInfoMapsRow struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
InitiatedID int64
Data []byte
DataEncoding string
}
// RequestCancelInfoMapsFilter contains the column names within request_cancel_info_maps table that
// can be used to filter results through a WHERE clause
RequestCancelInfoMapsFilter struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
InitiatedID *int64
}
// SignalInfoMapsRow represents a row in signal_info_maps table
SignalInfoMapsRow struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
InitiatedID int64
Data []byte
DataEncoding string
}
// SignalInfoMapsFilter contains the column names within signal_info_maps table that
// can be used to filter results through a WHERE clause
SignalInfoMapsFilter struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
InitiatedID *int64
}
// SignalsRequestedSetsRow represents a row in signals_requested_sets table
SignalsRequestedSetsRow struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
SignalID string
}
// SignalsRequestedSetsFilter contains the column names within signals_requested_sets table that
// can be used to filter results through a WHERE clause
SignalsRequestedSetsFilter struct {
ShardID int32
NamespaceID primitives.UUID
WorkflowID string
RunID primitives.UUID
SignalID *string
}
// VisibilityRow represents a row in executions_visibility table
VisibilityRow struct {
NamespaceID string
RunID string
WorkflowTypeName string
WorkflowID string
StartTime time.Time
ExecutionTime time.Time
Status int32
CloseTime *time.Time
HistoryLength *int64
Memo []byte
Encoding string
}
// VisibilityFilter contains the column names within executions_visibility table that
// can be used to filter results through a WHERE clause
VisibilityFilter struct {
NamespaceID string
RunID *string
WorkflowID *string
WorkflowTypeName *string
Status int32
MinStartTime *time.Time
MaxStartTime *time.Time
PageSize *int
}
// QueueRow represents a row in queue table
QueueRow struct {
QueueType persistence.QueueType
MessageID int64
MessagePayload []byte
}
// QueueMetadataRow represents a row in queue_metadata table
QueueMetadataRow struct {
QueueType persistence.QueueType
Data []byte
}
)
| 1 | 10,332 | according to the PR (schema changes?) these 2 field should be removed? | temporalio-temporal | go |
@@ -373,16 +373,13 @@ func (s *Service) createStressChaos(exp *core.ExperimentInfo, kubeCli client.Cli
Mode: v1alpha1.PodMode(exp.Scope.Mode),
Value: exp.Scope.Value,
},
+ ContainerNames: exp.Target.StressChaos.ContainerNames,
},
Stressors: stressors,
StressngStressors: exp.Target.StressChaos.StressngStressors,
},
}
- if exp.Target.StressChaos.ContainerName != nil {
- chaos.Spec.ContainerNames = []string{*exp.Target.StressChaos.ContainerName}
- }
-
if exp.Scheduler.Duration != "" {
chaos.Spec.Duration = &exp.Scheduler.Duration
} | 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package experiment
import (
"context"
"encoding/json"
"fmt"
"net/http"
"reflect"
"sort"
"strings"
"sync"
"time"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
"golang.org/x/sync/errgroup"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/finalizers"
"github.com/chaos-mesh/chaos-mesh/pkg/apiserver/utils"
"github.com/chaos-mesh/chaos-mesh/pkg/clientpool"
dashboardconfig "github.com/chaos-mesh/chaos-mesh/pkg/config/dashboard"
"github.com/chaos-mesh/chaos-mesh/pkg/core"
)
// log is the package-scoped logger for the experiment API handlers.
var log = ctrl.Log.WithName("experiment api")
// Service defines a handler service for experiments. It exposes the HTTP
// endpoints mounted by Register and talks to both the persistence stores
// and the Kubernetes API (through per-request clients).
type Service struct {
	archive core.ExperimentStore                  // store for archived experiment records
	event   core.EventStore                       // store for chaos event records
	conf    *dashboardconfig.ChaosDashboardConfig // dashboard configuration
	scheme  *runtime.Scheme                       // Kubernetes runtime scheme
}
// NewService returns an experiment service instance wired with the given
// archive/event stores, dashboard configuration, and runtime scheme.
func NewService(
	archive core.ExperimentStore,
	event core.EventStore,
	conf *dashboardconfig.ChaosDashboardConfig,
	scheme *runtime.Scheme,
) *Service {
	svc := &Service{
		archive: archive,
		event:   event,
		conf:    conf,
		scheme:  scheme,
	}
	return svc
}
// Register mounts HTTP handler on the mux. All routes live under the
// "/experiments" group and dispatch to the corresponding Service methods.
func Register(r *gin.RouterGroup, s *Service) {
	endpoint := r.Group("/experiments")

	// Table-driven registration: method + relative path + handler.
	routes := []struct {
		method  string
		path    string
		handler gin.HandlerFunc
	}{
		{http.MethodGet, "", s.listExperiments},
		{http.MethodPost, "/new", s.createExperiment},
		{http.MethodGet, "/detail/:uid", s.getExperimentDetail},
		{http.MethodDelete, "/:uid", s.deleteExperiment},
		{http.MethodDelete, "/", s.batchDeleteExperiment},
		{http.MethodPut, "/update", s.updateExperiment},
		{http.MethodPut, "/pause/:uid", s.pauseExperiment},
		{http.MethodPut, "/start/:uid", s.startExperiment},
		{http.MethodGet, "/state", s.state},
	}
	for _, rt := range routes {
		endpoint.Handle(rt.method, rt.path, rt.handler)
	}
}
// ChaosState defines the number of chaos experiments of each phase.
type ChaosState struct {
	Injecting int `json:"injecting"` // experiments currently injecting faults
	Running   int `json:"running"`   // experiments in the running phase
	Finished  int `json:"finished"`  // experiments that have completed
	Paused    int `json:"paused"`    // experiments that are paused
}
// Base represents the base info of an experiment: the minimal triple that
// identifies a chaos object in the cluster.
type Base struct {
	Kind      string `json:"kind"`      // chaos kind, e.g. PodChaos, NetworkChaos
	Namespace string `json:"namespace"` // Kubernetes namespace of the object
	Name      string `json:"name"`      // object name
}
// Experiment defines the basic information of an experiment
type Experiment struct {
	Base
	UID           string `json:"uid"`                      // Kubernetes object UID
	Created       string `json:"created_at"`               // start time, RFC3339-formatted
	Status        string `json:"status"`                   // current phase as reported by utils.GetChaosState
	FailedMessage string `json:"failed_message,omitempty"` // failure detail, omitted when empty
}
// Detail represents an experiment instance: the summary info plus the full
// Kubernetes object description (type meta, object meta and spec).
type Detail struct {
	Experiment
	KubeObject core.KubeObjectDesc `json:"kube_object"`
}
// createExperimentFunc builds and submits a chaos object of one concrete kind.
type createExperimentFunc func(*core.ExperimentInfo, client.Client) error

// updateExperimentFunc applies an updated object description of one concrete
// kind. (Not referenced in this chunk — presumably used by updateExperiment.)
type updateExperimentFunc func(*core.KubeObjectDesc, client.Client) error
// StatusResponse defines a common status struct.
type StatusResponse struct {
	Status string `json:"status"` // e.g. "success"
}
// @Summary Create a new chaos experiment.
// @Description Create a new chaos experiment.
// @Tags experiments
// @Produce json
// @Param request body core.ExperimentInfo true "Request body"
// @Success 200 {object} core.ExperimentInfo
// @Failure 400 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /experiments/new [post]
func (s *Service) createExperiment(c *gin.Context) {
	kubeCli, err := clientpool.ExtractTokenAndGetClient(c.Request.Header)
	if err != nil {
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(err))
		return
	}

	exp := &core.ExperimentInfo{}
	if bindErr := c.ShouldBindJSON(exp); bindErr != nil {
		c.Status(http.StatusBadRequest)
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(bindErr))
		return
	}

	// Dispatch to the kind-specific constructor.
	handlers := map[string]createExperimentFunc{
		v1alpha1.KindPodChaos:     s.createPodChaos,
		v1alpha1.KindNetworkChaos: s.createNetworkChaos,
		v1alpha1.KindIOChaos:      s.createIOChaos,
		v1alpha1.KindStressChaos:  s.createStressChaos,
		v1alpha1.KindTimeChaos:    s.createTimeChaos,
		v1alpha1.KindKernelChaos:  s.createKernelChaos,
		v1alpha1.KindDNSChaos:     s.createDNSChaos,
		v1alpha1.KindAWSChaos:     s.createAWSChaos,
		v1alpha1.KindGCPChaos:     s.createGCPChaos,
	}
	handler, supported := handlers[exp.Target.Kind]
	if !supported {
		c.Status(http.StatusBadRequest)
		_ = c.Error(utils.ErrInvalidRequest.New(exp.Target.Kind + " is not supported"))
		return
	}

	if createErr := handler(exp, kubeCli); createErr != nil {
		c.Status(http.StatusInternalServerError)
		_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(createErr))
		return
	}

	c.JSON(http.StatusOK, exp)
}
// createPodChaos builds a PodChaos object from the request info and submits
// it to the cluster. Container names are only attached for container-kill,
// and a duration is only set when the scheduler specifies one.
func (s *Service) createPodChaos(exp *core.ExperimentInfo, kubeCli client.Client) error {
	action := v1alpha1.PodChaosAction(exp.Target.PodChaos.Action)

	spec := v1alpha1.PodChaosSpec{
		ContainerSelector: v1alpha1.ContainerSelector{
			PodSelector: v1alpha1.PodSelector{
				Selector: exp.Scope.ParseSelector(),
				Mode:     v1alpha1.PodMode(exp.Scope.Mode),
				Value:    exp.Scope.Value,
			},
		},
		Action:      action,
		GracePeriod: exp.Target.PodChaos.GracePeriod,
	}
	if action == v1alpha1.ContainerKillAction {
		spec.ContainerNames = exp.Target.PodChaos.ContainerNames
	}
	if exp.Scheduler.Duration != "" {
		spec.Duration = &exp.Scheduler.Duration
	}

	chaos := &v1alpha1.PodChaos{
		ObjectMeta: metav1.ObjectMeta{
			Name:        exp.Name,
			Namespace:   exp.Namespace,
			Labels:      exp.Labels,
			Annotations: exp.Annotations,
		},
		Spec: spec,
	}
	return kubeCli.Create(context.Background(), chaos)
}
// createNetworkChaos builds a NetworkChaos object from the request info and
// submits it to the cluster. The optional target scope and duration are only
// set when supplied.
func (s *Service) createNetworkChaos(exp *core.ExperimentInfo, kubeCli client.Client) error {
	cfg := exp.Target.NetworkChaos

	spec := v1alpha1.NetworkChaosSpec{
		PodSelector: v1alpha1.PodSelector{
			Selector: exp.Scope.ParseSelector(),
			Mode:     v1alpha1.PodMode(exp.Scope.Mode),
			Value:    exp.Scope.Value,
		},
		Action: v1alpha1.NetworkChaosAction(cfg.Action),
		TcParameter: v1alpha1.TcParameter{
			Delay:     cfg.Delay,
			Loss:      cfg.Loss,
			Duplicate: cfg.Duplicate,
			Corrupt:   cfg.Corrupt,
			Bandwidth: cfg.Bandwidth,
		},
		Direction:       v1alpha1.Direction(cfg.Direction),
		ExternalTargets: cfg.ExternalTargets,
	}
	// A secondary pod selector for directional traffic targets, if requested.
	if scope := cfg.TargetScope; scope != nil {
		spec.Target = &v1alpha1.PodSelector{
			Selector: scope.ParseSelector(),
			Mode:     v1alpha1.PodMode(scope.Mode),
			Value:    scope.Value,
		}
	}
	if exp.Scheduler.Duration != "" {
		spec.Duration = &exp.Scheduler.Duration
	}

	chaos := &v1alpha1.NetworkChaos{
		ObjectMeta: metav1.ObjectMeta{
			Name:        exp.Name,
			Namespace:   exp.Namespace,
			Labels:      exp.Labels,
			Annotations: exp.Annotations,
		},
		Spec: spec,
	}
	return kubeCli.Create(context.Background(), chaos)
}
// createIOChaos builds an IOChaos object from the request info and submits it
// to the cluster. A single container name and a duration are only attached
// when supplied.
func (s *Service) createIOChaos(exp *core.ExperimentInfo, kubeCli client.Client) error {
	cfg := exp.Target.IOChaos

	spec := v1alpha1.IOChaosSpec{
		ContainerSelector: v1alpha1.ContainerSelector{
			PodSelector: v1alpha1.PodSelector{
				Selector: exp.Scope.ParseSelector(),
				Mode:     v1alpha1.PodMode(exp.Scope.Mode),
				Value:    exp.Scope.Value,
			},
		},
		Action:     v1alpha1.IOChaosType(cfg.Action),
		Delay:      cfg.Delay,
		Errno:      cfg.Errno,
		Attr:       cfg.Attr,
		Mistake:    cfg.Mistake,
		Path:       cfg.Path,
		Methods:    cfg.Methods,
		Percent:    cfg.Percent,
		VolumePath: cfg.VolumePath,
	}
	if cfg.ContainerName != "" {
		spec.ContainerNames = []string{cfg.ContainerName}
	}
	if exp.Scheduler.Duration != "" {
		spec.Duration = &exp.Scheduler.Duration
	}

	chaos := &v1alpha1.IOChaos{
		ObjectMeta: metav1.ObjectMeta{
			Name:        exp.Name,
			Namespace:   exp.Namespace,
			Labels:      exp.Labels,
			Annotations: exp.Annotations,
		},
		Spec: spec,
	}
	return kubeCli.Create(context.Background(), chaos)
}
// createTimeChaos builds a TimeChaos object from the request info and submits
// it to the cluster. Container names and duration are only set when supplied.
func (s *Service) createTimeChaos(exp *core.ExperimentInfo, kubeCli client.Client) error {
	cfg := exp.Target.TimeChaos

	spec := v1alpha1.TimeChaosSpec{
		ContainerSelector: v1alpha1.ContainerSelector{
			PodSelector: v1alpha1.PodSelector{
				Selector: exp.Scope.ParseSelector(),
				Mode:     v1alpha1.PodMode(exp.Scope.Mode),
				Value:    exp.Scope.Value,
			},
		},
		TimeOffset: cfg.TimeOffset,
		ClockIds:   cfg.ClockIDs,
	}
	if len(cfg.ContainerNames) != 0 {
		spec.ContainerNames = cfg.ContainerNames
	}
	if exp.Scheduler.Duration != "" {
		spec.Duration = &exp.Scheduler.Duration
	}

	chaos := &v1alpha1.TimeChaos{
		ObjectMeta: metav1.ObjectMeta{
			Name:        exp.Name,
			Namespace:   exp.Namespace,
			Labels:      exp.Labels,
			Annotations: exp.Annotations,
		},
		Spec: spec,
	}
	return kubeCli.Create(context.Background(), chaos)
}
// createKernelChaos builds a KernelChaos object from the request info and
// submits it to the cluster. Duration is only set when supplied.
func (s *Service) createKernelChaos(exp *core.ExperimentInfo, kubeCli client.Client) error {
	spec := v1alpha1.KernelChaosSpec{
		PodSelector: v1alpha1.PodSelector{
			Selector: exp.Scope.ParseSelector(),
			Mode:     v1alpha1.PodMode(exp.Scope.Mode),
			Value:    exp.Scope.Value,
		},
		FailKernRequest: exp.Target.KernelChaos.FailKernRequest,
	}
	if exp.Scheduler.Duration != "" {
		spec.Duration = &exp.Scheduler.Duration
	}

	chaos := &v1alpha1.KernelChaos{
		ObjectMeta: metav1.ObjectMeta{
			Name:        exp.Name,
			Namespace:   exp.Namespace,
			Labels:      exp.Labels,
			Annotations: exp.Annotations,
		},
		Spec: spec,
	}
	return kubeCli.Create(context.Background(), chaos)
}
// createStressChaos builds a StressChaos object from the request info and
// submits it to the cluster. A stressor with a non-positive worker count is
// dropped so that only the requested stressors are submitted; when both (or
// neither) have workers, the stressors struct is passed through unchanged.
func (s *Service) createStressChaos(exp *core.ExperimentInfo, kubeCli client.Client) error {
	cfg := exp.Target.StressChaos
	cpu := cfg.Stressors.CPUStressor
	mem := cfg.Stressors.MemoryStressor

	var stressors *v1alpha1.Stressors
	switch {
	case cpu.Workers <= 0 && mem.Workers > 0:
		stressors = &v1alpha1.Stressors{MemoryStressor: mem}
	case mem.Workers <= 0 && cpu.Workers > 0:
		stressors = &v1alpha1.Stressors{CPUStressor: cpu}
	default:
		stressors = cfg.Stressors
	}

	spec := v1alpha1.StressChaosSpec{
		ContainerSelector: v1alpha1.ContainerSelector{
			PodSelector: v1alpha1.PodSelector{
				Selector: exp.Scope.ParseSelector(),
				Mode:     v1alpha1.PodMode(exp.Scope.Mode),
				Value:    exp.Scope.Value,
			},
		},
		Stressors:         stressors,
		StressngStressors: cfg.StressngStressors,
	}
	if cfg.ContainerName != nil {
		spec.ContainerNames = []string{*cfg.ContainerName}
	}
	if exp.Scheduler.Duration != "" {
		spec.Duration = &exp.Scheduler.Duration
	}

	chaos := &v1alpha1.StressChaos{
		ObjectMeta: metav1.ObjectMeta{
			Name:        exp.Name,
			Namespace:   exp.Namespace,
			Labels:      exp.Labels,
			Annotations: exp.Annotations,
		},
		Spec: spec,
	}
	return kubeCli.Create(context.Background(), chaos)
}
// createDNSChaos builds a DNSChaos object from the request info and submits
// it to the cluster. Container names and duration are only set when supplied.
func (s *Service) createDNSChaos(exp *core.ExperimentInfo, kubeCli client.Client) error {
	cfg := exp.Target.DNSChaos

	spec := v1alpha1.DNSChaosSpec{
		Action: v1alpha1.DNSChaosAction(cfg.Action),
		ContainerSelector: v1alpha1.ContainerSelector{
			PodSelector: v1alpha1.PodSelector{
				Selector: exp.Scope.ParseSelector(),
				Mode:     v1alpha1.PodMode(exp.Scope.Mode),
				Value:    exp.Scope.Value,
			},
		},
		DomainNamePatterns: cfg.DomainNamePatterns,
	}
	if len(cfg.ContainerNames) != 0 {
		spec.ContainerNames = cfg.ContainerNames
	}
	if exp.Scheduler.Duration != "" {
		spec.Duration = &exp.Scheduler.Duration
	}

	chaos := &v1alpha1.DNSChaos{
		ObjectMeta: metav1.ObjectMeta{
			Name:        exp.Name,
			Namespace:   exp.Namespace,
			Labels:      exp.Labels,
			Annotations: exp.Annotations,
		},
		Spec: spec,
	}
	return kubeCli.Create(context.Background(), chaos)
}
// createAWSChaos builds an AWSChaos object from the request info and submits
// it to the cluster. Duration is only set when supplied.
func (s *Service) createAWSChaos(exp *core.ExperimentInfo, kubeCli client.Client) error {
	cfg := exp.Target.AWSChaos

	spec := v1alpha1.AWSChaosSpec{
		Action:     v1alpha1.AWSChaosAction(cfg.Action),
		SecretName: cfg.SecretName,
		AWSSelector: v1alpha1.AWSSelector{
			AWSRegion:   cfg.AWSRegion,
			Ec2Instance: cfg.Ec2Instance,
			EbsVolume:   cfg.EbsVolume,
			DeviceName:  cfg.DeviceName,
		},
	}
	if exp.Scheduler.Duration != "" {
		spec.Duration = &exp.Scheduler.Duration
	}

	chaos := &v1alpha1.AWSChaos{
		ObjectMeta: metav1.ObjectMeta{
			Name:        exp.Name,
			Namespace:   exp.Namespace,
			Labels:      exp.Labels,
			Annotations: exp.Annotations,
		},
		Spec: spec,
	}
	return kubeCli.Create(context.Background(), chaos)
}
// createGCPChaos builds a GCPChaos object from the request info and submits
// it to the cluster. Duration is only set when supplied.
func (s *Service) createGCPChaos(exp *core.ExperimentInfo, kubeCli client.Client) error {
	cfg := exp.Target.GCPChaos

	spec := v1alpha1.GCPChaosSpec{
		Action:     v1alpha1.GCPChaosAction(cfg.Action),
		SecretName: cfg.SecretName,
		GCPSelector: v1alpha1.GCPSelector{
			Project:     cfg.Project,
			Zone:        cfg.Zone,
			Instance:    cfg.Instance,
			DeviceNames: cfg.DeviceNames,
		},
	}
	if exp.Scheduler.Duration != "" {
		spec.Duration = &exp.Scheduler.Duration
	}

	chaos := &v1alpha1.GCPChaos{
		ObjectMeta: metav1.ObjectMeta{
			Name:        exp.Name,
			Namespace:   exp.Namespace,
			Labels:      exp.Labels,
			Annotations: exp.Annotations,
		},
		Spec: spec,
	}
	return kubeCli.Create(context.Background(), chaos)
}
// getPodChaosDetail fetches a PodChaos object and converts it into the
// API Detail representation. Returns ErrNotFound when the object is absent.
func (s *Service) getPodChaosDetail(namespace string, name string, kubeCli client.Client) (Detail, error) {
	obj := &v1alpha1.PodChaos{}
	key := types.NamespacedName{Namespace: namespace, Name: name}

	err := kubeCli.Get(context.Background(), key, obj)
	if apierrors.IsNotFound(err) {
		return Detail{}, utils.ErrNotFound.NewWithNoMessage()
	}
	if err != nil {
		return Detail{}, err
	}

	gvk, gvkErr := apiutil.GVKForObject(obj, s.scheme)
	if gvkErr != nil {
		return Detail{}, gvkErr
	}

	info := obj.GetChaos()
	return Detail{
		Experiment: Experiment{
			Base: Base{
				Kind:      gvk.Kind,
				Namespace: obj.Namespace,
				Name:      obj.Name,
			},
			UID:     info.UID,
			Created: info.StartTime.Format(time.RFC3339),
			Status:  string(utils.GetChaosState(obj)),
		},
		KubeObject: core.KubeObjectDesc{
			TypeMeta: metav1.TypeMeta{
				APIVersion: gvk.GroupVersion().String(),
				Kind:       gvk.Kind,
			},
			Meta: core.KubeObjectMeta{
				Name:        obj.Name,
				Namespace:   obj.Namespace,
				Labels:      obj.Labels,
				Annotations: obj.Annotations,
			},
			Spec: obj.Spec,
		},
	}, nil
}
// getIOChaosDetail fetches an IOChaos object and converts it into the
// API Detail representation. Returns ErrNotFound when the object is absent.
func (s *Service) getIOChaosDetail(namespace string, name string, kubeCli client.Client) (Detail, error) {
	obj := &v1alpha1.IOChaos{}
	key := types.NamespacedName{Namespace: namespace, Name: name}

	err := kubeCli.Get(context.Background(), key, obj)
	if apierrors.IsNotFound(err) {
		return Detail{}, utils.ErrNotFound.NewWithNoMessage()
	}
	if err != nil {
		return Detail{}, err
	}

	gvk, gvkErr := apiutil.GVKForObject(obj, s.scheme)
	if gvkErr != nil {
		return Detail{}, gvkErr
	}

	info := obj.GetChaos()
	return Detail{
		Experiment: Experiment{
			Base: Base{
				Kind:      gvk.Kind,
				Namespace: obj.Namespace,
				Name:      obj.Name,
			},
			UID:     info.UID,
			Created: info.StartTime.Format(time.RFC3339),
			Status:  string(utils.GetChaosState(obj)),
		},
		KubeObject: core.KubeObjectDesc{
			TypeMeta: metav1.TypeMeta{
				APIVersion: gvk.GroupVersion().String(),
				Kind:       gvk.Kind,
			},
			Meta: core.KubeObjectMeta{
				Name:        obj.Name,
				Namespace:   obj.Namespace,
				Labels:      obj.Labels,
				Annotations: obj.Annotations,
			},
			Spec: obj.Spec,
		},
	}, nil
}
// getNetworkChaosDetail fetches a NetworkChaos object and converts it into
// the API Detail representation. Returns ErrNotFound when the object is absent.
func (s *Service) getNetworkChaosDetail(namespace string, name string, kubeCli client.Client) (Detail, error) {
	obj := &v1alpha1.NetworkChaos{}
	key := types.NamespacedName{Namespace: namespace, Name: name}

	err := kubeCli.Get(context.Background(), key, obj)
	if apierrors.IsNotFound(err) {
		return Detail{}, utils.ErrNotFound.NewWithNoMessage()
	}
	if err != nil {
		return Detail{}, err
	}

	gvk, gvkErr := apiutil.GVKForObject(obj, s.scheme)
	if gvkErr != nil {
		return Detail{}, gvkErr
	}

	info := obj.GetChaos()
	return Detail{
		Experiment: Experiment{
			Base: Base{
				Kind:      gvk.Kind,
				Namespace: obj.Namespace,
				Name:      obj.Name,
			},
			UID:     info.UID,
			Created: info.StartTime.Format(time.RFC3339),
			Status:  string(utils.GetChaosState(obj)),
		},
		KubeObject: core.KubeObjectDesc{
			TypeMeta: metav1.TypeMeta{
				APIVersion: gvk.GroupVersion().String(),
				Kind:       gvk.Kind,
			},
			Meta: core.KubeObjectMeta{
				Name:        obj.Name,
				Namespace:   obj.Namespace,
				Labels:      obj.Labels,
				Annotations: obj.Annotations,
			},
			Spec: obj.Spec,
		},
	}, nil
}
// getTimeChaosDetail fetches a TimeChaos object and converts it into the
// API Detail representation. Returns ErrNotFound when the object is absent.
func (s *Service) getTimeChaosDetail(namespace string, name string, kubeCli client.Client) (Detail, error) {
	obj := &v1alpha1.TimeChaos{}
	key := types.NamespacedName{Namespace: namespace, Name: name}

	err := kubeCli.Get(context.Background(), key, obj)
	if apierrors.IsNotFound(err) {
		return Detail{}, utils.ErrNotFound.NewWithNoMessage()
	}
	if err != nil {
		return Detail{}, err
	}

	gvk, gvkErr := apiutil.GVKForObject(obj, s.scheme)
	if gvkErr != nil {
		return Detail{}, gvkErr
	}

	info := obj.GetChaos()
	return Detail{
		Experiment: Experiment{
			Base: Base{
				Kind:      gvk.Kind,
				Namespace: obj.Namespace,
				Name:      obj.Name,
			},
			UID:     info.UID,
			Created: info.StartTime.Format(time.RFC3339),
			Status:  string(utils.GetChaosState(obj)),
		},
		KubeObject: core.KubeObjectDesc{
			TypeMeta: metav1.TypeMeta{
				APIVersion: gvk.GroupVersion().String(),
				Kind:       gvk.Kind,
			},
			Meta: core.KubeObjectMeta{
				Name:        obj.Name,
				Namespace:   obj.Namespace,
				Labels:      obj.Labels,
				Annotations: obj.Annotations,
			},
			Spec: obj.Spec,
		},
	}, nil
}
// getKernelChaosDetail fetches a KernelChaos object and converts it into the
// API Detail representation. Returns ErrNotFound when the object is absent.
func (s *Service) getKernelChaosDetail(namespace string, name string, kubeCli client.Client) (Detail, error) {
	obj := &v1alpha1.KernelChaos{}
	key := types.NamespacedName{Namespace: namespace, Name: name}

	err := kubeCli.Get(context.Background(), key, obj)
	if apierrors.IsNotFound(err) {
		return Detail{}, utils.ErrNotFound.NewWithNoMessage()
	}
	if err != nil {
		return Detail{}, err
	}

	gvk, gvkErr := apiutil.GVKForObject(obj, s.scheme)
	if gvkErr != nil {
		return Detail{}, gvkErr
	}

	info := obj.GetChaos()
	return Detail{
		Experiment: Experiment{
			Base: Base{
				Kind:      gvk.Kind,
				Namespace: obj.Namespace,
				Name:      obj.Name,
			},
			UID:     info.UID,
			Created: info.StartTime.Format(time.RFC3339),
			Status:  string(utils.GetChaosState(obj)),
		},
		KubeObject: core.KubeObjectDesc{
			TypeMeta: metav1.TypeMeta{
				APIVersion: gvk.GroupVersion().String(),
				Kind:       gvk.Kind,
			},
			Meta: core.KubeObjectMeta{
				Name:        obj.Name,
				Namespace:   obj.Namespace,
				Labels:      obj.Labels,
				Annotations: obj.Annotations,
			},
			Spec: obj.Spec,
		},
	}, nil
}
// getStressChaosDetail fetches a StressChaos object and converts it into the
// API Detail representation. Returns ErrNotFound when the object is absent.
func (s *Service) getStressChaosDetail(namespace string, name string, kubeCli client.Client) (Detail, error) {
	obj := &v1alpha1.StressChaos{}
	key := types.NamespacedName{Namespace: namespace, Name: name}

	err := kubeCli.Get(context.Background(), key, obj)
	if apierrors.IsNotFound(err) {
		return Detail{}, utils.ErrNotFound.NewWithNoMessage()
	}
	if err != nil {
		return Detail{}, err
	}

	gvk, gvkErr := apiutil.GVKForObject(obj, s.scheme)
	if gvkErr != nil {
		return Detail{}, gvkErr
	}

	info := obj.GetChaos()
	return Detail{
		Experiment: Experiment{
			Base: Base{
				Kind:      gvk.Kind,
				Namespace: obj.Namespace,
				Name:      obj.Name,
			},
			UID:     info.UID,
			Created: info.StartTime.Format(time.RFC3339),
			Status:  string(utils.GetChaosState(obj)),
		},
		KubeObject: core.KubeObjectDesc{
			TypeMeta: metav1.TypeMeta{
				APIVersion: gvk.GroupVersion().String(),
				Kind:       gvk.Kind,
			},
			Meta: core.KubeObjectMeta{
				Name:        obj.Name,
				Namespace:   obj.Namespace,
				Labels:      obj.Labels,
				Annotations: obj.Annotations,
			},
			Spec: obj.Spec,
		},
	}, nil
}
// getDNSChaosDetail fetches a DNSChaos object and converts it into the
// API Detail representation. Returns ErrNotFound when the object is absent.
func (s *Service) getDNSChaosDetail(namespace string, name string, kubeCli client.Client) (Detail, error) {
	obj := &v1alpha1.DNSChaos{}
	key := types.NamespacedName{Namespace: namespace, Name: name}

	err := kubeCli.Get(context.Background(), key, obj)
	if apierrors.IsNotFound(err) {
		return Detail{}, utils.ErrNotFound.NewWithNoMessage()
	}
	if err != nil {
		return Detail{}, err
	}

	gvk, gvkErr := apiutil.GVKForObject(obj, s.scheme)
	if gvkErr != nil {
		return Detail{}, gvkErr
	}

	info := obj.GetChaos()
	return Detail{
		Experiment: Experiment{
			Base: Base{
				Kind:      gvk.Kind,
				Namespace: obj.Namespace,
				Name:      obj.Name,
			},
			UID:     info.UID,
			Created: info.StartTime.Format(time.RFC3339),
			Status:  string(utils.GetChaosState(obj)),
		},
		KubeObject: core.KubeObjectDesc{
			TypeMeta: metav1.TypeMeta{
				APIVersion: gvk.GroupVersion().String(),
				Kind:       gvk.Kind,
			},
			Meta: core.KubeObjectMeta{
				Name:        obj.Name,
				Namespace:   obj.Namespace,
				Labels:      obj.Labels,
				Annotations: obj.Annotations,
			},
			Spec: obj.Spec,
		},
	}, nil
}
// getAWSChaosDetail fetches an AWSChaos object and converts it into the
// API Detail representation. Returns ErrNotFound when the object is absent.
func (s *Service) getAWSChaosDetail(namespace string, name string, kubeCli client.Client) (Detail, error) {
	obj := &v1alpha1.AWSChaos{}
	key := types.NamespacedName{Namespace: namespace, Name: name}

	err := kubeCli.Get(context.Background(), key, obj)
	if apierrors.IsNotFound(err) {
		return Detail{}, utils.ErrNotFound.NewWithNoMessage()
	}
	if err != nil {
		return Detail{}, err
	}

	gvk, gvkErr := apiutil.GVKForObject(obj, s.scheme)
	if gvkErr != nil {
		return Detail{}, gvkErr
	}

	info := obj.GetChaos()
	return Detail{
		Experiment: Experiment{
			Base: Base{
				Kind:      gvk.Kind,
				Namespace: obj.Namespace,
				Name:      obj.Name,
			},
			UID:     info.UID,
			Created: info.StartTime.Format(time.RFC3339),
			Status:  string(utils.GetChaosState(obj)),
		},
		KubeObject: core.KubeObjectDesc{
			TypeMeta: metav1.TypeMeta{
				APIVersion: gvk.GroupVersion().String(),
				Kind:       gvk.Kind,
			},
			Meta: core.KubeObjectMeta{
				Name:        obj.Name,
				Namespace:   obj.Namespace,
				Labels:      obj.Labels,
				Annotations: obj.Annotations,
			},
			Spec: obj.Spec,
		},
	}, nil
}
// getGCPChaosDetail fetches a GCPChaos object and converts it into the
// API Detail representation. Returns ErrNotFound when the object is absent.
func (s *Service) getGCPChaosDetail(namespace string, name string, kubeCli client.Client) (Detail, error) {
	obj := &v1alpha1.GCPChaos{}
	key := types.NamespacedName{Namespace: namespace, Name: name}

	err := kubeCli.Get(context.Background(), key, obj)
	if apierrors.IsNotFound(err) {
		return Detail{}, utils.ErrNotFound.NewWithNoMessage()
	}
	if err != nil {
		return Detail{}, err
	}

	gvk, gvkErr := apiutil.GVKForObject(obj, s.scheme)
	if gvkErr != nil {
		return Detail{}, gvkErr
	}

	info := obj.GetChaos()
	return Detail{
		Experiment: Experiment{
			Base: Base{
				Kind:      gvk.Kind,
				Namespace: obj.Namespace,
				Name:      obj.Name,
			},
			UID:     info.UID,
			Created: info.StartTime.Format(time.RFC3339),
			Status:  string(utils.GetChaosState(obj)),
		},
		KubeObject: core.KubeObjectDesc{
			TypeMeta: metav1.TypeMeta{
				APIVersion: gvk.GroupVersion().String(),
				Kind:       gvk.Kind,
			},
			Meta: core.KubeObjectMeta{
				Name:        obj.Name,
				Namespace:   obj.Namespace,
				Labels:      obj.Labels,
				Annotations: obj.Annotations,
			},
			Spec: obj.Spec,
		},
	}, nil
}
// @Summary Get chaos experiments from Kubernetes cluster.
// @Description Get chaos experiments from Kubernetes cluster.
// @Tags experiments
// @Produce json
// @Param namespace query string false "namespace"
// @Param name query string false "name"
// @Param kind query string false "kind" Enums(PodChaos, IOChaos, NetworkChaos, TimeChaos, KernelChaos, StressChaos)
// @Param status query string false "status" Enums(Running, Paused, Failed, Finished)
// @Success 200 {array} Experiment
// @Router /experiments [get]
// @Failure 500 {object} utils.APIError
func (s *Service) listExperiments(c *gin.Context) {
	kubeCli, err := clientpool.ExtractTokenAndGetClient(c.Request.Header)
	if err != nil {
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(err))
		return
	}

	kind := c.Query("kind")
	name := c.Query("name")
	ns := c.Query("namespace")

	// When the dashboard is not cluster-scoped, default the namespace
	// filter to the configured target namespace.
	if len(ns) == 0 && !s.conf.ClusterScoped &&
		len(s.conf.TargetNamespace) != 0 {
		ns = s.conf.TargetNamespace
	}

	exps := make([]*Experiment, 0)
	// Iterate every registered chaos kind, optionally restricted by ?kind=.
	for key, list := range v1alpha1.AllKinds() {
		if kind != "" && key != kind {
			continue
		}

		if err := kubeCli.List(context.Background(), list.ChaosList, &client.ListOptions{Namespace: ns}); err != nil {
			c.Status(http.StatusInternalServerError)
			utils.SetErrorForGinCtx(c, err)
			return
		}

		// The concrete list type varies per kind, so pull the Items slice
		// out via reflection and treat each element as an InnerObject.
		items := reflect.ValueOf(list.ChaosList).Elem().FieldByName("Items")
		for i := 0; i < items.Len(); i++ {
			item := items.Index(i).Addr().Interface().(v1alpha1.InnerObject)
			chaos := item.GetChaos()

			// Optional exact-name filter from ?name=.
			if name != "" && chaos.Name != name {
				continue
			}

			status := utils.GetChaosState(item)

			exps = append(exps, &Experiment{
				Base: Base{
					Name:      chaos.Name,
					Namespace: chaos.Namespace,
					Kind:      chaos.Kind,
				},
				Created: chaos.StartTime.Format(time.RFC3339),
				Status:  string(status),
				UID:     chaos.UID,
			})
		}
	}

	// Newest first (RFC3339 strings sort chronologically).
	sort.Slice(exps, func(i, j int) bool {
		return exps[i].Created > exps[j].Created
	})

	c.JSON(http.StatusOK, exps)
}
// @Summary Get detailed information about the specified chaos experiment.
// @Description Get detailed information about the specified chaos experiment.
// @Tags experiments
// @Produce json
// @Param uid path string true "uid"
// @Router /experiments/detail/{uid} [GET]
// @Success 200 {object} Detail
// @Failure 400 {object} utils.APIError
// @Failure 500 {object} utils.APIError
func (s *Service) getExperimentDetail(c *gin.Context) {
	var (
		exp       *core.Experiment
		expDetail Detail
	)

	kubeCli, err := clientpool.ExtractTokenAndGetClient(c.Request.Header)
	if err != nil {
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(err))
		return
	}

	// Resolve the uid to an archived record to learn kind/namespace/name.
	uid := c.Param("uid")
	if exp, err = s.archive.FindByUID(context.Background(), uid); err != nil {
		if gorm.IsRecordNotFoundError(err) {
			c.Status(http.StatusInternalServerError)
			_ = c.Error(utils.ErrInvalidRequest.New("the experiment is not found"))
		} else {
			c.Status(http.StatusInternalServerError)
			_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
		}
		return
	}

	kind := exp.Kind
	ns := exp.Namespace
	name := exp.Name

	// Fetch the live object detail for the recorded kind.
	switch kind {
	case v1alpha1.KindPodChaos:
		expDetail, err = s.getPodChaosDetail(ns, name, kubeCli)
	case v1alpha1.KindIOChaos:
		expDetail, err = s.getIOChaosDetail(ns, name, kubeCli)
	case v1alpha1.KindNetworkChaos:
		expDetail, err = s.getNetworkChaosDetail(ns, name, kubeCli)
	case v1alpha1.KindTimeChaos:
		expDetail, err = s.getTimeChaosDetail(ns, name, kubeCli)
	case v1alpha1.KindKernelChaos:
		expDetail, err = s.getKernelChaosDetail(ns, name, kubeCli)
	case v1alpha1.KindStressChaos:
		expDetail, err = s.getStressChaosDetail(ns, name, kubeCli)
	case v1alpha1.KindDNSChaos:
		expDetail, err = s.getDNSChaosDetail(ns, name, kubeCli)
	case v1alpha1.KindAWSChaos:
		expDetail, err = s.getAWSChaosDetail(ns, name, kubeCli)
	case v1alpha1.KindGCPChaos:
		expDetail, err = s.getGCPChaosDetail(ns, name, kubeCli)
	default:
		// Previously an unrecognized kind fell through and returned an
		// empty Detail with HTTP 200; reject it explicitly instead, in
		// line with createExperiment and deleteExperiment.
		c.Status(http.StatusBadRequest)
		_ = c.Error(utils.ErrInvalidRequest.New(kind + " is not supported"))
		return
	}
	if err != nil {
		c.Status(http.StatusInternalServerError)
		_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
		return
	}

	c.JSON(http.StatusOK, expDetail)
}
// @Summary Delete the specified chaos experiment.
// @Description Delete the specified chaos experiment.
// @Tags experiments
// @Produce json
// @Param uid path string true "uid"
// @Param force query string true "force" Enums(true, false)
// @Success 200 {object} StatusResponse
// @Failure 400 {object} utils.APIError
// @Failure 404 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /experiments/{uid} [delete]
func (s *Service) deleteExperiment(c *gin.Context) {
	var (
		chaosKind *v1alpha1.ChaosKind
		ok        bool
		exp       *core.Experiment
	)

	kubeCli, err := clientpool.ExtractTokenAndGetClient(c.Request.Header)
	if err != nil {
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(err))
		return
	}

	// Resolve the uid to an archived record to learn kind/namespace/name.
	uid := c.Param("uid")
	if exp, err = s.archive.FindByUID(context.Background(), uid); err != nil {
		if gorm.IsRecordNotFoundError(err) {
			c.Status(http.StatusInternalServerError)
			_ = c.Error(utils.ErrInvalidRequest.New("the experiment is not found"))
		} else {
			c.Status(http.StatusInternalServerError)
			_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
		}
		return
	}

	kind := exp.Kind
	ns := exp.Namespace
	name := exp.Name
	force := c.DefaultQuery("force", "false")

	ctx := context.TODO()
	chaosKey := types.NamespacedName{Namespace: ns, Name: name}

	if chaosKind, ok = v1alpha1.AllKinds()[kind]; !ok {
		c.Status(http.StatusBadRequest)
		_ = c.Error(utils.ErrInvalidRequest.New(kind + " is not supported"))
		return
	}
	// Fetch into chaosKind.Chaos so the subsequent Delete acts on the
	// concrete object of the right kind.
	if err := kubeCli.Get(ctx, chaosKey, chaosKind.Chaos); err != nil {
		if apierrors.IsNotFound(err) {
			c.Status(http.StatusNotFound)
			_ = c.Error(utils.ErrNotFound.NewWithNoMessage())
		} else {
			c.Status(http.StatusInternalServerError)
			_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
		}
		return
	}

	// "Forced" deletion first stamps an annotation on the object (retrying
	// on update conflicts) so cleanup can be skipped; see setAnnotation.
	if force == "true" {
		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
			return setAnnotation(kubeCli, kind, ns, name)
		})
		if err != nil {
			c.Status(http.StatusInternalServerError)
			_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("forced deletion of chaos failed, because update chaos annotation error")))
			return
		}
	}

	if err := kubeCli.Delete(ctx, chaosKind.Chaos, &client.DeleteOptions{}); err != nil {
		if apierrors.IsNotFound(err) {
			c.Status(http.StatusNotFound)
			_ = c.Error(utils.ErrNotFound.NewWithNoMessage())
		} else {
			c.Status(http.StatusInternalServerError)
			_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(err))
		}
		return
	}

	c.JSON(http.StatusOK, StatusResponse{Status: "success"})
}
// @Summary Delete the specified chaos experiment.
// @Description Delete the specified chaos experiment.
// @Tags experiments
// @Produce json
// @Param uids query string true "uids"
// @Param force query string true "force" Enums(true, false)
// @Success 200 {object} StatusResponse
// @Failure 400 {object} utils.APIError
// @Failure 404 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /experiments [delete]
func (s *Service) batchDeleteExperiment(c *gin.Context) {
	var (
		chaosKind *v1alpha1.ChaosKind
		ok        bool
		exp       *core.Experiment
		errFlag   bool // set when any uid fails; individual errors are accumulated on the context
		uidSlice  []string
	)

	kubeCli, err := clientpool.ExtractTokenAndGetClient(c.Request.Header)
	if err != nil {
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(err))
		return
	}

	uids := c.Query("uids")
	if uids == "" {
		c.Status(http.StatusBadRequest)
		_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("uids cannot be empty")))
		return
	}
	force := c.DefaultQuery("force", "false")

	uidSlice = strings.Split(uids, ",")
	errFlag = false

	// Cap batch size to keep a single request bounded.
	if len(uidSlice) > 100 {
		c.Status(http.StatusBadRequest)
		_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("too many uids, please reduce the number of uids")))
		return
	}

	// Best-effort per uid: record the error, set errFlag and keep going
	// so one bad uid does not abort the rest of the batch.
	for _, uid := range uidSlice {
		// Resolve the uid to an archived record to learn kind/namespace/name.
		if exp, err = s.archive.FindByUID(context.Background(), uid); err != nil {
			if gorm.IsRecordNotFoundError(err) {
				_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("delete experiment uid (%s) error, because the experiment is not found", uid)))
			} else {
				_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("delete experiment uid (%s) error, because %s", uid, err.Error())))
			}
			errFlag = true
			continue
		}

		kind := exp.Kind
		ns := exp.Namespace
		name := exp.Name

		ctx := context.TODO()
		chaosKey := types.NamespacedName{Namespace: ns, Name: name}

		if chaosKind, ok = v1alpha1.AllKinds()[kind]; !ok {
			_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("delete experiment uid (%s) error, because kind (%s) is not supported", uid, kind)))
			errFlag = true
			continue
		}
		// Fetch into chaosKind.Chaos so the subsequent Delete acts on the
		// concrete object of the right kind.
		if err := kubeCli.Get(ctx, chaosKey, chaosKind.Chaos); err != nil {
			if apierrors.IsNotFound(err) {
				_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("delete experiment uid (%s) error, because the chaos is not found", uid)))
			} else {
				_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("delete experiment uid (%s) error, because %s", uid, err.Error())))
			}
			errFlag = true
			continue
		}

		// "Forced" deletion first stamps an annotation (retrying on update
		// conflicts); see setAnnotation.
		if force == "true" {
			err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
				return setAnnotation(kubeCli, kind, ns, name)
			})
			if err != nil {
				_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("forced delete experiment uid (%s) error, because update chaos annotation error", uid)))
				errFlag = true
				continue
			}
		}

		if err := kubeCli.Delete(ctx, chaosKind.Chaos, &client.DeleteOptions{}); err != nil {
			if apierrors.IsNotFound(err) {
				_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("delete experiment uid (%s) error, because the chaos is not found", uid)))
			} else {
				_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(fmt.Errorf("delete experiment uid (%s) error, because %s", uid, err.Error())))
			}
			errFlag = true
			continue
		}
	}
	if errFlag {
		c.Status(http.StatusInternalServerError)
	} else {
		c.JSON(http.StatusOK, StatusResponse{Status: "success"})
	}
}
// @Summary Pause a chaos experiment.
// @Description Pause a chaos experiment.
// @Tags experiments
// @Produce json
// @Param uid path string true "uid"
// @Success 200 {object} StatusResponse
// @Failure 400 {object} utils.APIError
// @Failure 404 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /experiments/pause/{uid} [put]
func (s *Service) pauseExperiment(c *gin.Context) {
	kubeCli, err := clientpool.ExtractTokenAndGetClient(c.Request.Header)
	if err != nil {
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(err))
		return
	}

	// Resolve the uid to an archived record to learn kind/namespace/name.
	record, err := s.archive.FindByUID(context.Background(), c.Param("uid"))
	if err != nil {
		c.Status(http.StatusInternalServerError)
		if gorm.IsRecordNotFoundError(err) {
			_ = c.Error(utils.ErrInvalidRequest.New("the experiment is not found"))
		} else {
			_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
		}
		return
	}

	// Pausing is implemented by setting the pause annotation to "true".
	target := &Base{
		Kind:      record.Kind,
		Name:      record.Name,
		Namespace: record.Namespace,
	}
	pause := map[string]string{
		v1alpha1.PauseAnnotationKey: "true",
	}
	if patchErr := s.patchExperiment(target, pause, kubeCli); patchErr != nil {
		if apierrors.IsNotFound(patchErr) {
			c.Status(http.StatusNotFound)
			_ = c.Error(utils.ErrNotFound.WrapWithNoMessage(patchErr))
			return
		}
		c.Status(http.StatusInternalServerError)
		_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(patchErr))
		return
	}

	c.JSON(http.StatusOK, StatusResponse{Status: "success"})
}
// @Summary Start a chaos experiment.
// @Description Start a chaos experiment.
// @Tags experiments
// @Produce json
// @Param uid path string true "uid"
// @Success 200 {object} StatusResponse
// @Failure 400 {object} utils.APIError
// @Failure 404 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /experiments/start/{uid} [put]
func (s *Service) startExperiment(c *gin.Context) {
	kubeCli, err := clientpool.ExtractTokenAndGetClient(c.Request.Header)
	if err != nil {
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(err))
		return
	}

	// Resolve the uid to an archived record to learn kind/namespace/name.
	record, err := s.archive.FindByUID(context.Background(), c.Param("uid"))
	if err != nil {
		c.Status(http.StatusInternalServerError)
		if gorm.IsRecordNotFoundError(err) {
			_ = c.Error(utils.ErrInvalidRequest.New("the experiment is not found"))
		} else {
			_ = c.Error(utils.ErrInternalServer.NewWithNoMessage())
		}
		return
	}

	// Resuming is implemented by setting the pause annotation to "false".
	target := &Base{
		Kind:      record.Kind,
		Name:      record.Name,
		Namespace: record.Namespace,
	}
	resume := map[string]string{
		v1alpha1.PauseAnnotationKey: "false",
	}
	if patchErr := s.patchExperiment(target, resume, kubeCli); patchErr != nil {
		if apierrors.IsNotFound(patchErr) {
			c.Status(http.StatusNotFound)
			_ = c.Error(utils.ErrNotFound.WrapWithNoMessage(patchErr))
			return
		}
		c.Status(http.StatusInternalServerError)
		_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(patchErr))
		return
	}

	c.JSON(http.StatusOK, StatusResponse{Status: "success"})
}
// patchExperiment applies the given annotations to the live chaos object named
// by exp (kind/namespace/name) as a JSON merge patch. It returns an error when
// the kind is unknown, when the object cannot be fetched, or when the patch
// cannot be built or applied.
func (s *Service) patchExperiment(exp *Base, annotations map[string]string, kubeCli client.Client) error {
	chaosKind, ok := v1alpha1.AllKinds()[exp.Kind]
	if !ok {
		return fmt.Errorf("%s is not supported", exp.Kind)
	}
	key := types.NamespacedName{Namespace: exp.Namespace, Name: exp.Name}
	if err := kubeCli.Get(context.Background(), key, chaosKind.Chaos); err != nil {
		return err
	}
	// Build a merge patch that only touches metadata.annotations.
	mergePatch, err := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": annotations,
		},
	})
	if err != nil {
		// Bug fix: the marshal error was previously discarded with `_`.
		return err
	}
	return kubeCli.Patch(context.Background(),
		chaosKind.Chaos,
		client.ConstantPatch(types.MergePatchType, mergePatch))
}
// @Summary Get chaos experiments state from Kubernetes cluster.
// @Description Get chaos experiments state from Kubernetes cluster.
// @Tags experiments
// @Produce json
// @Param namespace query string false "namespace"
// @Success 200 {object} ChaosState
// @Router /experiments/state [get]
// @Failure 500 {object} utils.APIError
// state lists every chaos object of every kind (optionally restricted to a
// single namespace) and aggregates per-state counters into a ChaosState.
func (s *Service) state(c *gin.Context) {
	kubeCli, err := clientpool.ExtractTokenAndGetClient(c.Request.Header)
	if err != nil {
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(err))
		return
	}
	namespace := c.Query("namespace")
	if len(namespace) == 0 && !s.conf.ClusterScoped &&
		len(s.conf.TargetNamespace) != 0 {
		// When the server is namespace-scoped, default to the configured target namespace.
		namespace = s.conf.TargetNamespace
	}
	states := new(ChaosState)
	g, ctx := errgroup.WithContext(context.Background())
	m := &sync.Mutex{}
	kinds := v1alpha1.AllKinds()
	var listOptions []client.ListOption
	if !s.conf.ClusterScoped {
		listOptions = append(listOptions, &client.ListOptions{Namespace: s.conf.TargetNamespace})
	} else if len(namespace) != 0 {
		listOptions = append(listOptions, &client.ListOptions{Namespace: namespace})
	}
	// List all kinds concurrently; the mutex guards the shared counters.
	for index := range kinds {
		list := kinds[index]
		g.Go(func() error {
			if err := kubeCli.List(ctx, list.ChaosList, listOptions...); err != nil {
				return err
			}
			m.Lock()
			items := reflect.ValueOf(list.ChaosList).Elem().FieldByName("Items")
			for i := 0; i < items.Len(); i++ {
				item := items.Index(i).Addr().Interface().(v1alpha1.InnerObject)
				state := utils.GetChaosState(item)
				// Bug fix: the previous version checked the outer `err` variable
				// here (always nil at this point, since GetChaosState returns no
				// error) and wrote to the gin context from inside a goroutine;
				// both the dead check and the racy write have been removed.
				switch state {
				case utils.Paused:
					states.Paused++
				case utils.Running:
					states.Running++
				case utils.Injecting:
					states.Injecting++
				case utils.Finished:
					states.Finished++
				}
			}
			m.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		c.Status(http.StatusInternalServerError)
		utils.SetErrorForGinCtx(c, err)
		return
	}
	c.JSON(http.StatusOK, states)
}
// @Summary Update a chaos experiment.
// @Description Update a chaos experiment.
// @Tags experiments
// @Produce json
// @Param request body core.KubeObjectDesc true "Request body"
// @Success 200 {object} core.KubeObjectDesc
// @Failure 400 {object} utils.APIError
// @Failure 500 {object} utils.APIError
// @Router /experiments/update [put]
// updateExperiment parses a KubeObjectDesc from the request body, dispatches
// to the kind-specific update function, and retries on optimistic-lock conflicts.
func (s *Service) updateExperiment(c *gin.Context) {
	kubeCli, clientErr := clientpool.ExtractTokenAndGetClient(c.Request.Header)
	if clientErr != nil {
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(clientErr))
		return
	}
	desc := &core.KubeObjectDesc{}
	if bindErr := c.ShouldBindJSON(desc); bindErr != nil {
		c.Status(http.StatusBadRequest)
		_ = c.Error(utils.ErrInvalidRequest.WrapWithNoMessage(bindErr))
		return
	}
	// Dispatch table: chaos kind -> kind-specific update implementation.
	dispatch := map[string]updateExperimentFunc{
		v1alpha1.KindPodChaos:     s.updatePodChaos,
		v1alpha1.KindNetworkChaos: s.updateNetworkChaos,
		v1alpha1.KindIOChaos:      s.updateIOChaos,
		v1alpha1.KindStressChaos:  s.updateStressChaos,
		v1alpha1.KindTimeChaos:    s.updateTimeChaos,
		v1alpha1.KindKernelChaos:  s.updateKernelChaos,
		v1alpha1.KindDNSChaos:     s.updateDNSChaos,
		v1alpha1.KindAWSChaos:     s.updateAWSChaos,
		v1alpha1.KindGCPChaos:     s.updateGCPChaos,
	}
	update, supported := dispatch[desc.Kind]
	if !supported {
		c.Status(http.StatusBadRequest)
		_ = c.Error(utils.ErrInvalidRequest.New(desc.Kind + " is not supported"))
		return
	}
	// Retry the update on conflict so concurrent modifications don't fail the request.
	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		return update(desc, kubeCli)
	})
	if retryErr != nil {
		if apierrors.IsNotFound(retryErr) {
			c.Status(http.StatusNotFound)
			_ = c.Error(utils.ErrNotFound.WrapWithNoMessage(retryErr))
		} else {
			c.Status(http.StatusInternalServerError)
			_ = c.Error(utils.ErrInternalServer.WrapWithNoMessage(retryErr))
		}
		return
	}
	c.JSON(http.StatusOK, desc)
}
// updatePodChaos refreshes the labels and annotations of an existing PodChaos
// object using the metadata from the request.
func (s *Service) updatePodChaos(exp *core.KubeObjectDesc, kubeCli client.Client) error {
	ctx := context.Background()
	meta := &exp.Meta
	chaos := &v1alpha1.PodChaos{}
	if err := kubeCli.Get(ctx, types.NamespacedName{Namespace: meta.Namespace, Name: meta.Name}, chaos); err != nil {
		return err
	}
	chaos.SetLabels(meta.Labels)
	chaos.SetAnnotations(meta.Annotations)
	// TODO: update chaos
	return kubeCli.Update(ctx, chaos)
}
// updateNetworkChaos fetches the NetworkChaos object named in exp.Meta and
// overwrites its labels and annotations with the values from the request.
func (s *Service) updateNetworkChaos(exp *core.KubeObjectDesc, kubeCli client.Client) error {
	chaos := &v1alpha1.NetworkChaos{}
	meta := &exp.Meta
	key := types.NamespacedName{Namespace: meta.Namespace, Name: meta.Name}
	if err := kubeCli.Get(context.Background(), key, chaos); err != nil {
		return err
	}
	chaos.SetLabels(meta.Labels)
	chaos.SetAnnotations(meta.Annotations)
	// TODO: update chaos (spec fields are not yet copied over)
	return kubeCli.Update(context.Background(), chaos)
}
// updateIOChaos fetches the IOChaos object named in exp.Meta and overwrites
// its labels and annotations with the values from the request.
func (s *Service) updateIOChaos(exp *core.KubeObjectDesc, kubeCli client.Client) error {
	chaos := &v1alpha1.IOChaos{}
	meta := &exp.Meta
	key := types.NamespacedName{Namespace: meta.Namespace, Name: meta.Name}
	if err := kubeCli.Get(context.Background(), key, chaos); err != nil {
		return err
	}
	chaos.SetLabels(meta.Labels)
	chaos.SetAnnotations(meta.Annotations)
	// TODO: update chaos (spec fields are not yet copied over)
	return kubeCli.Update(context.Background(), chaos)
}
// updateKernelChaos fetches the KernelChaos object named in exp.Meta and
// overwrites its labels and annotations with the values from the request.
func (s *Service) updateKernelChaos(exp *core.KubeObjectDesc, kubeCli client.Client) error {
	chaos := &v1alpha1.KernelChaos{}
	meta := &exp.Meta
	key := types.NamespacedName{Namespace: meta.Namespace, Name: meta.Name}
	if err := kubeCli.Get(context.Background(), key, chaos); err != nil {
		return err
	}
	chaos.SetLabels(meta.Labels)
	chaos.SetAnnotations(meta.Annotations)
	// TODO: update chaos (spec fields are not yet copied over)
	return kubeCli.Update(context.Background(), chaos)
}
// updateTimeChaos fetches the TimeChaos object named in exp.Meta and
// overwrites its labels and annotations with the values from the request.
func (s *Service) updateTimeChaos(exp *core.KubeObjectDesc, kubeCli client.Client) error {
	chaos := &v1alpha1.TimeChaos{}
	meta := &exp.Meta
	key := types.NamespacedName{Namespace: meta.Namespace, Name: meta.Name}
	if err := kubeCli.Get(context.Background(), key, chaos); err != nil {
		return err
	}
	chaos.SetLabels(meta.Labels)
	chaos.SetAnnotations(meta.Annotations)
	// TODO: update chaos (spec fields are not yet copied over)
	return kubeCli.Update(context.Background(), chaos)
}
// updateStressChaos fetches the StressChaos object named in exp.Meta and
// overwrites its labels and annotations with the values from the request.
func (s *Service) updateStressChaos(exp *core.KubeObjectDesc, kubeCli client.Client) error {
	chaos := &v1alpha1.StressChaos{}
	meta := &exp.Meta
	key := types.NamespacedName{Namespace: meta.Namespace, Name: meta.Name}
	if err := kubeCli.Get(context.Background(), key, chaos); err != nil {
		return err
	}
	chaos.SetLabels(meta.Labels)
	chaos.SetAnnotations(meta.Annotations)
	// TODO: update chaos (spec fields are not yet copied over)
	return kubeCli.Update(context.Background(), chaos)
}
// updateDNSChaos fetches the DNSChaos object named in exp.Meta and overwrites
// its labels and annotations with the values from the request.
func (s *Service) updateDNSChaos(exp *core.KubeObjectDesc, kubeCli client.Client) error {
	chaos := &v1alpha1.DNSChaos{}
	meta := &exp.Meta
	key := types.NamespacedName{Namespace: meta.Namespace, Name: meta.Name}
	if err := kubeCli.Get(context.Background(), key, chaos); err != nil {
		return err
	}
	chaos.SetLabels(meta.Labels)
	chaos.SetAnnotations(meta.Annotations)
	// TODO: update chaos (spec fields are not yet copied over)
	return kubeCli.Update(context.Background(), chaos)
}
// updateAWSChaos fetches the AWSChaos object named in exp.Meta and overwrites
// its labels and annotations with the values from the request.
func (s *Service) updateAWSChaos(exp *core.KubeObjectDesc, kubeCli client.Client) error {
	chaos := &v1alpha1.AWSChaos{}
	meta := &exp.Meta
	key := types.NamespacedName{Namespace: meta.Namespace, Name: meta.Name}
	if err := kubeCli.Get(context.Background(), key, chaos); err != nil {
		return err
	}
	chaos.SetLabels(meta.Labels)
	chaos.SetAnnotations(meta.Annotations)
	// TODO: update chaos (spec fields are not yet copied over)
	return kubeCli.Update(context.Background(), chaos)
}
// updateGCPChaos fetches the GCPChaos object named in exp.Meta and overwrites
// its labels and annotations with the values from the request.
func (s *Service) updateGCPChaos(exp *core.KubeObjectDesc, kubeCli client.Client) error {
	// Bug fix: this previously instantiated v1alpha1.AWSChaos (copy-paste from
	// updateAWSChaos), so Get/Update operated on the wrong resource type for
	// GCP experiments. KindGCPChaos in updateExperiment's dispatch table routes
	// here, so the object must be a GCPChaos.
	chaos := &v1alpha1.GCPChaos{}
	meta := &exp.Meta
	key := types.NamespacedName{Namespace: meta.Namespace, Name: meta.Name}
	if err := kubeCli.Get(context.Background(), key, chaos); err != nil {
		return err
	}
	chaos.SetLabels(meta.Labels)
	chaos.SetAnnotations(meta.Annotations)
	// TODO: update chaos (spec fields are not yet copied over)
	return kubeCli.Update(context.Background(), chaos)
}
// setAnnotation marks the chaos object identified by kind/ns/name for forced
// finalizer cleanup by setting the AnnotationCleanFinalizer annotation to the
// "forced" value and updating the object.
func setAnnotation(kubeCli client.Client, kind string, ns string, name string) error {
	var (
		chaosKind *v1alpha1.ChaosKind
		chaosMeta metav1.Object
		ok        bool
	)
	if chaosKind, ok = v1alpha1.AllKinds()[kind]; !ok {
		// Bug fix: use a constant format string; the previous
		// fmt.Errorf(kind + " is not supported") is flagged by go vet and would
		// misbehave if the kind ever contained a '%' verb.
		return fmt.Errorf("%s is not supported", kind)
	}
	ctx := context.TODO()
	chaosKey := types.NamespacedName{Namespace: ns, Name: name}
	if err := kubeCli.Get(ctx, chaosKey, chaosKind.Chaos); err != nil {
		return err
	}
	if chaosMeta, ok = chaosKind.Chaos.(metav1.Object); !ok {
		return fmt.Errorf("failed to get chaos meta information")
	}
	annotations := chaosMeta.GetAnnotations()
	if annotations == nil {
		annotations = make(map[string]string)
	}
	annotations[finalizers.AnnotationCleanFinalizer] = finalizers.AnnotationCleanFinalizerForced
	chaosMeta.SetAnnotations(annotations)
	// Reuse ctx for consistency with the Get above (same semantics as
	// context.Background() here).
	return kubeCli.Update(ctx, chaosKind.Chaos)
}
| 1 | 24,170 | Panic here if `exp.Target.StressChaos.CintainerName` is nil. | chaos-mesh-chaos-mesh | go |
@@ -93,4 +93,16 @@ public interface LeafCollector {
*/
void collect(int doc) throws IOException;
+ /**
+ * Optionally creates a view of the scorerIterator where only competitive documents
+ * in the scorerIterator are kept and non-competitive are skipped.
+ *
+ * Collectors should delegate this method to their comparators if
+ * their comparators provide the skipping functionality over non-competitive docs.
+ * The default is to return the same iterator which is interpreted as the collector doesn't filter any documents.
+ */
+ default DocIdSetIterator filterIterator(DocIdSetIterator scorerIterator) {
+ return scorerIterator;
+ }
+
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import java.io.IOException;
/**
* <p>Collector decouples the score from the collected doc:
* the score computation is skipped entirely if it's not
* needed. Collectors that do need the score should
* implement the {@link #setScorer} method, to hold onto the
* passed {@link Scorer} instance, and call {@link
* Scorer#score()} within the collect method to compute the
* current hit's score. If your collector may request the
* score for a single hit multiple times, you should use
* {@link ScoreCachingWrappingScorer}. </p>
*
* <p><b>NOTE:</b> The doc that is passed to the collect
* method is relative to the current reader. If your
* collector needs to resolve this to the docID space of the
* Multi*Reader, you must re-base it by recording the
* docBase from the most recent setNextReader call. Here's
* a simple example showing how to collect docIDs into a
* BitSet:</p>
*
* <pre class="prettyprint">
* IndexSearcher searcher = new IndexSearcher(indexReader);
* final BitSet bits = new BitSet(indexReader.maxDoc());
* searcher.search(query, new Collector() {
*
* public LeafCollector getLeafCollector(LeafReaderContext context)
* throws IOException {
* final int docBase = context.docBase;
* return new LeafCollector() {
*
* <em>// ignore scorer</em>
* public void setScorer(Scorer scorer) throws IOException {
* }
*
* public void collect(int doc) throws IOException {
* bits.set(docBase + doc);
* }
*
* };
* }
*
* });
* </pre>
*
* <p>Not all collectors will need to rebase the docID. For
* example, a collector that simply counts the total number
* of hits would skip it.</p>
*
* @lucene.experimental
*/
public interface LeafCollector {

  /**
   * Called before successive calls to {@link #collect(int)}. Implementations
   * that need the score of the current document (passed-in to
   * {@link #collect(int)}), should save the passed-in Scorer and call
   * scorer.score() when needed. Collectors that do not need scores may simply
   * ignore the argument (see the class-level example above).
   */
  void setScorer(Scorable scorer) throws IOException;

  /**
   * Called once for every document matching a query, with the unbased document
   * number.
   * <p>Note: The collection of the current segment can be terminated by throwing
   * a {@link CollectionTerminatedException}. In this case, the last docs of the
   * current {@link org.apache.lucene.index.LeafReaderContext} will be skipped and {@link IndexSearcher}
   * will swallow the exception and continue collection with the next leaf.
   * <p>
   * Note: This is called in an inner search loop. For good search performance,
   * implementations of this method should not call {@link IndexSearcher#doc(int)} or
   * {@link org.apache.lucene.index.IndexReader#document(int)} on every hit.
   * Doing so can slow searches by an order of magnitude or more.
   */
  void collect(int doc) throws IOException;

}
| 1 | 33,207 | This allows for some hacks like returning an iterator that matches more docs than the scorer. I liked the previous approach that returned an iterator better. | apache-lucene-solr | java |
@@ -227,6 +227,7 @@ public class IcebergInputFormat<T> extends InputFormat<Void, T> {
private CloseableIterable<T> open(FileScanTask currentTask, Schema readSchema) {
DataFile file = currentTask.file();
+ LOG.debug("Opening [{}] for read", file);
// TODO we should make use of FileIO to create inputFile
InputFile inputFile = HadoopInputFile.fromLocation(file.path(), context.getConfiguration());
CloseableIterable<T> iterable; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.mr.mapreduce;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.BiFunction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.iceberg.CombinedScanTask;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.MetadataColumns;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SchemaParser;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.TableScan;
import org.apache.iceberg.avro.Avro;
import org.apache.iceberg.data.IdentityPartitionConverters;
import org.apache.iceberg.data.avro.DataReader;
import org.apache.iceberg.data.orc.GenericOrcReader;
import org.apache.iceberg.data.parquet.GenericParquetReaders;
import org.apache.iceberg.expressions.Evaluator;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.hadoop.HadoopInputFile;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.CloseableIterator;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.mr.Catalogs;
import org.apache.iceberg.mr.InputFormatConfig;
import org.apache.iceberg.mr.SerializationUtil;
import org.apache.iceberg.orc.ORC;
import org.apache.iceberg.parquet.Parquet;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.util.PartitionUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Generic Mrv2 InputFormat API for Iceberg.
*
* @param <T> T is the in memory data model which can either be Pig tuples, Hive rows. Default is Iceberg records
*/
public class IcebergInputFormat<T> extends InputFormat<Void, T> {
  private static final Logger LOG = LoggerFactory.getLogger(IcebergInputFormat.class);

  /**
   * Configures the {@code Job} to use the {@code IcebergInputFormat} and
   * returns a helper to add further configuration.
   *
   * @param job the {@code Job} to configure
   */
  public static InputFormatConfig.ConfigBuilder configure(Job job) {
    job.setInputFormatClass(IcebergInputFormat.class);
    return new InputFormatConfig.ConfigBuilder(job.getConfiguration());
  }

  /**
   * Plans the table scan described by the job configuration (snapshot, as-of
   * time, split size, projection, filter) and returns one {@link IcebergSplit}
   * per combined scan task.
   */
  @Override
  public List<InputSplit> getSplits(JobContext context) {
    Configuration conf = context.getConfiguration();
    Table table = Catalogs.loadTable(conf);
    TableScan scan = table.newScan()
        .caseSensitive(conf.getBoolean(InputFormatConfig.CASE_SENSITIVE, true));
    long snapshotId = conf.getLong(InputFormatConfig.SNAPSHOT_ID, -1);
    if (snapshotId != -1) {
      scan = scan.useSnapshot(snapshotId);
    }
    long asOfTime = conf.getLong(InputFormatConfig.AS_OF_TIMESTAMP, -1);
    if (asOfTime != -1) {
      scan = scan.asOfTime(asOfTime);
    }
    long splitSize = conf.getLong(InputFormatConfig.SPLIT_SIZE, 0);
    if (splitSize > 0) {
      scan = scan.option(TableProperties.SPLIT_SIZE, String.valueOf(splitSize));
    }
    String schemaStr = conf.get(InputFormatConfig.READ_SCHEMA);
    if (schemaStr != null) {
      // Bug fix: TableScan refinements are immutable and return a new scan, so
      // the result of project() must be reassigned; the previous code discarded
      // it and the configured projection was silently ignored.
      scan = scan.project(SchemaParser.fromJson(schemaStr));
    }

    // TODO add a filter parser to get rid of Serialization
    Expression filter = SerializationUtil.deserializeFromBase64(conf.get(InputFormatConfig.FILTER_EXPRESSION));
    if (filter != null) {
      scan = scan.filter(filter);
    }

    List<InputSplit> splits = Lists.newArrayList();
    boolean applyResidual = !conf.getBoolean(InputFormatConfig.SKIP_RESIDUAL_FILTERING, false);
    InputFormatConfig.InMemoryDataModel model = conf.getEnum(InputFormatConfig.IN_MEMORY_DATA_MODEL,
        InputFormatConfig.InMemoryDataModel.GENERIC);
    try (CloseableIterable<CombinedScanTask> tasksIterable = scan.planTasks()) {
      tasksIterable.forEach(task -> {
        if (applyResidual && (model == InputFormatConfig.InMemoryDataModel.HIVE ||
            model == InputFormatConfig.InMemoryDataModel.PIG)) {
          // TODO: We do not support residual evaluation for HIVE and PIG in memory data model yet
          checkResiduals(task);
        }
        splits.add(new IcebergSplit(conf, task));
      });
    } catch (IOException e) {
      throw new UncheckedIOException(String.format("Failed to close table scan: %s", scan), e);
    }

    return splits;
  }

  /**
   * Fails fast when any file scan task in the combined task carries a residual
   * filter that is not trivially true, because the Hive/Pig in-memory data
   * models cannot apply residuals and would return extra rows.
   */
  private static void checkResiduals(CombinedScanTask task) {
    task.files().forEach(fileScanTask -> {
      Expression residual = fileScanTask.residual();
      if (residual != null && !residual.equals(Expressions.alwaysTrue())) {
        throw new UnsupportedOperationException(
            String.format(
                "Filter expression %s is not completely satisfied. Additional rows " +
                "can be returned not satisfied by the filter expression", residual));
      }
    });
  }

  @Override
  public RecordReader<Void, T> createRecordReader(InputSplit split, TaskAttemptContext context) {
    return new IcebergRecordReader<>();
  }

  /** Reads the rows of one {@link IcebergSplit} by iterating its file scan tasks in order. */
  private static final class IcebergRecordReader<T> extends RecordReader<Void, T> {
    private TaskAttemptContext context;
    private Schema tableSchema;
    private Schema expectedSchema;
    private boolean reuseContainers;
    private boolean caseSensitive;
    private InputFormatConfig.InMemoryDataModel inMemoryDataModel;
    private Iterator<FileScanTask> tasks;
    private T currentRow;
    private CloseableIterator<T> currentIterator;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext newContext) {
      Configuration conf = newContext.getConfiguration();
      // For now IcebergInputFormat does its own split planning and does not accept FileSplit instances
      CombinedScanTask task = ((IcebergSplit) split).task();
      this.context = newContext;
      this.tasks = task.files().iterator();
      this.tableSchema = SchemaParser.fromJson(conf.get(InputFormatConfig.TABLE_SCHEMA));
      String readSchemaStr = conf.get(InputFormatConfig.READ_SCHEMA);
      this.expectedSchema = readSchemaStr != null ? SchemaParser.fromJson(readSchemaStr) : tableSchema;
      this.reuseContainers = conf.getBoolean(InputFormatConfig.REUSE_CONTAINERS, false);
      this.caseSensitive = conf.getBoolean(InputFormatConfig.CASE_SENSITIVE, true);
      this.inMemoryDataModel = conf.getEnum(InputFormatConfig.IN_MEMORY_DATA_MODEL,
          InputFormatConfig.InMemoryDataModel.GENERIC);
      this.currentIterator = open(tasks.next(), expectedSchema).iterator();
    }

    @Override
    public boolean nextKeyValue() throws IOException {
      // Advance within the current file; when it is exhausted, move on to the
      // next file scan task until all tasks of the split are consumed.
      while (true) {
        if (currentIterator.hasNext()) {
          currentRow = currentIterator.next();
          return true;
        } else if (tasks.hasNext()) {
          currentIterator.close();
          currentIterator = open(tasks.next(), expectedSchema).iterator();
        } else {
          currentIterator.close();
          return false;
        }
      }
    }

    @Override
    public Void getCurrentKey() {
      return null;
    }

    @Override
    public T getCurrentValue() {
      return currentRow;
    }

    @Override
    public float getProgress() {
      // TODO: We could give a more accurate progress based on records read from the file. Context.getProgress does not
      // have enough information to give an accurate progress value. This isn't that easy, since we don't know how much
      // of the input split has been processed and we are pushing filters into Parquet and ORC. But we do know when a
      // file is opened and could count the number of rows returned, so we can estimate. And we could also add a row
      // count to the readers so that we can get an accurate count of rows that have been either returned or filtered
      // out.
      return context.getProgress();
    }

    @Override
    public void close() throws IOException {
      currentIterator.close();
    }

    /** Opens the data file of the given task with the appropriate format-specific reader. */
    private CloseableIterable<T> open(FileScanTask currentTask, Schema readSchema) {
      DataFile file = currentTask.file();
      // TODO we should make use of FileIO to create inputFile
      InputFile inputFile = HadoopInputFile.fromLocation(file.path(), context.getConfiguration());
      CloseableIterable<T> iterable;
      switch (file.format()) {
        case AVRO:
          iterable = newAvroIterable(inputFile, currentTask, readSchema);
          break;
        case ORC:
          iterable = newOrcIterable(inputFile, currentTask, readSchema);
          break;
        case PARQUET:
          iterable = newParquetIterable(inputFile, currentTask, readSchema);
          break;
        default:
          throw new UnsupportedOperationException(
              String.format("Cannot read %s file: %s", file.format().name(), file.path()));
      }

      return iterable;
    }

    /**
     * Wraps {@code iter} with an {@link Evaluator}-based filter for the task's
     * residual expression, unless residual filtering is disabled in the
     * configuration or the residual is trivially true.
     */
    private CloseableIterable<T> applyResidualFiltering(CloseableIterable<T> iter, Expression residual,
                                                       Schema readSchema) {
      boolean applyResidual = !context.getConfiguration().getBoolean(InputFormatConfig.SKIP_RESIDUAL_FILTERING, false);

      if (applyResidual && residual != null && residual != Expressions.alwaysTrue()) {
        Evaluator filter = new Evaluator(readSchema.asStruct(), residual, caseSensitive);
        return CloseableIterable.filter(iter, record -> filter.eval((StructLike) record));
      } else {
        return iter;
      }
    }

    /** Builds an Avro reader for the task's split range; generic model only for now. */
    private CloseableIterable<T> newAvroIterable(
        InputFile inputFile, FileScanTask task, Schema readSchema) {
      Avro.ReadBuilder avroReadBuilder = Avro.read(inputFile)
          .project(readSchema)
          .split(task.start(), task.length());
      if (reuseContainers) {
        avroReadBuilder.reuseContainers();
      }

      switch (inMemoryDataModel) {
        case PIG:
        case HIVE:
          // TODO implement value readers for Pig and Hive
          throw new UnsupportedOperationException("Avro support not yet supported for Pig and Hive");
        case GENERIC:
          avroReadBuilder.createReaderFunc(
              (expIcebergSchema, expAvroSchema) ->
                  DataReader.create(expIcebergSchema, expAvroSchema,
                      constantsMap(task, IdentityPartitionConverters::convertConstant)));
      }
      return applyResidualFiltering(avroReadBuilder.build(), task.residual(), readSchema);
    }

    /** Builds a Parquet reader for the task's split range with the residual pushed down. */
    private CloseableIterable<T> newParquetIterable(InputFile inputFile, FileScanTask task, Schema readSchema) {
      Parquet.ReadBuilder parquetReadBuilder = Parquet.read(inputFile)
          .project(readSchema)
          .filter(task.residual())
          .caseSensitive(caseSensitive)
          .split(task.start(), task.length());
      if (reuseContainers) {
        parquetReadBuilder.reuseContainers();
      }

      switch (inMemoryDataModel) {
        case PIG:
        case HIVE:
          // TODO implement value readers for Pig and Hive
          throw new UnsupportedOperationException("Parquet support not yet supported for Pig and Hive");
        case GENERIC:
          parquetReadBuilder.createReaderFunc(
              fileSchema -> GenericParquetReaders.buildReader(
                  readSchema, fileSchema, constantsMap(task, IdentityPartitionConverters::convertConstant)));
      }
      return applyResidualFiltering(parquetReadBuilder.build(), task.residual(), readSchema);
    }

    /**
     * Builds an ORC reader for the task's split range. Constant and metadata
     * fields are excluded from the file projection because they are supplied
     * via {@code idToConstant} rather than read from the file.
     */
    private CloseableIterable<T> newOrcIterable(InputFile inputFile, FileScanTask task, Schema readSchema) {
      Map<Integer, ?> idToConstant = constantsMap(task, IdentityPartitionConverters::convertConstant);
      Schema readSchemaWithoutConstantAndMetadataFields = TypeUtil.selectNot(readSchema,
          Sets.union(idToConstant.keySet(), MetadataColumns.metadataFieldIds()));

      ORC.ReadBuilder orcReadBuilder = ORC.read(inputFile)
          .project(readSchemaWithoutConstantAndMetadataFields)
          .filter(task.residual())
          .caseSensitive(caseSensitive)
          .split(task.start(), task.length());
      // ORC does not support reuse containers yet

      switch (inMemoryDataModel) {
        case PIG:
        case HIVE:
          // TODO: implement value readers for Pig and Hive
          throw new UnsupportedOperationException("ORC support not yet supported for Pig and Hive");
        case GENERIC:
          orcReadBuilder.createReaderFunc(
              fileSchema -> GenericOrcReader.buildReader(
                  readSchema, fileSchema, idToConstant));
      }

      return applyResidualFiltering(orcReadBuilder.build(), task.residual(), readSchema);
    }

    /**
     * Returns identity-partition constant values keyed by field id, or an empty
     * map when the expected projection includes no identity partition columns.
     */
    private Map<Integer, ?> constantsMap(FileScanTask task, BiFunction<Type, Object, Object> converter) {
      PartitionSpec spec = task.spec();
      Set<Integer> idColumns = spec.identitySourceIds();
      Schema partitionSchema = TypeUtil.select(expectedSchema, idColumns);
      boolean projectsIdentityPartitionColumns = !partitionSchema.columns().isEmpty();
      if (projectsIdentityPartitionColumns) {
        return PartitionUtil.constantsMap(task, converter);
      } else {
        return Collections.emptyMap();
      }
    }
  }
}
| 1 | 23,827 | Aren't there already logs for this from the underlying file system implementation? | apache-iceberg | java |
@@ -75,7 +75,7 @@ class SearchHandlerTest extends TestCase
{
$spec = ['DismaxParams' => [['foo', 'bar'], ['mm', '100%']], 'DismaxFields' => ['field1', 'field2']];
$hndl = new SearchHandler($spec);
- $defaults = ['CustomMunge' => [], 'DismaxHandler' => 'dismax', 'QueryFields' => [], 'FilterQuery' => []];
+ $defaults = ['CustomMunge' => [], 'DismaxHandler' => 'dismax', 'QueryFields' => [], 'FilterQuery' => [], 'DismaxMunge' => []];
$this->assertEquals($spec + $defaults, $hndl->toArray());
}
| 1 | <?php
/**
* Unit tests for SOLR search handler.
*
* PHP version 7
*
* Copyright (C) Villanova University 2010.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Search
* @author David Maus <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org
*/
namespace VuFindTest\Backend\Solr;
use PHPUnit\Framework\TestCase;
use VuFindSearch\Backend\Solr\SearchHandler;
/**
* Unit tests for SOLR search handler.
*
* @category VuFind
* @package Search
* @author David Maus <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org
*/
class SearchHandlerTest extends TestCase
{
    /**
     * Test creating simple dismax query.
     *
     * @return void
     */
    public function testSimpleSearchDismax()
    {
        // The plain dismax handler emits mm='100%' by default (compare with
        // the edismax test below, which expects mm='0%').
        $spec = ['DismaxParams' => [['foo', 'bar']], 'DismaxFields' => ['field1', 'field2']];
        $hndl = new SearchHandler($spec);
        $this->assertEquals('(_query_:"{!dismax qf=\"field1 field2\" foo=\\\'bar\\\' mm=\\\'100%\\\'}foobar")', $hndl->createSimpleQueryString('foobar'));
    }
    /**
     * Test creating simple standard query.
     *
     * @return void
     */
    public function testSimpleStandardSearch()
    {
        // Non-dismax spec: terms are OR'ed across the configured query fields,
        // with quoted phrases and escaped quotes preserved.
        $spec = ['QueryFields' => ['id' => [['or', '~']]]];
        $hndl = new SearchHandler($spec);
        $this->assertEquals('(id:("escaped\"quote" OR not OR quoted OR "basic phrase"))', $hndl->createSimpleQueryString('"escaped\"quote" not quoted "basic phrase"'));
    }
    /**
     * Test toArray() method.
     *
     * @return void
     */
    public function testToArray()
    {
        // toArray() should return the provided spec merged with the handler's
        // default (empty) settings for the remaining keys.
        $spec = ['DismaxParams' => [['foo', 'bar'], ['mm', '100%']], 'DismaxFields' => ['field1', 'field2']];
        $hndl = new SearchHandler($spec);
        $defaults = ['CustomMunge' => [], 'DismaxHandler' => 'dismax', 'QueryFields' => [], 'FilterQuery' => []];
        $this->assertEquals($spec + $defaults, $hndl->toArray());
    }
    /**
     * Test creating extended dismax query.
     *
     * @return void
     */
    public function testSimpleSearchExtendedDismax()
    {
        // Passing 'edismax' as the second constructor argument switches the
        // handler type; the expected query uses {!edismax ...} and mm='0%'.
        $spec = ['DismaxParams' => [['foo', 'bar']], 'DismaxFields' => ['field1', 'field2']];
        $hndl = new SearchHandler($spec, 'edismax');
        $this->assertEquals('(_query_:"{!edismax qf=\"field1 field2\" foo=\\\'bar\\\' mm=\\\'0%\\\'}foobar")', $hndl->createSimpleQueryString('foobar'));
    }
    /**
     * Test custom munge rules.
     *
     * @return void
     */
    public function testCustomMunge()
    {
        // fake munge rules based on a simplified version of default searchspecs.yaml
        $spec = [
            'CustomMunge' => [
                'callnumber_exact' => [
                    ['uppercase'],
                    ['preg_replace', '/[ "]/', ""],
                    ['preg_replace', '/\*+$/', ""]
                ],
                'callnumber_fuzzy' => [
                    ['uppercase'],
                    ['preg_replace', '/[ "]/', ""],
                    ['preg_replace', '/\*+$/', ""],
                    ['append', '*']
                ]
            ],
            'QueryFields' => [
                'callnumber' => [
                    ['callnumber_exact', 1000],
                    ['callnumber_fuzzy', '~'],
                ],
                'dewey-full' => [
                    ['callnumber_exact', 1000],
                    ['callnumber_fuzzy', '~'],
                ]
            ]
        ];
        // Input 'abc"123*' should be uppercased, stripped of quotes, and have
        // the trailing wildcard removed (re-added only by the fuzzy rule).
        $hndl = new SearchHandler($spec);
        $this->assertEquals(
            '(callnumber:(ABC123)^1000 OR callnumber:(ABC123*) OR dewey-full:(ABC123)^1000 OR dewey-full:(ABC123*))',
            $hndl->createSimpleQueryString('abc"123*')
        );
    }
}
| 1 | 28,039 | It would be good to have a test in here that demonstrates the new munge functionality; I can help set that up if you're not sure how. | vufind-org-vufind | php |
@@ -62,6 +62,9 @@ class kubernetes(luigi.Config):
kubernetes_namespace = luigi.OptionalParameter(
default=None,
description="K8s namespace in which the job will run")
+ max_retrials_to_get_pods = luigi.IntParameter(
+ default=0,
+ description="Max retrials to get pods' informations")
class KubernetesJobTask(luigi.Task): | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Outlier Bio, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Kubernetes Job wrapper for Luigi.
From the Kubernetes website:
Kubernetes is an open-source system for automating deployment, scaling,
and management of containerized applications.
For more information about Kubernetes Jobs: http://kubernetes.io/docs/user-guide/jobs/
Requires:
- pykube: ``pip install pykube``
Written and maintained by Marco Capuccini (@mcapuccini).
"""
import logging
import time
import uuid
from datetime import datetime
import luigi
logger = logging.getLogger('luigi-interface')
try:
from pykube.config import KubeConfig
from pykube.http import HTTPClient
from pykube.objects import Job, Pod
except ImportError:
logger.warning('pykube is not installed. KubernetesJobTask requires pykube.')
class kubernetes(luigi.Config):
    """Configuration section for the Kubernetes job runner.

    The lowercase class name doubles as the luigi config section name,
    so these values can be set under ``[kubernetes]`` in the config file.
    """

    # How to authenticate against the cluster: "kubeconfig" or "service-account".
    auth_method = luigi.Parameter(
        default="kubeconfig",
        description="Authorization method to access the cluster")
    # Only consulted when auth_method == "kubeconfig".
    kubeconfig_path = luigi.Parameter(
        default="~/.kube/config",
        description="Path to kubeconfig file for cluster authentication")
    # Extra retries on top of what the K8s Job itself performs; see
    # KubernetesJobTask.__get_job_status.
    max_retrials = luigi.IntParameter(
        default=0,
        description="Max retrials in event of job failure")
    # None means "use the cluster's default namespace".
    kubernetes_namespace = luigi.OptionalParameter(
        default=None,
        description="K8s namespace in which the job will run")
class KubernetesJobTask(luigi.Task):
    """Luigi task that runs its payload as a Kubernetes Job.

    Subclasses must define :attr:`name` and :attr:`spec_schema`.  ``run``
    submits the Job via pykube and then polls it every ``__POLL_TIME``
    seconds until it succeeds (``signal_complete`` is called) or fails
    (an ``AssertionError`` fails the luigi task).
    """

    __POLL_TIME = 5  # see __track_job
    _kubernetes_config = None  # Needs to be loaded at runtime

    def _init_kubernetes(self):
        """Build the pykube API client and a unique name for this job run."""
        self.__logger = logger
        self.__logger.debug("Kubernetes auth method: " + self.auth_method)
        if self.auth_method == "kubeconfig":
            self.__kube_api = HTTPClient(KubeConfig.from_file(self.kubeconfig_path))
        elif self.auth_method == "service-account":
            self.__kube_api = HTTPClient(KubeConfig.from_service_account())
        else:
            raise ValueError("Illegal auth_method")
        self.job_uuid = str(uuid.uuid4().hex)
        now = datetime.utcnow()
        # Job names must be unique per run: <name>-<UTC timestamp>-<16 uuid chars>.
        self.uu_name = "%s-%s-%s" % (self.name, now.strftime('%Y%m%d%H%M%S'), self.job_uuid[:16])

    @property
    def auth_method(self):
        """
        This can be set to ``kubeconfig`` or ``service-account``.
        It defaults to ``kubeconfig``.

        For more details, please refer to:

        - kubeconfig: http://kubernetes.io/docs/user-guide/kubeconfig-file
        - service-account: http://kubernetes.io/docs/user-guide/service-accounts
        """
        return self.kubernetes_config.auth_method

    @property
    def kubeconfig_path(self):
        """
        Path to kubeconfig file used for cluster authentication.
        It defaults to "~/.kube/config", which is the default location
        when using minikube (http://kubernetes.io/docs/getting-started-guides/minikube).
        When auth_method is ``service-account`` this property is ignored.

        **WARNING**: For Python versions < 3.5 kubeconfig must point to a Kubernetes API
        hostname, and NOT to an IP address.

        For more details, please refer to:
        http://kubernetes.io/docs/user-guide/kubeconfig-file
        """
        return self.kubernetes_config.kubeconfig_path

    @property
    def kubernetes_namespace(self):
        """
        Namespace in Kubernetes where the job will run.
        It defaults to the default namespace in Kubernetes

        For more details, please refer to:
        https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
        """
        return self.kubernetes_config.kubernetes_namespace

    @property
    def name(self):
        """
        A name for this job. This task will automatically append a UUID to the
        name before to submit to Kubernetes.
        """
        raise NotImplementedError("subclass must define name")

    @property
    def labels(self):
        """
        Return custom labels for kubernetes job.
        example::
        ``{"run_dt": datetime.date.today().strftime('%F')}``
        """
        return {}

    @property
    def spec_schema(self):
        """
        Kubernetes Job spec schema in JSON format, an example follows.

        .. code-block:: javascript

            {
                "containers": [{
                    "name": "pi",
                    "image": "perl",
                    "command": ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
                }],
                "restartPolicy": "Never"
            }

        **restartPolicy**

        - If restartPolicy is not defined, it will be set to "Never" by default.
        - **Warning**: restartPolicy=OnFailure will bypass max_retrials, and restart
          the container until success, with the risk of blocking the Luigi task.

        For more informations please refer to:
        http://kubernetes.io/docs/user-guide/pods/multi-container/#the-spec-schema
        """
        raise NotImplementedError("subclass must define spec_schema")

    @property
    def max_retrials(self):
        """
        Maximum number of retrials in case of failure.
        """
        return self.kubernetes_config.max_retrials

    @property
    def backoff_limit(self):
        """
        Maximum number of retries before considering the job as failed.
        See: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy
        """
        return 6

    @property
    def delete_on_success(self):
        """
        Delete the Kubernetes workload if the job has ended successfully.
        """
        return True

    @property
    def print_pod_logs_on_exit(self):
        """
        Fetch and print the pod logs once the job is completed.
        """
        return False

    @property
    def active_deadline_seconds(self):
        """
        Time allowed to successfully schedule pods.
        See: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#job-termination-and-cleanup
        """
        return None

    @property
    def kubernetes_config(self):
        # Lazily instantiate the config section so it is read at runtime.
        if not self._kubernetes_config:
            self._kubernetes_config = kubernetes()
        return self._kubernetes_config

    def __track_job(self):
        """Poll job status while active"""
        # Phase 1: wait for the job/pods to be scheduled and start.
        while not self.__verify_job_has_started():
            time.sleep(self.__POLL_TIME)
            self.__logger.debug("Waiting for Kubernetes job " + self.uu_name + " to start")
        self.__print_kubectl_hints()

        # Phase 2: poll until the job leaves the RUNNING state.
        status = self.__get_job_status()
        while status == "RUNNING":
            self.__logger.debug("Kubernetes job " + self.uu_name + " is running")
            time.sleep(self.__POLL_TIME)
            status = self.__get_job_status()

        # A failed job fails the luigi task via this assertion.
        assert status != "FAILED", "Kubernetes job " + self.uu_name + " failed"

        # status == "SUCCEEDED"
        self.__logger.info("Kubernetes job " + self.uu_name + " succeeded")
        self.signal_complete()

    def signal_complete(self):
        """Signal job completion for scheduler and dependent tasks.

        Touching a system file is an easy way to signal completion. example::
        .. code-block:: python

            with self.output().open('w') as output_file:
                output_file.write('')
        """
        pass

    def __get_pods(self):
        # Kubernetes labels the pods it spawns for a Job with "job-name=<job>".
        pod_objs = Pod.objects(self.__kube_api, namespace=self.kubernetes_namespace) \
            .filter(selector="job-name=" + self.uu_name) \
            .response['items']
        return [Pod(self.__kube_api, p) for p in pod_objs]

    def __get_job(self):
        # The job is looked up by the luigi_task_id label set in run().
        jobs = Job.objects(self.__kube_api, namespace=self.kubernetes_namespace) \
            .filter(selector="luigi_task_id=" + self.job_uuid) \
            .response['items']
        assert len(jobs) == 1, "Kubernetes job " + self.uu_name + " not found"
        return Job(self.__kube_api, jobs[0])

    def __print_pod_logs(self):
        # Dump each pod's (timestamped) log lines through the luigi logger.
        for pod in self.__get_pods():
            logs = pod.logs(timestamps=True).strip()
            self.__logger.info("Fetching logs from " + pod.name)
            if len(logs) > 0:
                for l in logs.split('\n'):
                    self.__logger.info(l)

    def __print_kubectl_hints(self):
        # Tell the user how to follow the job's pods from the command line.
        self.__logger.info("To stream Pod logs, use:")
        for pod in self.__get_pods():
            self.__logger.info("`kubectl logs -f pod/%s`" % pod.name)

    def __verify_job_has_started(self):
        """Asserts that the job has successfully started"""
        # Verify that the job started
        self.__get_job()

        # Verify that the pod started
        pods = self.__get_pods()

        assert len(pods) > 0, "No pod scheduled by " + self.uu_name
        for pod in pods:
            status = pod.obj['status']
            for cont_stats in status.get('containerStatuses', []):
                if 'terminated' in cont_stats['state']:
                    # A container that terminated with a non-zero code is a hard failure.
                    t = cont_stats['state']['terminated']
                    err_msg = "Pod %s %s (exit code %d). Logs: `kubectl logs pod/%s`" % (
                        pod.name, t['reason'], t['exitCode'], pod.name)
                    assert t['exitCode'] == 0, err_msg

                if 'waiting' in cont_stats['state']:
                    # Any waiting reason other than ContainerCreating (e.g.
                    # ImagePullBackOff) is treated as a failure.
                    wr = cont_stats['state']['waiting']['reason']
                    assert wr == 'ContainerCreating', "Pod %s %s. Logs: `kubectl logs pod/%s`" % (
                        pod.name, wr, pod.name)

            for cond in status.get('conditions', []):
                if 'message' in cond:
                    if cond['reason'] == 'ContainersNotReady':
                        return False
                    assert cond['status'] != 'False', \
                        "[ERROR] %s - %s" % (cond['reason'], cond['message'])
        return True

    def __get_job_status(self):
        """Return the Kubernetes job status"""
        # Figure out status and return it
        job = self.__get_job()

        if "succeeded" in job.obj["status"] and job.obj["status"]["succeeded"] > 0:
            job.scale(replicas=0)
            if self.print_pod_logs_on_exit:
                self.__print_pod_logs()
            if self.delete_on_success:
                self.__delete_job_cascade(job)
            return "SUCCEEDED"

        if "failed" in job.obj["status"]:
            failed_cnt = job.obj["status"]["failed"]
            self.__logger.debug("Kubernetes job " + self.uu_name
                                + " status.failed: " + str(failed_cnt))
            if self.print_pod_logs_on_exit:
                self.__print_pod_logs()
            # Keep reporting RUNNING until the failure count exceeds max_retrials.
            if failed_cnt > self.max_retrials:
                job.scale(replicas=0)  # avoid more retrials
                return "FAILED"
        return "RUNNING"

    def __delete_job_cascade(self, job):
        # propagationPolicy=Background makes Kubernetes delete the job's
        # dependents (pods) as well, not just the Job object itself.
        delete_options_cascade = {
            "kind": "DeleteOptions",
            "apiVersion": "v1",
            "propagationPolicy": "Background"
        }
        r = self.__kube_api.delete(json=delete_options_cascade, **job.api_kwargs())
        if r.status_code != 200:
            self.__kube_api.raise_for_status(r)

    def run(self):
        """Submit the Job to the cluster and block until it finishes."""
        self._init_kubernetes()
        # Render job
        job_json = {
            "apiVersion": "batch/v1",
            "kind": "Job",
            "metadata": {
                "name": self.uu_name,
                "labels": {
                    "spawned_by": "luigi",
                    # Used by __get_job to find this job again.
                    "luigi_task_id": self.job_uuid
                }
            },
            "spec": {
                "backoffLimit": self.backoff_limit,
                "template": {
                    "metadata": {
                        "name": self.uu_name
                    },
                    "spec": self.spec_schema
                }
            }
        }
        if self.kubernetes_namespace is not None:
            job_json['metadata']['namespace'] = self.kubernetes_namespace
        if self.active_deadline_seconds is not None:
            job_json['spec']['activeDeadlineSeconds'] = \
                self.active_deadline_seconds
        # Update user labels
        job_json['metadata']['labels'].update(self.labels)

        # Add default restartPolicy if not specified
        if "restartPolicy" not in self.spec_schema:
            job_json["spec"]["template"]["spec"]["restartPolicy"] = "Never"

        # Submit job
        self.__logger.info("Submitting Kubernetes Job: " + self.uu_name)
        job = Job(self.__kube_api, job_json)
        job.create()

        # Track the Job (wait while active)
        self.__logger.info("Start tracking Kubernetes Job: " + self.uu_name)
        self.__track_job()

    def output(self):
        """
        An output target is necessary for checking job completion unless
        an alternative complete method is defined.

        Example::
            return luigi.LocalTarget(os.path.join('/tmp', 'example'))
        """
        pass
| 1 | 19,135 | `retrials` isn't the word you intend. I think you mean `retries` and to that end, can the var name just be `max_retries`? | spotify-luigi | py |
@@ -213,6 +213,7 @@ class Shopware6ChannelForm extends AbstractType
AttributeIdType::class,
[
'label' => 'Attribute Product Meta Title',
+                        'help' => 'Value in product should contain 255 characters or less.',
'choices' => array_flip($textareaAttributeDictionary),
'property_path' => 'attributeProductMetaTitle',
'required' => false, | 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types=1);
namespace Ergonode\ExporterShopware6\Application\Form;
use Ergonode\Attribute\Application\Form\Type\AttributeIdType;
use Ergonode\Attribute\Domain\Entity\Attribute\GalleryAttribute;
use Ergonode\Attribute\Domain\Entity\Attribute\NumericAttribute;
use Ergonode\Attribute\Domain\Entity\Attribute\PriceAttribute;
use Ergonode\Attribute\Domain\Entity\Attribute\TextareaAttribute;
use Ergonode\Attribute\Domain\Entity\Attribute\TextAttribute;
use Ergonode\Attribute\Domain\Query\AttributeQueryInterface;
use Ergonode\Category\Domain\Query\TreeQueryInterface;
use Ergonode\Core\Domain\Query\LanguageQueryInterface;
use Ergonode\Core\Domain\ValueObject\Language;
use Ergonode\ExporterShopware6\Application\Form\Type\CustomFieldAttributeMapType;
use Ergonode\ExporterShopware6\Application\Form\Type\PropertyGroupAttributeMapType;
use Ergonode\ExporterShopware6\Application\Model\Shopware6ChannelFormModel;
use Ergonode\ProductCollection\Domain\Query\ProductCollectionQueryInterface;
use Ergonode\Segment\Domain\Query\SegmentQueryInterface;
use Symfony\Component\Form\AbstractType;
use Symfony\Component\Form\Extension\Core\Type\ChoiceType;
use Symfony\Component\Form\Extension\Core\Type\CollectionType;
use Symfony\Component\Form\Extension\Core\Type\TextType;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\OptionsResolver\OptionsResolver;
/**
 * Form for configuring a Shopware 6 export channel: API credentials,
 * languages, and the mapping of Ergonode attributes to Shopware product fields.
 */
class Shopware6ChannelForm extends AbstractType
{
    private AttributeQueryInterface $attributeQuery;

    private LanguageQueryInterface $languageQuery;

    private TreeQueryInterface $categoryTreeQuery;

    private SegmentQueryInterface $segmentQuery;

    private ProductCollectionQueryInterface $productCollectionQuery;

    public function __construct(
        AttributeQueryInterface $attributeQuery,
        LanguageQueryInterface $languageQuery,
        TreeQueryInterface $categoryTreeQuery,
        SegmentQueryInterface $segmentQuery,
        ProductCollectionQueryInterface $productCollectionQuery
    ) {
        $this->attributeQuery = $attributeQuery;
        $this->languageQuery = $languageQuery;
        $this->categoryTreeQuery = $categoryTreeQuery;
        $this->segmentQuery = $segmentQuery;
        $this->productCollectionQuery = $productCollectionQuery;
    }

    /**
     * Build the channel configuration form.
     *
     * @param array $options
     */
    public function buildForm(FormBuilderInterface $builder, array $options): void
    {
        // Attribute choice lists, one per attribute type accepted by the mapped field.
        $priceAttributeDictionary = $this->attributeQuery->getDictionary([PriceAttribute::TYPE]);
        $textAttributeDictionary = $this->attributeQuery->getDictionary([TextAttribute::TYPE]);
        $textareaAttributeDictionary = $this->attributeQuery->getDictionary([TextareaAttribute::TYPE]);
        $numericAttributeDictionary = $this->attributeQuery->getDictionary([NumericAttribute::TYPE]);
        $galleryAttributeDictionary = $this->attributeQuery->getDictionary([GalleryAttribute::TYPE]);
        $languages = $this->languageQuery->getDictionaryActive();
        // NOTE(review): category tree labels are always fetched in en_GB — confirm this is intended.
        $categoryTrees = $this->categoryTreeQuery->getDictionary(new Language('en_GB'));
        $segmentDictionary = $this->segmentQuery->getDictionary();
        $productCollectionDictionary = $this->productCollectionQuery->getDictionary();

        $builder
            ->add(
                'name',
                TextType::class,
                [
                    'label' => 'Name',
                ]
            )
            ->add(
                'host',
                TextType::class,
                [
                    'help' => 'Enter the host API address',
                    'label' => 'API host',
                ]
            )
            ->add(
                'client_id',
                TextType::class,
                [
                    'label' => 'Access key ID',
                    'property_path' => 'clientId',
                ]
            )
            ->add(
                'client_key',
                TextType::class,
                [
                    'label' => 'Secret access key',
                    'property_path' => 'clientKey',
                ]
            )
            ->add(
                'segment',
                ChoiceType::class,
                [
                    'label' => 'Segment',
                    'property_path' => 'segment',
                    'choices' => array_flip($segmentDictionary),
                    'required' => false,
                ]
            )
            ->add(
                'default_language',
                ChoiceType::class,
                [
                    'label' => 'Default Language',
                    'property_path' => 'defaultLanguage',
                    'choices' => $languages,
                ]
            )
            ->add(
                'languages',
                ChoiceType::class,
                [
                    'label' => 'List of languages',
                    'choices' => $languages,
                    'multiple' => true,
                    'property_path' => 'languages',
                    'required' => false,
                ]
            )
            ->add(
                'attribute_product_name',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Name',
                    'choices' => array_flip($textAttributeDictionary),
                    'property_path' => 'attributeProductName',
                ]
            )
            ->add(
                'attribute_product_active',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Active',
                    'choices' => array_flip($numericAttributeDictionary),
                    'property_path' => 'attributeProductActive',
                ]
            )
            ->add(
                'attribute_product_stock',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Stock',
                    'choices' => array_flip($numericAttributeDictionary),
                    'property_path' => 'attributeProductStock',
                ]
            )
            ->add(
                'attribute_product_price_gross',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Price Gross',
                    'choices' => array_flip($priceAttributeDictionary),
                    'property_path' => 'attributeProductPriceGross',
                ]
            )
            ->add(
                'attribute_product_price_net',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Price Net',
                    'choices' => array_flip($priceAttributeDictionary),
                    'property_path' => 'attributeProductPriceNet',
                ]
            )
            ->add(
                'attribute_product_tax',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Tax',
                    'choices' => array_flip($numericAttributeDictionary),
                    'property_path' => 'attributeProductTax',
                ]
            )
            ->add(
                'attribute_product_description',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Description',
                    'choices' => array_flip($textareaAttributeDictionary),
                    'property_path' => 'attributeProductDescription',
                    'required' => false,
                ]
            )
            ->add(
                'attribute_product_gallery',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Gallery',
                    'choices' => array_flip($galleryAttributeDictionary),
                    'property_path' => 'attributeProductGallery',
                    'required' => false,
                ]
            )
            ->add(
                'attribute_product_meta_title',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Meta Title',
                    // Shopware's product meta title is length-limited, so warn the user up front.
                    'help' => 'Value in product should contain 255 characters or less.',
                    'choices' => array_flip($textareaAttributeDictionary),
                    'property_path' => 'attributeProductMetaTitle',
                    'required' => false,
                ]
            )
            ->add(
                'attribute_product_meta_description',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Meta Description',
                    'choices' => array_flip($textareaAttributeDictionary),
                    'property_path' => 'attributeProductMetaDescription',
                    'required' => false,
                ]
            )
            ->add(
                'attribute_product_keywords',
                AttributeIdType::class,
                [
                    'label' => 'Attribute Product Keywords',
                    'choices' => array_flip($textareaAttributeDictionary),
                    'property_path' => 'attributeProductKeywords',
                    'required' => false,
                ]
            )
            ->add(
                'category_tree',
                ChoiceType::class,
                [
                    'label' => 'Category tree',
                    'property_path' => 'categoryTree',
                    'choices' => array_flip($categoryTrees),
                    'required' => false,
                ]
            )
            ->add(
                'property_group',
                CollectionType::class,
                [
                    'property_path' => 'propertyGroup',
                    'label' => 'List Property Group to Export',
                    'allow_add' => true,
                    'allow_delete' => true,
                    'entry_type' => PropertyGroupAttributeMapType::class,
                    'required' => false,
                ]
            )
            ->add(
                'custom_field',
                CollectionType::class,
                [
                    'property_path' => 'customField',
                    'label' => 'List custom field to export',
                    'allow_add' => true,
                    'allow_delete' => true,
                    'entry_type' => CustomFieldAttributeMapType::class,
                    'required' => false,
                ]
            )
            ->add(
                'cross_selling',
                ChoiceType::class,
                [
                    'label' => 'List of Product Collections',
                    'choices' => array_flip($productCollectionDictionary),
                    'multiple' => true,
                    'property_path' => 'crossSelling',
                    'required' => false,
                ]
            );
    }

    /**
     * Bind the form to the channel form model and allow extra submitted fields.
     */
    public function configureOptions(OptionsResolver $resolver): void
    {
        $resolver->setDefaults(
            [
                'translation_domain' => 'exporter',
                'data_class' => Shopware6ChannelFormModel::class,
                'allow_extra_fields' => true,
                'label' => 'Export settings',
            ]
        );
    }

    /**
     * No block prefix: the form is rendered without a wrapping name.
     */
    public function getBlockPrefix(): ?string
    {
        return null;
    }
}
| 1 | 9,273 | Value in product should contain 255 characters or less. | ergonode-backend | php |
@@ -169,12 +169,12 @@ func main() {
logger.Error().Msg(fmt.Sprintf("Node %d: Can not get State height", i))
}
bcHeights[i] = chains[i].TipHeight()
- minTimeout = int(configs[i].Consensus.RollDPoS.Delay/time.Second - configs[i].Consensus.RollDPoS.ProposerInterval/time.Second)
+ minTimeout = int(configs[i].Consensus.RollDPoS.Delay/time.Second - configs[i].Consensus.RollDPoS.FSM.ProposerInterval/time.Second)
netTimeout = 0
if timeout > minTimeout {
netTimeout = timeout - minTimeout
}
- idealHeight[i] = uint64((time.Duration(netTimeout) * time.Second) / configs[i].Consensus.RollDPoS.ProposerInterval)
+ idealHeight[i] = uint64((time.Duration(netTimeout) * time.Second) / configs[i].Consensus.RollDPoS.FSM.ProposerInterval)
logger.Info().Msg(fmt.Sprintf("Node#%d blockchain height: %d", i, bcHeights[i]))
logger.Info().Msg(fmt.Sprintf("Node#%d state height: %d", i, stateHeights[i])) | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
// usage: make minicluster
package main
import (
"flag"
"fmt"
"math"
"sync"
"time"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/explorer"
"github.com/iotexproject/iotex-core/logger"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/server/itx"
"github.com/iotexproject/iotex-core/testutil"
"github.com/iotexproject/iotex-core/tools/util"
)
const (
	// numNodes is the total number of delegate nodes in the mini-cluster.
	numNodes = 4
	// numAdmins is the number of admin addresses taken from the tail of the
	// loaded address list; the rest are delegates.
	numAdmins = 2
)
// main boots a numNodes-node local cluster, injects actions into the first
// node for the configured duration, then checks that all nodes converged to
// consistent blockchain/state heights.
func main() {
	// timeout indicates the duration of running nightly build in seconds. Default is 100
	var timeout int
	// aps indicates how many actions to be injected in one second. Default is 1
	var aps float64
	// smart contract deployment bytecode; see the -deploy-data flag below for the default value
	var deployExecData string
	// smart contract interaction data. Default is "d0e30db0"
	var interactExecData string

	flag.IntVar(&timeout, "timeout", 100, "duration of running nightly build")
	flag.Float64Var(&aps, "aps", 1, "actions to be injected per second")
	flag.StringVar(&deployExecData, "deploy-data", "608060405234801561001057600080fd5b506102f5806100206000396000f3006080604052600436106100615763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416632885ad2c8114610066578063797d9fbd14610070578063cd5e3c5d14610091578063d0e30db0146100b8575b600080fd5b61006e6100c0565b005b61006e73ffffffffffffffffffffffffffffffffffffffff600435166100cb565b34801561009d57600080fd5b506100a6610159565b60408051918252519081900360200190f35b61006e610229565b6100c9336100cb565b565b60006100d5610159565b6040805182815290519192507fbae72e55df73720e0f671f4d20a331df0c0dc31092fda6c573f35ff7f37f283e919081900360200190a160405173ffffffffffffffffffffffffffffffffffffffff8316906305f5e100830280156108fc02916000818181858888f19350505050158015610154573d6000803e3d6000fd5b505050565b604080514460208083019190915260001943014082840152825180830384018152606090920192839052815160009360059361021a9360029391929182918401908083835b602083106101bd5780518252601f19909201916020918201910161019e565b51815160209384036101000a600019018019909216911617905260405191909301945091925050808303816000865af11580156101fe573d6000803e3d6000fd5b5050506040513d602081101561021357600080fd5b5051610261565b81151561022357fe5b06905090565b60408051348152905133917fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c919081900360200190a2565b600080805b60208110156102c25780600101602060ff160360080260020a848260208110151561028d57fe5b7f010000000000000000000000000000000000000000000000000000000000000091901a810204029190910190600101610266565b50929150505600a165627a7a72305820a426929891673b0a04d7163b60113d28e7d0f48ea667680ba48126c182b872c10029",
		"smart contract deployment data")
	flag.StringVar(&interactExecData, "interact-data", "d0e30db0", "smart contract interaction data")
	flag.Parse()

	// path of config file containing all the public/private key pairs of addresses getting transfers
	// from Creator in genesis block
	injectorConfigPath := "./tools/minicluster/gentsfaddrs.yaml"
	chainAddrs, err := util.LoadAddresses(injectorConfigPath, uint32(1))
	if err != nil {
		logger.Fatal().Err(err).Msg("Failed to load addresses from config path")
	}
	admins := chainAddrs[len(chainAddrs)-numAdmins:]
	delegates := chainAddrs[:len(chainAddrs)-numAdmins]

	// path of config file containing all the transfers and self-nominations in genesis block
	genesisConfigPath := "./tools/minicluster/testnet_actions.yaml"

	// Set mini-cluster configurations
	configs := make([]config.Config, numNodes)
	for i := 0; i < numNodes; i++ {
		chainDBPath := fmt.Sprintf("./chain%d.db", i+1)
		trieDBPath := fmt.Sprintf("./trie%d.db", i+1)
		networkPort := 4689 + i
		explorerPort := 14004 + i
		// Named cfg (not config) to avoid shadowing the imported config package.
		cfg := newConfig(genesisConfigPath, chainDBPath, trieDBPath, chainAddrs[i].PublicKey,
			chainAddrs[i].PrivateKey, networkPort, explorerPort)
		configs[i] = cfg
	}

	initLogger()

	// Create mini-cluster
	svrs := make([]*itx.Server, numNodes)
	for i := 0; i < numNodes; i++ {
		svr, err := itx.NewServer(configs[i])
		if err != nil {
			logger.Fatal().Err(err).Msg("Failed to create server.")
		}
		svrs[i] = svr
	}
	// Start mini-cluster
	for i := 0; i < numNodes; i++ {
		go itx.StartServer(svrs[i], configs[i])
	}

	// Block until the first node's explorer JSON-RPC server is reachable.
	if err := testutil.WaitUntil(10*time.Millisecond, 2*time.Second, func() (bool, error) {
		return svrs[0].ChainService(uint32(1)).Explorer().Port() == 14004, nil
	}); err != nil {
		logger.Fatal().Err(err).Msg("Failed to start explorer JSON-RPC server")
	}

	// target address for jrpc connection. Default is "127.0.0.1:14004"
	jrpcAddr := "127.0.0.1:14004"
	client := explorer.NewExplorerProxy("http://" + jrpcAddr)

	counter, err := util.InitCounter(client, chainAddrs)
	if err != nil {
		logger.Fatal().Err(err).Msg("Failed to initialize nonce counter")
	}

	// Inject actions to first node
	if aps > 0 {
		// transfer gas limit. Default is 1000000
		transferGasLimit := 1000000
		// transfer gas price. Default is 10
		transferGasPrice := 10
		// transfer payload. Default is ""
		transferPayload := ""
		// vote gas limit. Default is 1000000
		voteGasLimit := 1000000
		// vote gas price. Default is 10
		voteGasPrice := 10
		// execution amount. Default is 0
		executionAmount := 0
		// execution gas limit. Default is 1200000
		executionGasLimit := 1200000
		// execution gas price. Default is 10
		executionGasPrice := 10
		// maximum number of rpc retries. Default is 5
		retryNum := 5
		// sleeping period between two consecutive rpc retries in seconds. Default is 1
		retryInterval := 1
		// reset interval indicates the interval to reset nonce counter in seconds. Default is 60
		resetInterval := 60
		d := time.Duration(timeout) * time.Second

		// First deploy a smart contract which can be interacted by injected executions
		eHash, err := util.DeployContract(client, counter, delegates, executionGasLimit, executionGasPrice,
			deployExecData, retryNum, retryInterval)
		if err != nil {
			logger.Fatal().Err(err).Msg("Failed to deploy smart contract")
		}
		// Wait until the smart contract is successfully deployed
		var receipt *action.Receipt
		if err := testutil.WaitUntil(100*time.Millisecond, 60*time.Second, func() (bool, error) {
			receipt, err = svrs[0].ChainService(uint32(1)).Blockchain().GetReceiptByExecutionHash(eHash)
			return receipt != nil, nil
		}); err != nil {
			logger.Fatal().Err(err).Msg("Failed to get receipt of execution deployment")
		}
		contract := receipt.ContractAddress

		wg := &sync.WaitGroup{}
		util.InjectByAps(wg, aps, counter, transferGasLimit, transferGasPrice, transferPayload, voteGasLimit, voteGasPrice,
			contract, executionAmount, executionGasLimit, executionGasPrice, interactExecData, client, admins, delegates, d,
			retryNum, retryInterval, resetInterval)
		wg.Wait()

		chains := make([]blockchain.Blockchain, numNodes)
		stateHeights := make([]uint64, numNodes)
		bcHeights := make([]uint64, numNodes)
		idealHeight := make([]uint64, numNodes)

		var netTimeout int
		var minTimeout int
		for i := 0; i < numNodes; i++ {
			chains[i] = svrs[i].ChainService(configs[i].Chain.ID).Blockchain()

			stateHeights[i], err = chains[i].GetFactory().Height()
			if err != nil {
				logger.Error().Msg(fmt.Sprintf("Node %d: Can not get State height", i))
			}
			bcHeights[i] = chains[i].TipHeight()
			// netTimeout is the injection time left after the consensus warm-up delay;
			// wrapped across two lines to satisfy the lll line-length lint.
			minTimeout = int(configs[i].Consensus.RollDPoS.Delay/time.Second -
				configs[i].Consensus.RollDPoS.ProposerInterval/time.Second)
			netTimeout = 0
			if timeout > minTimeout {
				netTimeout = timeout - minTimeout
			}
			// One block is expected per proposer interval of net running time.
			idealHeight[i] = uint64((time.Duration(netTimeout) * time.Second) / configs[i].Consensus.RollDPoS.ProposerInterval)

			logger.Info().Msg(fmt.Sprintf("Node#%d blockchain height: %d", i, bcHeights[i]))
			logger.Info().Msg(fmt.Sprintf("Node#%d state height: %d", i, stateHeights[i]))
			logger.Info().Msg(fmt.Sprintf("Node#%d ideal height: %d", i, idealHeight[i]))

			if bcHeights[i] != stateHeights[i] {
				logger.Error().Msg(fmt.Sprintf("Node#%d: State height does not match blockchain height", i))
			}
			if math.Abs(float64(bcHeights[i]-idealHeight[i])) > 1 {
				logger.Error().Msg(fmt.Sprintf("blockchain in Node#%d is behind the expected height", i))
			}
		}

		// Pairwise check that every pair of nodes is within one block of each other.
		for i := 0; i < numNodes; i++ {
			for j := i + 1; j < numNodes; j++ {
				if math.Abs(float64(bcHeights[i]-bcHeights[j])) > 1 {
					logger.Error().Msg(fmt.Sprintf("blockchain in Node#%d and blockchain in Node#%d are not sync", i, j))
				} else {
					logger.Info().Msg(fmt.Sprintf("blockchain in Node#%d and blockchain in Node#%d are sync", i, j))
				}
			}
		}
	}
}
// newConfig assembles the configuration for one delegate node of the
// mini-cluster, starting from config.Default and overriding network, chain,
// RollDPoS consensus and explorer settings for local testing.
func newConfig(
	genesisConfigPath,
	chainDBPath,
	trieDBPath string,
	producerPubKey keypair.PublicKey,
	producerPriKey keypair.PrivateKey,
	networkPort,
	explorerPort int,
) config.Config {
	cfg := config.Default
	cfg.NodeType = config.DelegateType
	cfg.Network.Port = networkPort
	// Every node bootstraps from the first node's P2P port (4689; see main).
	cfg.Network.BootstrapNodes = []string{"127.0.0.1:4689"}
	cfg.Chain.ID = 1
	cfg.Chain.GenesisActionsPath = genesisConfigPath
	cfg.Chain.ChainDBPath = chainDBPath
	cfg.Chain.TrieDBPath = trieDBPath
	cfg.Chain.NumCandidates = numNodes
	cfg.Chain.ProducerPubKey = keypair.EncodePublicKey(producerPubKey)
	cfg.Chain.ProducerPrivKey = keypair.EncodePrivateKey(producerPriKey)
	cfg.Consensus.Scheme = config.RollDPoSScheme
	cfg.Consensus.RollDPoS.DelegateInterval = 10 * time.Second
	cfg.Consensus.RollDPoS.ProposerInterval = 10 * time.Second
	cfg.Consensus.RollDPoS.UnmatchedEventInterval = 4 * time.Second
	cfg.Consensus.RollDPoS.RoundStartTTL = 30 * time.Second
	cfg.Consensus.RollDPoS.AcceptProposeTTL = 4 * time.Second
	cfg.Consensus.RollDPoS.AcceptProposalEndorseTTL = 4 * time.Second
	cfg.Consensus.RollDPoS.AcceptCommitEndorseTTL = 4 * time.Second
	cfg.Consensus.RollDPoS.Delay = 10 * time.Second
	cfg.Consensus.RollDPoS.NumSubEpochs = 2
	cfg.Consensus.RollDPoS.EventChanSize = 100000
	cfg.Consensus.RollDPoS.NumDelegates = numNodes
	cfg.Consensus.RollDPoS.TimeBasedRotation = true
	cfg.ActPool.MaxNumActsToPick = 2000
	// Metrics port 0 — presumably disables the HTTP metrics server so the
	// co-located nodes don't clash on one port; confirm against itx.Server.
	cfg.System.HTTPMetricsPort = 0
	cfg.Explorer.Enabled = true
	cfg.Explorer.Port = explorerPort
	return cfg
}
// initLogger installs a freshly configured logger, falling back to the
// default logger (with a warning) when construction fails.
func initLogger() {
	if l, err := logger.New(); err == nil {
		logger.SetLogger(l)
	} else {
		logger.Warn().Err(err).Msg("Cannot config logger, use default one.")
	}
}
| 1 | 14,366 | line is 133 characters (from `lll`) | iotexproject-iotex-core | go |
@@ -78,7 +78,7 @@ module RSpec
end
def file_options
- custom_options_file ? [custom_options] : [global_options, local_options]
+ custom_options_file ? [custom_options] : [global_options, local_options, personal_options]
end
def env_options | 1 | require 'erb'
require 'shellwords'
module RSpec
module Core
# @private
class ConfigurationOptions
attr_reader :options
def initialize(args)
@args = args
if args.include?("--default_path")
args[args.index("--default_path")] = "--default-path"
end
if args.include?("--line_number")
args[args.index("--line_number")] = "--line-number"
end
end
def configure(config)
formatters = options.delete(:formatters)
config.filter_manager = filter_manager
order(options.keys, :libs, :requires, :default_path, :pattern).each do |key|
force?(key) ? config.force(key => options[key]) : config.send("#{key}=", options[key])
end
formatters.each {|pair| config.add_formatter(*pair) } if formatters
end
def parse_options
@options ||= extract_filters_from(*all_configs).inject do |merged, pending|
merged.merge(pending) { |key, oldval, newval|
MERGED_OPTIONS.include?(key) ? oldval + newval : newval
}
end
end
def drb_argv
DrbOptions.new(options, filter_manager).options
end
def filter_manager
@filter_manager ||= RSpec::configuration.filter_manager
end
private
NON_FORCED_OPTIONS = [
:debug, :requires, :libs, :profile, :drb, :files_or_directories_to_run,
:line_numbers, :full_description, :full_backtrace, :tty
].to_set
MERGED_OPTIONS = [:requires, :libs].to_set
def force?(key)
!NON_FORCED_OPTIONS.include?(key)
end
def order(keys, *ordered)
ordered.reverse.each do |key|
keys.unshift(key) if keys.delete(key)
end
keys
end
def extract_filters_from(*configs)
configs.compact.each do |config|
filter_manager.include config.delete(:inclusion_filter) if config.has_key?(:inclusion_filter)
filter_manager.exclude config.delete(:exclusion_filter) if config.has_key?(:exclusion_filter)
end
end
def all_configs
@all_configs ||= file_options << command_line_options << env_options
end
def file_options
custom_options_file ? [custom_options] : [global_options, local_options]
end
def env_options
ENV["SPEC_OPTS"] ? Parser.parse!(Shellwords.split(ENV["SPEC_OPTS"])) : {}
end
def command_line_options
@command_line_options ||= Parser.parse!(@args).merge :files_or_directories_to_run => @args
end
def custom_options
options_from(custom_options_file)
end
def local_options
@local_options ||= options_from(local_options_file)
end
def global_options
@global_options ||= options_from(global_options_file)
end
def options_from(path)
Parser.parse(args_from_options_file(path))
end
def args_from_options_file(path)
return [] unless path && File.exist?(path)
config_string = options_file_as_erb_string(path)
config_string.split(/\n+/).map {|l| l.shellsplit}.flatten
end
def options_file_as_erb_string(path)
ERB.new(File.read(path)).result(binding)
end
def custom_options_file
command_line_options[:custom_options_file]
end
def local_options_file
".rspec"
end
def global_options_file
begin
File.join(File.expand_path("~"), ".rspec")
rescue ArgumentError
warn "Unable to find ~/.rspec because the HOME environment variable is not set"
nil
end
end
end
end
end
| 1 | 8,258 | since we're calling the file .rspec-local, I think we should rename local_options to project_options and use local_options for .rspec-local - WDYT? | rspec-rspec-core | rb |
@@ -52,7 +52,7 @@ module Blacklight::Solr
protected
def build_connection
- RSolr.connect(connection_config)
+ RSolr.connect(connection_config.merge(adapter: connection_config[:http_adapter]))
end
end
end | 1 | # frozen_string_literal: true
module Blacklight::Solr
class Repository < Blacklight::AbstractRepository
##
# Find a single solr document result (by id) using the document configuration
# @param [String] id document's unique key value
# @param [Hash] params additional solr query parameters
def find id, params = {}
doc_params = params.reverse_merge(blacklight_config.default_document_solr_params)
.reverse_merge(qt: blacklight_config.document_solr_request_handler)
.merge(blacklight_config.document_unique_id_param => id)
solr_response = send_and_receive blacklight_config.document_solr_path || blacklight_config.solr_path, doc_params
raise Blacklight::Exceptions::RecordNotFound if solr_response.documents.empty?
solr_response
end
##
# Execute a search query against solr
# @param [Hash] params solr query parameters
def search params = {}
send_and_receive blacklight_config.solr_path, params.reverse_merge(qt: blacklight_config.qt)
end
##
# Execute a solr query
# @see [RSolr::Client#send_and_receive]
# @overload find(solr_path, params)
# Execute a solr query at the given path with the parameters
# @param [String] solr path (defaults to blacklight_config.solr_path)
# @param [Hash] parameters for RSolr::Client#send_and_receive
# @overload find(params)
# @param [Hash] parameters for RSolr::Client#send_and_receive
# @return [Blacklight::Solr::Response] the solr response object
def send_and_receive(path, solr_params = {})
benchmark("Solr fetch", level: :debug) do
key = blacklight_config.http_method == :post ? :data : :params
res = connection.send_and_receive(path, {key=>solr_params.to_hash, method: blacklight_config.http_method})
solr_response = blacklight_config.response_model.new(res, solr_params, document_model: blacklight_config.document_model, blacklight_config: blacklight_config)
Blacklight.logger.debug("Solr query: #{blacklight_config.http_method} #{path} #{solr_params.to_hash.inspect}")
Blacklight.logger.debug("Solr response: #{solr_response.inspect}") if defined?(::BLACKLIGHT_VERBOSE_LOGGING) and ::BLACKLIGHT_VERBOSE_LOGGING
solr_response
end
rescue Errno::ECONNREFUSED => e
raise Blacklight::Exceptions::ECONNREFUSED, "Unable to connect to Solr instance using #{connection.inspect}: #{e.inspect}"
rescue RSolr::Error::Http => e
raise Blacklight::Exceptions::InvalidRequest, e.message
end
protected
def build_connection
RSolr.connect(connection_config)
end
end
end
| 1 | 6,923 | Will we want to refactor this when we drop rsolr 1.x support? | projectblacklight-blacklight | rb |
@@ -63,7 +63,7 @@ func Request(path string, info *clientaccess.Info, requester HTTPRequester) ([]b
return requester(u.String(), clientaccess.GetHTTPClient(info.CACerts), username, password)
}
-func getNodeNamedCrt(nodeName, nodePasswordFile string) HTTPRequester {
+func getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile string) HTTPRequester {
return func(u string, client *http.Client, username, password string) ([]byte, error) {
req, err := http.NewRequest(http.MethodGet, u, nil)
if err != nil { | 1 | package config
import (
"bufio"
"context"
cryptorand "crypto/rand"
"crypto/tls"
"encoding/hex"
"encoding/pem"
"fmt"
"io/ioutil"
sysnet "net"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/agent/proxy"
"github.com/rancher/k3s/pkg/cli/cmds"
"github.com/rancher/k3s/pkg/clientaccess"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/control"
"github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/net"
)
const (
DefaultPodManifestPath = "pod-manifests"
)
func Get(ctx context.Context, agent cmds.Agent, proxy proxy.Proxy) *config.Node {
for {
agentConfig, err := get(&agent, proxy)
if err != nil {
logrus.Error(err)
select {
case <-time.After(5 * time.Second):
continue
case <-ctx.Done():
logrus.Fatalf("Interrupted")
}
}
return agentConfig
}
}
type HTTPRequester func(u string, client *http.Client, username, password string) ([]byte, error)
func Request(path string, info *clientaccess.Info, requester HTTPRequester) ([]byte, error) {
u, err := url.Parse(info.URL)
if err != nil {
return nil, err
}
u.Path = path
username, password, _ := clientaccess.ParseUsernamePassword(info.Token)
return requester(u.String(), clientaccess.GetHTTPClient(info.CACerts), username, password)
}
func getNodeNamedCrt(nodeName, nodePasswordFile string) HTTPRequester {
return func(u string, client *http.Client, username, password string) ([]byte, error) {
req, err := http.NewRequest(http.MethodGet, u, nil)
if err != nil {
return nil, err
}
if username != "" {
req.SetBasicAuth(username, password)
}
req.Header.Set(version.Program+"-Node-Name", nodeName)
nodePassword, err := ensureNodePassword(nodePasswordFile)
if err != nil {
return nil, err
}
req.Header.Set(version.Program+"-Node-Password", nodePassword)
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusForbidden {
return nil, fmt.Errorf("Node password rejected, duplicate hostname or contents of '%s' may not match server node-passwd entry, try enabling a unique node name with the --with-node-id flag", nodePasswordFile)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s: %s", u, resp.Status)
}
return ioutil.ReadAll(resp.Body)
}
}
func ensureNodeID(nodeIDFile string) (string, error) {
if _, err := os.Stat(nodeIDFile); err == nil {
id, err := ioutil.ReadFile(nodeIDFile)
return strings.TrimSpace(string(id)), err
}
id := make([]byte, 4, 4)
_, err := cryptorand.Read(id)
if err != nil {
return "", err
}
nodeID := hex.EncodeToString(id)
return nodeID, ioutil.WriteFile(nodeIDFile, []byte(nodeID+"\n"), 0644)
}
func ensureNodePassword(nodePasswordFile string) (string, error) {
if _, err := os.Stat(nodePasswordFile); err == nil {
password, err := ioutil.ReadFile(nodePasswordFile)
return strings.TrimSpace(string(password)), err
}
password := make([]byte, 16, 16)
_, err := cryptorand.Read(password)
if err != nil {
return "", err
}
nodePassword := hex.EncodeToString(password)
return nodePassword, ioutil.WriteFile(nodePasswordFile, []byte(nodePassword+"\n"), 0600)
}
func upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile string) {
password, err := ioutil.ReadFile(oldNodePasswordFile)
if err != nil {
return
}
if err := ioutil.WriteFile(newNodePasswordFile, password, 0600); err != nil {
logrus.Warnf("Unable to write password file: %v", err)
return
}
if err := os.Remove(oldNodePasswordFile); err != nil {
logrus.Warnf("Unable to remove old password file: %v", err)
return
}
}
func getServingCert(nodeName, servingCertFile, servingKeyFile, nodePasswordFile string, info *clientaccess.Info) (*tls.Certificate, error) {
servingCert, err := Request("/v1-"+version.Program+"/serving-kubelet.crt", info, getNodeNamedCrt(nodeName, nodePasswordFile))
if err != nil {
return nil, err
}
servingCert, servingKey := splitCertKeyPEM(servingCert)
if err := ioutil.WriteFile(servingCertFile, servingCert, 0600); err != nil {
return nil, errors.Wrapf(err, "failed to write node cert")
}
if err := ioutil.WriteFile(servingKeyFile, servingKey, 0600); err != nil {
return nil, errors.Wrapf(err, "failed to write node key")
}
cert, err := tls.X509KeyPair(servingCert, servingKey)
if err != nil {
return nil, err
}
return &cert, nil
}
func getHostFile(filename, keyFile string, info *clientaccess.Info) error {
basename := filepath.Base(filename)
fileBytes, err := clientaccess.Get("/v1-"+version.Program+"/"+basename, info)
if err != nil {
return err
}
if keyFile == "" {
if err := ioutil.WriteFile(filename, fileBytes, 0600); err != nil {
return errors.Wrapf(err, "failed to write cert %s", filename)
}
} else {
fileBytes, keyBytes := splitCertKeyPEM(fileBytes)
if err := ioutil.WriteFile(filename, fileBytes, 0600); err != nil {
return errors.Wrapf(err, "failed to write cert %s", filename)
}
if err := ioutil.WriteFile(keyFile, keyBytes, 0600); err != nil {
return errors.Wrapf(err, "failed to write key %s", filename)
}
}
return nil
}
func splitCertKeyPEM(bytes []byte) (certPem []byte, keyPem []byte) {
for {
b, rest := pem.Decode(bytes)
if b == nil {
break
}
bytes = rest
if strings.Contains(b.Type, "PRIVATE KEY") {
keyPem = append(keyPem, pem.EncodeToMemory(b)...)
} else {
certPem = append(certPem, pem.EncodeToMemory(b)...)
}
}
return
}
func getNodeNamedHostFile(filename, keyFile, nodeName, nodePasswordFile string, info *clientaccess.Info) error {
basename := filepath.Base(filename)
fileBytes, err := Request("/v1-"+version.Program+"/"+basename, info, getNodeNamedCrt(nodeName, nodePasswordFile))
if err != nil {
return err
}
fileBytes, keyBytes := splitCertKeyPEM(fileBytes)
if err := ioutil.WriteFile(filename, fileBytes, 0600); err != nil {
return errors.Wrapf(err, "failed to write cert %s", filename)
}
if err := ioutil.WriteFile(keyFile, keyBytes, 0600); err != nil {
return errors.Wrapf(err, "failed to write key %s", filename)
}
return nil
}
func getHostnameAndIP(info cmds.Agent) (string, string, error) {
ip := info.NodeIP
if ip == "" {
hostIP, err := net.ChooseHostInterface()
if err != nil {
return "", "", err
}
ip = hostIP.String()
}
name := info.NodeName
if name == "" {
hostname, err := os.Hostname()
if err != nil {
return "", "", err
}
name = hostname
}
// Use lower case hostname to comply with kubernetes constraint:
// https://github.com/kubernetes/kubernetes/issues/71140
name = strings.ToLower(name)
return name, ip, nil
}
func isValidResolvConf(resolvConfFile string) bool {
file, err := os.Open(resolvConfFile)
if err != nil {
return false
}
defer file.Close()
nameserver := regexp.MustCompile(`^nameserver\s+([^\s]*)`)
scanner := bufio.NewScanner(file)
for scanner.Scan() {
ipMatch := nameserver.FindStringSubmatch(scanner.Text())
if len(ipMatch) == 2 {
ip := sysnet.ParseIP(ipMatch[1])
if ip == nil || !ip.IsGlobalUnicast() {
return false
}
}
}
if err := scanner.Err(); err != nil {
return false
}
return true
}
func locateOrGenerateResolvConf(envInfo *cmds.Agent) string {
if envInfo.ResolvConf != "" {
return envInfo.ResolvConf
}
resolvConfs := []string{"/etc/resolv.conf", "/run/systemd/resolve/resolv.conf"}
for _, conf := range resolvConfs {
if isValidResolvConf(conf) {
return conf
}
}
tmpConf := filepath.Join(os.TempDir(), version.Program+"-resolv.conf")
if err := ioutil.WriteFile(tmpConf, []byte("nameserver 8.8.8.8\n"), 0444); err != nil {
logrus.Error(err)
return ""
}
return tmpConf
}
func get(envInfo *cmds.Agent, proxy proxy.Proxy) (*config.Node, error) {
if envInfo.Debug {
logrus.SetLevel(logrus.DebugLevel)
}
info, err := clientaccess.ParseAndValidateToken(proxy.SupervisorURL(), envInfo.Token)
if err != nil {
return nil, err
}
controlConfig, err := getConfig(info)
if err != nil {
return nil, err
}
if controlConfig.SupervisorPort != controlConfig.HTTPSPort {
if err := proxy.StartAPIServerProxy(controlConfig.HTTPSPort); err != nil {
return nil, errors.Wrapf(err, "failed to setup access to API Server port %d on at %s", controlConfig.HTTPSPort, proxy.SupervisorURL())
}
}
var flannelIface *sysnet.Interface
if !envInfo.NoFlannel && len(envInfo.FlannelIface) > 0 {
flannelIface, err = sysnet.InterfaceByName(envInfo.FlannelIface)
if err != nil {
return nil, errors.Wrapf(err, "unable to find interface")
}
}
clientCAFile := filepath.Join(envInfo.DataDir, "client-ca.crt")
if err := getHostFile(clientCAFile, "", info); err != nil {
return nil, err
}
serverCAFile := filepath.Join(envInfo.DataDir, "server-ca.crt")
if err := getHostFile(serverCAFile, "", info); err != nil {
return nil, err
}
servingKubeletCert := filepath.Join(envInfo.DataDir, "serving-kubelet.crt")
servingKubeletKey := filepath.Join(envInfo.DataDir, "serving-kubelet.key")
nodePasswordRoot := "/"
if envInfo.Rootless {
nodePasswordRoot = envInfo.DataDir
}
nodeConfigPath := filepath.Join(nodePasswordRoot, "etc", "rancher", "node")
if err := os.MkdirAll(nodeConfigPath, 0755); err != nil {
return nil, err
}
oldNodePasswordFile := filepath.Join(envInfo.DataDir, "node-password.txt")
newNodePasswordFile := filepath.Join(nodeConfigPath, "password")
upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile)
nodeName, nodeIP, err := getHostnameAndIP(*envInfo)
if err != nil {
return nil, err
}
if envInfo.WithNodeID {
nodeID, err := ensureNodeID(filepath.Join(nodeConfigPath, "id"))
if err != nil {
return nil, err
}
nodeName += "-" + nodeID
}
servingCert, err := getServingCert(nodeName, servingKubeletCert, servingKubeletKey, newNodePasswordFile, info)
if err != nil {
return nil, err
}
clientKubeletCert := filepath.Join(envInfo.DataDir, "client-kubelet.crt")
clientKubeletKey := filepath.Join(envInfo.DataDir, "client-kubelet.key")
if err := getNodeNamedHostFile(clientKubeletCert, clientKubeletKey, nodeName, newNodePasswordFile, info); err != nil {
return nil, err
}
kubeconfigKubelet := filepath.Join(envInfo.DataDir, "kubelet.kubeconfig")
if err := control.KubeConfig(kubeconfigKubelet, proxy.APIServerURL(), serverCAFile, clientKubeletCert, clientKubeletKey); err != nil {
return nil, err
}
clientKubeProxyCert := filepath.Join(envInfo.DataDir, "client-kube-proxy.crt")
clientKubeProxyKey := filepath.Join(envInfo.DataDir, "client-kube-proxy.key")
if err := getHostFile(clientKubeProxyCert, clientKubeProxyKey, info); err != nil {
return nil, err
}
kubeconfigKubeproxy := filepath.Join(envInfo.DataDir, "kubeproxy.kubeconfig")
if err := control.KubeConfig(kubeconfigKubeproxy, proxy.APIServerURL(), serverCAFile, clientKubeProxyCert, clientKubeProxyKey); err != nil {
return nil, err
}
clientK3sControllerCert := filepath.Join(envInfo.DataDir, "client-"+version.Program+"-controller.crt")
clientK3sControllerKey := filepath.Join(envInfo.DataDir, "client-"+version.Program+"-controller.key")
if err := getHostFile(clientK3sControllerCert, clientK3sControllerKey, info); err != nil {
return nil, err
}
kubeconfigK3sController := filepath.Join(envInfo.DataDir, version.Program+"controller.kubeconfig")
if err := control.KubeConfig(kubeconfigK3sController, proxy.APIServerURL(), serverCAFile, clientK3sControllerCert, clientK3sControllerKey); err != nil {
return nil, err
}
nodeConfig := &config.Node{
Docker: envInfo.Docker,
SELinux: envInfo.EnableSELinux,
ContainerRuntimeEndpoint: envInfo.ContainerRuntimeEndpoint,
FlannelBackend: controlConfig.FlannelBackend,
}
nodeConfig.FlannelIface = flannelIface
nodeConfig.Images = filepath.Join(envInfo.DataDir, "images")
nodeConfig.AgentConfig.NodeIP = nodeIP
nodeConfig.AgentConfig.NodeName = nodeName
nodeConfig.AgentConfig.NodeConfigPath = nodeConfigPath
nodeConfig.AgentConfig.NodeExternalIP = envInfo.NodeExternalIP
nodeConfig.AgentConfig.ServingKubeletCert = servingKubeletCert
nodeConfig.AgentConfig.ServingKubeletKey = servingKubeletKey
nodeConfig.AgentConfig.ClusterDNS = controlConfig.ClusterDNS
nodeConfig.AgentConfig.ClusterDomain = controlConfig.ClusterDomain
nodeConfig.AgentConfig.ResolvConf = locateOrGenerateResolvConf(envInfo)
nodeConfig.AgentConfig.ClientCA = clientCAFile
nodeConfig.AgentConfig.ListenAddress = "0.0.0.0"
nodeConfig.AgentConfig.KubeConfigKubelet = kubeconfigKubelet
nodeConfig.AgentConfig.KubeConfigKubeProxy = kubeconfigKubeproxy
nodeConfig.AgentConfig.KubeConfigK3sController = kubeconfigK3sController
if envInfo.Rootless {
nodeConfig.AgentConfig.RootDir = filepath.Join(envInfo.DataDir, "kubelet")
}
nodeConfig.AgentConfig.PauseImage = envInfo.PauseImage
nodeConfig.AgentConfig.Snapshotter = envInfo.Snapshotter
nodeConfig.AgentConfig.IPSECPSK = controlConfig.IPSECPSK
nodeConfig.AgentConfig.StrongSwanDir = filepath.Join(envInfo.DataDir, "strongswan")
nodeConfig.CACerts = info.CACerts
nodeConfig.Containerd.Config = filepath.Join(envInfo.DataDir, "etc/containerd/config.toml")
nodeConfig.Containerd.Root = filepath.Join(envInfo.DataDir, "containerd")
nodeConfig.Containerd.Opt = filepath.Join(envInfo.DataDir, "containerd")
if !envInfo.Debug {
nodeConfig.Containerd.Log = filepath.Join(envInfo.DataDir, "containerd/containerd.log")
}
nodeConfig.Containerd.State = "/run/k3s/containerd"
nodeConfig.Containerd.Address = filepath.Join(nodeConfig.Containerd.State, "containerd.sock")
nodeConfig.Containerd.Template = filepath.Join(envInfo.DataDir, "etc/containerd/config.toml.tmpl")
nodeConfig.Certificate = servingCert
if nodeConfig.FlannelBackend == config.FlannelBackendNone {
nodeConfig.NoFlannel = true
} else {
nodeConfig.NoFlannel = envInfo.NoFlannel
}
if !nodeConfig.NoFlannel {
hostLocal, err := exec.LookPath("host-local")
if err != nil {
return nil, errors.Wrapf(err, "failed to find host-local")
}
if envInfo.FlannelConf == "" {
nodeConfig.FlannelConf = filepath.Join(envInfo.DataDir, "etc/flannel/net-conf.json")
} else {
nodeConfig.FlannelConf = envInfo.FlannelConf
nodeConfig.FlannelConfOverride = true
}
nodeConfig.AgentConfig.CNIBinDir = filepath.Dir(hostLocal)
nodeConfig.AgentConfig.CNIConfDir = filepath.Join(envInfo.DataDir, "etc/cni/net.d")
}
if !nodeConfig.Docker && nodeConfig.ContainerRuntimeEndpoint == "" {
nodeConfig.AgentConfig.RuntimeSocket = nodeConfig.Containerd.Address
} else {
nodeConfig.AgentConfig.RuntimeSocket = nodeConfig.ContainerRuntimeEndpoint
nodeConfig.AgentConfig.CNIPlugin = true
}
if controlConfig.ClusterIPRange != nil {
nodeConfig.AgentConfig.ClusterCIDR = *controlConfig.ClusterIPRange
}
os.Setenv("NODE_NAME", nodeConfig.AgentConfig.NodeName)
nodeConfig.AgentConfig.ExtraKubeletArgs = envInfo.ExtraKubeletArgs
nodeConfig.AgentConfig.ExtraKubeProxyArgs = envInfo.ExtraKubeProxyArgs
nodeConfig.AgentConfig.NodeTaints = envInfo.Taints
nodeConfig.AgentConfig.NodeLabels = envInfo.Labels
nodeConfig.AgentConfig.PrivateRegistry = envInfo.PrivateRegistry
nodeConfig.AgentConfig.DisableCCM = controlConfig.DisableCCM
nodeConfig.AgentConfig.DisableNPC = controlConfig.DisableNPC
nodeConfig.AgentConfig.DisableKubeProxy = controlConfig.DisableKubeProxy
nodeConfig.AgentConfig.Rootless = envInfo.Rootless
nodeConfig.AgentConfig.PodManifests = filepath.Join(envInfo.DataDir, DefaultPodManifestPath)
nodeConfig.AgentConfig.ProtectKernelDefaults = envInfo.ProtectKernelDefaults
return nodeConfig, nil
}
func getConfig(info *clientaccess.Info) (*config.Control, error) {
data, err := clientaccess.Get("/v1-"+version.Program+"/config", info)
if err != nil {
return nil, err
}
controlControl := &config.Control{}
return controlControl, json.Unmarshal(data, controlControl)
}
| 1 | 8,523 | should this really be multiple IPs? | k3s-io-k3s | go |
@@ -0,0 +1 @@
+<%= render partial: "proposal", locals: { proposal: @proposal} %> | 1 | 1 | 12,872 | Hmm, is that partial used in multiple places? Maybe we can just move that file in here. | 18F-C2 | rb |
|
@@ -0,0 +1,16 @@
+// Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package async
+
+//go:generate mockgen.sh github.com/aws/amazon-ecs-agent/agent/async Cache mocks/async_mocks.go | 1 | 1 | 14,257 | Should be 2014-2016 | aws-amazon-ecs-agent | go |
|
@@ -1976,6 +1976,11 @@ namespace pwiz.Skyline.Model
private static Type GetColumnType(string value, IFormatProvider provider)
{
double result;
+ var quote = @"""";
+ if (value.StartsWith(quote) && value.EndsWith(quote))
+ {
+ value = value.Substring(1, value.Length - 2);
+ }
if (double.TryParse(value, NumberStyles.Number, provider, out result))
return typeof(double);
else if (FastaSequence.IsExSequence(value)) | 1 | /*
* Original author: Brendan MacLean <brendanx .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2009 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
using pwiz.Common.SystemUtil;
using pwiz.ProteomeDatabase.API;
using pwiz.Skyline.Model.Crosslinking;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.DocSettings.Extensions;
using pwiz.Skyline.Model.Irt;
using pwiz.Skyline.Model.Lib;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.Skyline.Util.Extensions;
using Array = System.Array;
namespace pwiz.Skyline.Model
{
public class FastaImporter
{
private const int MAX_EMPTY_PEPTIDE_GROUP_COUNT = 2000;
public static int MaxEmptyPeptideGroupCount
{
get { return TestMaxEmptyPeptideGroupCount ?? MAX_EMPTY_PEPTIDE_GROUP_COUNT; }
}
private int _countPeptides;
private int _countIons;
readonly ModificationMatcher _modMatcher;
public FastaImporter(SrmDocument document, bool peptideList)
{
Document = document;
PeptideList = peptideList;
}
public FastaImporter(SrmDocument document, ModificationMatcher modMatcher)
: this(document, true)
{
_modMatcher = modMatcher;
}
public SrmDocument Document { get; private set; }
public bool PeptideList { get; private set; }
public int EmptyPeptideGroupCount { get; private set; }
public IEnumerable<PeptideGroupDocNode> Import(TextReader reader, IProgressMonitor progressMonitor, long lineCount)
{
bool requireLibraryMatch = Document.Settings.PeptideSettings.Libraries.Pick == PeptidePick.library
|| Document.Settings.PeptideSettings.Libraries.Pick == PeptidePick.both;
// Set starting values for limit counters
int originalPeptideCount = Document.PeptideCount;
_countPeptides = originalPeptideCount;
_countIons = Document.PeptideTransitionCount;
// Store set of existing FASTA sequences to keep from duplicating
HashSet<FastaSequence> set = new HashSet<FastaSequence>();
foreach (PeptideGroupDocNode nodeGroup in Document.Children)
{
FastaSequence fastaSeq = nodeGroup.Id as FastaSequence;
if (fastaSeq != null)
set.Add(fastaSeq);
}
var peptideGroupsNew = new List<PeptideGroupDocNode>();
var dictGroupsNew = new Dictionary<string, int>();
PeptideGroupBuilder seqBuilder = null;
long linesRead = 0;
int progressPercent = -1;
string line;
IProgressStatus status = new ProgressStatus(string.Empty);
while ((line = reader.ReadLine()) != null)
{
linesRead++;
if (progressMonitor != null)
{
// TODO when changing from ILongWaitBroker to IProgressMonitor, the old code was:
// if (progressMonitor.IsCanceled || progressMonitor.IsDocumentChanged(Document))
// IProgressMonitor does not have IsDocumentChanged.
if (progressMonitor.IsCanceled)
{
EmptyPeptideGroupCount = 0;
return new PeptideGroupDocNode[0];
}
int progressNew = (int) (linesRead*100/lineCount);
if (progressPercent != progressNew)
progressMonitor.UpdateProgress(status = status.ChangePercentComplete(progressPercent = progressNew));
}
if (line.StartsWith(@">"))
{
if (!requireLibraryMatch && progressMonitor == null)
{
if (_countIons > SrmDocument.MaxTransitionCount)
{
throw new InvalidDataException(TextUtil.LineSeparate(string.Format(Resources.FastaImporter_Import_This_import_causes_the_document_to_contain_more_than__0_n0__transitions_in__1_n0__peptides_at_line__2_n0__,
SrmDocument.MaxTransitionCount, _countPeptides, linesRead), Resources.FastaImporter_Import_Check_your_settings_to_make_sure_you_are_using_a_library_and_restrictive_enough_transition_selection_));
}
else if (_countPeptides > SrmDocument.MAX_PEPTIDE_COUNT)
{
throw new InvalidDataException(TextUtil.LineSeparate(string.Format(Resources.FastaImporter_Import_This_import_causes_the_document_to_contain_more_than__0_n0__peptides_at_line__1_n0__,
SrmDocument.MAX_PEPTIDE_COUNT, linesRead), Resources.FastaImporter_Import_Check_your_settings_to_make_sure_you_are_using_a_library_));
}
}
try
{
if (seqBuilder != null)
AddPeptideGroup(peptideGroupsNew, dictGroupsNew, set, seqBuilder);
seqBuilder = _modMatcher == null
? new PeptideGroupBuilder(line, PeptideList, Document.Settings, null)
: new PeptideGroupBuilder(line, _modMatcher, Document.Settings, null);
}
catch (Exception x)
{
throw new InvalidDataException(string.Format(Resources.FastaImporter_Import_Error_at_or_around_line__0____1_, linesRead, x.Message), x);
}
if (progressMonitor != null)
{
string message = string.Format(Resources.FastaImporter_Import_Adding_protein__0__,
seqBuilder.Name);
int newPeptideCount = _countPeptides - originalPeptideCount;
if (newPeptideCount > 0)
{
message = TextUtil.LineSeparate(message,
string.Format(Resources.FastaImporter_Import__0__proteins_and__1__peptides_added, peptideGroupsNew.Count,
newPeptideCount));
}
progressMonitor.UpdateProgress(status = status.ChangeMessage(message));
}
}
else if (seqBuilder == null)
{
if (line.Trim().Length == 0)
continue;
break;
}
else
{
seqBuilder.AppendSequence(line);
}
}
// Add last sequence.
if (seqBuilder != null)
AddPeptideGroup(peptideGroupsNew, dictGroupsNew, set, seqBuilder);
return peptideGroupsNew;
}
private void AddPeptideGroup(List<PeptideGroupDocNode> listGroups,
Dictionary<string, int> dictGroupsNew,
ICollection<FastaSequence> set,
PeptideGroupBuilder builder)
{
PeptideGroupDocNode nodeGroup = builder.ToDocNode();
FastaSequence fastaSeq = nodeGroup.Id as FastaSequence;
if (fastaSeq != null && set.Contains(fastaSeq))
return;
if (nodeGroup.MoleculeCount == 0)
{
EmptyPeptideGroupCount++;
// If more than MaxEmptyPeptideGroupCount, then don't keep the empty peptide groups
// This is not useful and is likely to cause memory and performance issues
if (EmptyPeptideGroupCount > MaxEmptyPeptideGroupCount)
{
if (EmptyPeptideGroupCount == MaxEmptyPeptideGroupCount + 1)
{
ReduceToNonEmptyGroups(listGroups, dictGroupsNew);
}
return;
}
}
int indexExist;
if (fastaSeq != null && dictGroupsNew.TryGetValue(fastaSeq.Sequence, out indexExist))
{
AddPeptideGroupAlternative(listGroups, indexExist, fastaSeq);
return;
}
if (fastaSeq != null)
dictGroupsNew.Add(fastaSeq.Sequence, listGroups.Count);
listGroups.Add(nodeGroup);
_countPeptides += nodeGroup.MoleculeCount;
_countIons += nodeGroup.TransitionCount;
}
private static void ReduceToNonEmptyGroups(List<PeptideGroupDocNode> listGroups, Dictionary<string, int> dictGroupsNew)
{
var nonEmptyGroups = listGroups.Where(g => g.MoleculeCount > 0).ToArray();
listGroups.Clear();
listGroups.AddRange(nonEmptyGroups);
dictGroupsNew.Clear();
for (int i = 0; i < listGroups.Count; i++)
{
var seq = listGroups[i].Id as FastaSequence;
if (seq != null)
dictGroupsNew.Add(seq.Sequence, i);
}
}
private static void AddPeptideGroupAlternative(List<PeptideGroupDocNode> listGroups, int indexExist, FastaSequence fastaSeq)
{
var nodeGroupExist = listGroups[indexExist];
var fastaSeqExist = (FastaSequence) nodeGroupExist.Id;
string seqName = fastaSeq.Name;
if (Equals(fastaSeqExist.Name, seqName) || fastaSeqExist.Alternatives.Contains(a => Equals(a.Name, seqName)))
return; // The new name for this sequence is already accounted for
// Add this as an alternative to the existing node
fastaSeq = fastaSeqExist.AddAlternative(new ProteinMetadata(fastaSeq.Name, fastaSeq.Description));
listGroups[indexExist] = new PeptideGroupDocNode(fastaSeq, nodeGroupExist.Annotations,
nodeGroupExist.Name, nodeGroupExist.Description,
nodeGroupExist.Peptides.ToArray(), nodeGroupExist.AutoManageChildren);
}
/// <summary>
/// Converts columnar data into FASTA format.
/// Assumes either:
///     Name multicolumnDescription Sequence
/// or:
///     Name Description Sequence otherColumns
/// </summary>
/// <param name="text">Text string containing columnar data</param>
/// <param name="separator">Column separator</param>
/// <returns>Conversion to FASTA format</returns>
/// <exception cref="LineColNumberedIoException">Thrown when a line has fewer than two
/// columns, or when no candidate column contains a valid protein sequence</exception>
public static string ToFasta(string text, char separator)
{
    var sb = new StringBuilder(text.Length);
    // Fix: dispose the reader deterministically (the original leaked it).
    using (var reader = new StringReader(text))
    {
        string line;
        int lineNum = 0;
        while ((line = reader.ReadLine()) != null)
        {
            lineNum++;
            string[] columns = line.Split(separator);
            if (columns.Length < 2)
                throw new LineColNumberedIoException(Resources.FastaImporter_ToFasta_Too_few_columns_found, lineNum, -1);
            int fastaCol = columns.Length - 1;  // Start with assumption of Name Description Sequence
            string seq = columns[fastaCol].Trim();
            if ((fastaCol > 2) && (!FastaSequence.IsExSequence(seq)))
            {
                // Possibly from PasteDlg, form of Name Description Sequence Accession PreferredName Gene Species
                fastaCol = 2;
                seq = columns[fastaCol].Trim();
            }
            if (!FastaSequence.IsExSequence(seq))
                throw new LineColNumberedIoException(
                    Resources.FastaImporter_ToFasta_Last_column_does_not_contain_a_valid_protein_sequence, lineNum,
                    fastaCol);
            sb.Append(@">").Append(columns[0].Trim().Replace(@" ", @"_")); // ID
            for (int i = 1; i < fastaCol; i++)
                sb.Append(@" ").Append(columns[i].Trim()); // Description
            sb.AppendLine();
            sb.AppendLine(seq); // Sequence
        }
    }
    return sb.ToString();
}
#region Test support
// Test-only override for the maximum number of empty peptide groups allowed
// during import — NOTE(review): consumer is outside this chunk; confirm semantics there.
public static int? TestMaxEmptyPeptideGroupCount { get; set; }
#endregion
}
/// <summary>
/// Wraps the source of a transition list — a file path, raw text, or pre-split lines —
/// and lazily detects its column separator and numeric format provider.
/// </summary>
public class MassListInputs
{
private readonly string _inputFilename;
public string InputFilename { get { return _inputFilename; } }
private readonly string _inputText;
public string InputText { get { return _inputText; } }
// Cached split/trimmed lines; populated on first call to ReadLines.
private IList<string> _lines;
// When fullText is true, initText is the entire list text; otherwise it is a file path.
public MassListInputs(string initText, bool fullText = false)
{
if (fullText)
_inputText = initText;
else
_inputFilename = initText;
}
// Use when the separator and format provider are already known, skipping detection.
public MassListInputs(string inputText, IFormatProvider formatProvider, char separator)
{
_inputText = inputText;
FormatProvider = formatProvider;
Separator = separator;
}
// Use when the caller already split the text into lines; format is detected immediately.
public MassListInputs(IList<string> lines)
{
InitFormat(lines);
_lines = lines;
}
// Returns the input lines, reading and caching them on first use.
public IList<string> ReadLines(IProgressMonitor progressMonitor, IProgressStatus status = null)
{
return _lines ?? (_lines = _inputFilename != null ? ReadLinesFromFile(progressMonitor, status) : ReadLinesFromText());
}
private IList<string> ReadLinesFromFile(IProgressMonitor progressMonitor, IProgressStatus status)
{
using (var reader = new LineReaderWithProgress(_inputFilename, progressMonitor, status))
{
var inputLines = new List<string>();
string line;
while ((line = reader.ReadLine()?.Trim()) != null)
{
// Skip blank lines
if (line.Length > 0)
inputLines.Add(line);
}
if (inputLines.Count == 0)
throw new InvalidDataException(Resources.MassListImporter_Import_Empty_transition_list);
InitFormat(inputLines);
return inputLines;
}
}
private IList<string> ReadLinesFromText()
{
var inputLines = ReadLinesFromText(_inputText);
InitFormat(inputLines);
return inputLines;
}
// Splits text into non-blank lines; throws if nothing remains.
public static IList<string> ReadLinesFromText(string text)
{
var inputLines = new List<string>();
using (var readerLines = new StringReader(text))
{
string line;
while ((line = readerLines.ReadLine()) != null)
{
if (line.Trim().Length == 0)
continue;
inputLines.Add(line);
}
}
if (inputLines.Count == 0)
throw new InvalidDataException(Resources.MassListImporter_Import_Empty_transition_list);
return inputLines;
}
// Detects Separator and FormatProvider from the first line (or the second, when the
// first contains no numeric columns — e.g. a header row). No-op if already set.
private void InitFormat(IList<string> inputLines)
{
if (FormatProvider == null)
{
char sep;
IFormatProvider provider;
Type[] columnTypes;
string inputLine = 0 < inputLines.Count ? inputLines[0] : string.Empty;
if (!MassListImporter.IsColumnar(inputLine, out provider, out sep, out columnTypes))
{
throw new IOException(Resources.SkylineWindow_importMassListMenuItem_Click_Data_columns_not_found_in_first_line);
}
// If there are no numbers in the first line, try the second. Without numbers the format provider may not be correct
if (columnTypes.All(t => Type.GetTypeCode(t) != TypeCode.Double))
{
inputLine = 1 < inputLines.Count ? inputLines[1] : string.Empty;
if (!MassListImporter.IsColumnar(inputLine, out provider, out sep, out columnTypes) ||
columnTypes.All(t => Type.GetTypeCode(t) != TypeCode.Double))
{
throw new IOException(Resources.SkylineWindow_importMassListMenuItem_Click_Data_columns_not_found_in_first_line);
}
}
FormatProvider = provider;
Separator = sep;
}
}
// Detected (or supplied) numeric format and column separator for the list.
public IFormatProvider FormatProvider { get; set; }
public char Separator { get; set; }
}
public class MassListImporter
{
// Number of lines to inspect when guessing columns — NOTE(review): not referenced
// in this chunk; confirm usage elsewhere in the class.
private const int INSPECT_LINES = 50;
// Decimal digits used when rounding m/z values for error messages.
public const int MZ_ROUND_DIGITS = 4;
// ReSharper disable NotAccessedField.Local
// Running totals accumulated during import (diagnostic counters).
private int _countPeptides;
private int _countIons;
// ReSharper restore NotAccessedField.Local
// Count of input lines consumed so far; drives percent-complete reporting.
private int _linesSeen;
// Creates an importer for the given document over the given transition list inputs.
public MassListImporter(SrmDocument document, MassListInputs inputs)
{
Document = document;
Inputs = inputs;
}
public SrmDocument Document { get; private set; }
// Row reader chosen by PreImport; null until PreImport succeeds.
public MassListRowReader RowReader { get; private set; }
public SrmSettings Settings { get { return Document.Settings; } }
public MassListInputs Inputs { get; private set; }
public IFormatProvider FormatProvider { get { return Inputs.FormatProvider; } }
public char Separator { get { return Inputs.Separator; } }
// True when PreImport detected a small molecule transition list instead of peptides.
public bool IsSmallMoleculeInput { get; private set; }
// Modifications matched by the row reader, or the document defaults when no reader exists yet.
public PeptideModifications GetModifications(SrmDocument document)
{
return RowReader != null ? RowReader.GetModifications(document) : document.Settings.PeptideSettings.Modifications;
}
// Percent of progress allotted to reading rows; the remainder covers merging groups.
private const int PERCENT_READER = 95;
/// <summary>
/// Reads the input lines and selects a row reader for them. Returns false only on
/// cancellation. Sets IsSmallMoleculeInput and returns true early when the input
/// looks like a small molecule transition list (no RowReader is created in that case).
/// </summary>
/// <param name="progressMonitor">Optional monitor for progress/cancellation</param>
/// <param name="indices">Pre-determined column indices, or null to detect them</param>
/// <param name="tolerateErrors">Passed through to GeneralRowReader creation</param>
public bool PreImport(IProgressMonitor progressMonitor, ColumnIndices indices, bool tolerateErrors)
{
// Progress is divided into 3 segments: reading, matching, reader creation.
IProgressStatus status = new ProgressStatus(Resources.MassListImporter_Import_Reading_transition_list).ChangeSegments(0, 3);
// Get the lines used to guess the necessary columns and create the row reader
if (progressMonitor != null)
{
if (progressMonitor.IsCanceled)
return false;
progressMonitor.UpdateProgress(status);
}
var lines = new List<string>(Inputs.ReadLines(progressMonitor, status));
status = status.NextSegment();
_linesSeen = 0;
// Small molecule lists are handled elsewhere; just flag and return.
if (SmallMoleculeTransitionListCSVReader.IsPlausibleSmallMoleculeTransitionList(lines))
{
IsSmallMoleculeInput = true;
if (progressMonitor != null)
progressMonitor.UpdateProgress(status.Complete());
return true;
}
if (progressMonitor != null)
{
if (progressMonitor.IsCanceled)
return false;
progressMonitor.UpdateProgress(status = status.ChangeMessage(Resources.MassListImporter_Import_Inspecting_peptide_sequence_information));
}
if (indices != null)
{
// CONSIDER: Only used by Edit > Insert > Transition List (should we still pass in headers?)
RowReader = new GeneralRowReader(FormatProvider, Separator, indices, Settings, lines, progressMonitor, status);
}
else
{
// Check first line for validity
var line = lines.FirstOrDefault();
if (string.IsNullOrEmpty(line))
throw new InvalidDataException(Resources.MassListImporter_Import_Invalid_transition_list_Transition_lists_must_contain_at_least_precursor_m_z_product_m_z_and_peptide_sequence);
indices = ColumnIndices.FromLine(line, Separator, s => GetColumnType(s, FormatProvider));
// A detected header row is removed from the data lines.
if (indices.Headers != null)
{
lines.RemoveAt(0);
_linesSeen++;
}
// If no numeric columns in the first row
// Try the specialized "ExPeptide" format first, then fall back to the general reader.
RowReader = ExPeptideRowReader.Create(FormatProvider, Separator, indices, Settings, lines, progressMonitor, status);
if (RowReader == null)
{
RowReader = GeneralRowReader.Create(FormatProvider, Separator, indices, Settings, lines, tolerateErrors, progressMonitor, status);
if (RowReader == null)
throw new LineColNumberedIoException(Resources.MassListImporter_Import_Failed_to_find_peptide_column, 1, -1);
}
}
return true;
}
/// <summary>
/// Imports all rows using the reader chosen by PreImport, building peptide group
/// nodes and accumulating iRT values, library spectra, and per-row errors.
/// Returns an empty sequence on cancellation (clearing the output lists).
/// </summary>
/// <param name="progressMonitor">Optional monitor for progress/cancellation</param>
/// <param name="dictNameSeq">Known FASTA sequences by protein name</param>
/// <param name="irtPeptides">Output: measured retention times for iRT peptides</param>
/// <param name="librarySpectra">Output: spectra to add to a library</param>
/// <param name="errorList">Output: rows which failed to import</param>
public IEnumerable<PeptideGroupDocNode> DoImport(IProgressMonitor progressMonitor,
IDictionary<string, FastaSequence> dictNameSeq,
List<MeasuredRetentionTime> irtPeptides,
List<SpectrumMzInfo> librarySpectra,
List<TransitionImportErrorInfo> errorList)
{
_countPeptides = Document.PeptideCount;
_countIons = Document.PeptideTransitionCount;
List<PeptideGroupDocNode> peptideGroupsNew = new List<PeptideGroupDocNode>();
PeptideGroupBuilder seqBuilder = null;
IProgressStatus status = new ProgressStatus();
var lines = RowReader.Lines;
// Process lines
_linesSeen = 0;
for (var index = 0; index < lines.Count; index++)
{
string row = lines[index];
// Parse the row; a non-null return is an error to record, not a fatal failure.
var errorInfo = RowReader.NextRow(row, ++_linesSeen);
if (errorInfo != null)
{
errorList.Add(errorInfo);
continue;
}
if (progressMonitor != null)
{
if (progressMonitor.IsCanceled)
{
// Cancellation discards all partial results.
irtPeptides.Clear();
librarySpectra.Clear();
errorList.Clear();
return new PeptideGroupDocNode[0];
}
int percentComplete = (_linesSeen * PERCENT_READER / lines.Count);
if (status.PercentComplete != percentComplete)
{
string message = string.Format(Resources.MassListImporter_Import_Importing__0__,
RowReader.TransitionInfo.ProteinName ?? RowReader.TransitionInfo.PeptideSequence);
progressMonitor.UpdateProgress(status = status.ChangePercentComplete(percentComplete).ChangeMessage(message));
}
}
seqBuilder = AddRow(seqBuilder, RowReader, dictNameSeq, peptideGroupsNew, row, _linesSeen, Inputs.InputFilename, irtPeptides, librarySpectra, errorList);
}
// Add last sequence.
if (seqBuilder != null)
AddPeptideGroup(peptideGroupsNew, seqBuilder, irtPeptides, librarySpectra, errorList);
return MergeEqualGroups(progressMonitor, peptideGroupsNew, ref status);
}
/// <summary>
/// Merges peptide groups that compare equal when stripped of their children
/// (i.e. the same protein appearing multiple times), preserving first-seen order.
/// Returns the input list unchanged when no duplicates exist; returns an empty
/// array on cancellation.
/// </summary>
private IList<PeptideGroupDocNode> MergeEqualGroups(IProgressMonitor progressMonitor,
IList<PeptideGroupDocNode> peptideGroups, ref IProgressStatus status)
{
var listKeys = new List<PeptideGroupDocNode>(); // Maintain ordered list of keys
var dictGroupsToMergeLists = new Dictionary<PeptideGroupDocNode, List<PeptideGroupDocNode>>();
bool merge = false;
foreach (var nodeGroup in peptideGroups)
{
// The childless copy is the grouping key for equality.
var nodeGroupWithoutChildren = (PeptideGroupDocNode) nodeGroup.ChangeChildren(new PeptideDocNode[0]);
List<PeptideGroupDocNode> groupsToMerge;
if (dictGroupsToMergeLists.TryGetValue(nodeGroupWithoutChildren, out groupsToMerge))
merge = true; // Seeing a group twice means a merge is necessary
else
{
groupsToMerge = new List<PeptideGroupDocNode>();
dictGroupsToMergeLists.Add(nodeGroupWithoutChildren, groupsToMerge);
listKeys.Add(nodeGroupWithoutChildren);
}
groupsToMerge.Add(nodeGroup);
}
if (!merge)
return peptideGroups;
var peptideGroupsNew = new List<PeptideGroupDocNode>();
int keysAdded = 0;
foreach (var groupsToMerge in listKeys.Select(k => dictGroupsToMergeLists[k]))
{
if (groupsToMerge.Count == 1)
peptideGroupsNew.Add(groupsToMerge[0]);
else
{
// Fold all duplicates into the first occurrence.
var nodeGroupNew = groupsToMerge[0];
foreach (var peptideGroupDocNode in groupsToMerge.Skip(1))
nodeGroupNew = nodeGroupNew.Merge(peptideGroupDocNode);
peptideGroupsNew.Add(nodeGroupNew);
}
keysAdded++;
if (progressMonitor != null)
{
if (progressMonitor.IsCanceled)
return new PeptideGroupDocNode[0];
// Merging owns the final (100 - PERCENT_READER)% of the progress bar.
int percentComplete = (keysAdded * (100 - PERCENT_READER) / listKeys.Count) + PERCENT_READER;
if (status.PercentComplete != percentComplete)
{
// TODO(brendanx): Switch to new message for 20.1 "Merging lists and targets"
progressMonitor.UpdateProgress(status = status.ChangePercentComplete(percentComplete)
.ChangeMessage(Resources.MassListImporter_Import_Reading_transition_list));
}
}
}
return peptideGroupsNew;
}
/// <summary>
/// Adds the reader's current row to the active group builder, starting a new
/// builder when the protein name changes (finishing the previous one into
/// peptideGroupsNew). Invalid iRT/library values are recorded in errorList and
/// the row is skipped. Returns the builder to use for the next row.
/// </summary>
private PeptideGroupBuilder AddRow(PeptideGroupBuilder seqBuilder,
MassListRowReader rowReader,
IDictionary<string, FastaSequence> dictNameSeq,
ICollection<PeptideGroupDocNode> peptideGroupsNew,
string lineText,
long lineNum,
string sourceFile,
List<MeasuredRetentionTime> irtPeptides,
List<SpectrumMzInfo> librarySpectra,
List<TransitionImportErrorInfo> errorList)
{
var info = rowReader.TransitionInfo;
var irt = rowReader.Irt;
var libraryIntensity = rowReader.LibraryIntensity;
var productMz = rowReader.ProductMz;
// An iRT column exists but this row's value failed to parse.
if (irt == null && rowReader.IrtColumn != -1)
{
var error = new TransitionImportErrorInfo(string.Format(Resources.MassListImporter_AddRow_Invalid_iRT_value_at_precusor_m_z__0__for_peptide__1_,
rowReader.TransitionInfo.PrecursorMz,
rowReader.TransitionInfo.ModifiedSequence),
rowReader.IrtColumn,
lineNum, lineText);
errorList.Add(error);
return seqBuilder;
}
// A library intensity column exists but this row's value failed to parse.
if (libraryIntensity == null && rowReader.LibraryColumn != -1)
{
var error = new TransitionImportErrorInfo(string.Format(Resources.MassListImporter_AddRow_Invalid_library_intensity_at_precursor__0__for_peptide__1_,
rowReader.TransitionInfo.PrecursorMz,
rowReader.TransitionInfo.ModifiedSequence),
rowReader.LibraryColumn,
lineNum, lineText);
errorList.Add(error);
return seqBuilder;
}
string name = info.ProteinName;
// All decoys are collected under a single reserved group name.
if (info.TransitionExps.Any(t => t.IsDecoy))
name = PeptideGroup.DECOYS;
if (seqBuilder == null || (name != null && !Equals(name, seqBuilder.BaseName)))
{
if (seqBuilder != null)
{
AddPeptideGroup(peptideGroupsNew, seqBuilder, irtPeptides, librarySpectra, errorList);
}
FastaSequence fastaSeq;
if (name != null && dictNameSeq.TryGetValue(name, out fastaSeq) && fastaSeq != null)
seqBuilder = new PeptideGroupBuilder(fastaSeq, Document.Settings, sourceFile);
else
{
// No FASTA sequence known for this name; build a peptide-list group instead.
string safeName = name != null ?
Helpers.GetUniqueName(name, dictNameSeq.Keys) :
Document.GetPeptideGroupId(true);
seqBuilder = new PeptideGroupBuilder(@">>" + safeName, true, Document.Settings, sourceFile) {BaseName = name};
}
}
try
{
seqBuilder.AppendTransition(info, irt, libraryIntensity, productMz, lineText, lineNum);
}
catch (InvalidDataException x)
{
// Attach line information so the user can locate the offending row.
throw new LineColNumberedIoException(x.Message, lineNum, -1, x);
}
return seqBuilder;
}
// Finalizes a group builder into a document node, collects its iRT peptides,
// library spectra and errors, and updates the running peptide/ion counters.
private void AddPeptideGroup(ICollection<PeptideGroupDocNode> listGroups,
    PeptideGroupBuilder builder,
    List<MeasuredRetentionTime> irtPeptides,
    List<SpectrumMzInfo> librarySpectra,
    List<TransitionImportErrorInfo> errorList)
{
    var nodeGroup = builder.ToDocNode();
    listGroups.Add(nodeGroup);
    irtPeptides.AddRange(builder.IrtPeptides);
    librarySpectra.AddRange(builder.LibrarySpectra);
    var groupErrors = builder.PeptideGroupErrorInfo;
    if (groupErrors.Count > 0)
        errorList.AddRange(groupErrors);
    // Keep the running totals in sync for diagnostics.
    _countPeptides += nodeGroup.MoleculeCount;
    _countIons += nodeGroup.TransitionCount;
}
/// <summary>
/// Base class for parsing one transition-list row at a time into an
/// <see cref="ExTransitionInfo"/>, explaining each row's precursor and product m/z
/// values against the document settings. Subclasses supply column interpretation
/// via <see cref="CalcTransitionInfo"/>.
/// </summary>
public abstract class MassListRowReader
{
protected MassListRowReader(IFormatProvider provider,
char separator,
ColumnIndices indices,
IList<string> lines,
SrmSettings settings,
IEnumerable<string> sequences,
IProgressMonitor progressMonitor,
IProgressStatus status)
{
FormatProvider = provider;
Separator = separator;
Indices = indices;
Lines = lines;
Settings = settings;
// Pre-match modifications for all sequences up front; results are cached in NodeDictionary.
ModMatcher = CreateModificationMatcher(settings, sequences, lines.Count, progressMonitor, status);
NodeDictionary = new Dictionary<string, PeptideDocNode>();
}
// Builds a ModificationMatcher over the distinct sequences, using a lenient copy
// of the settings. Falls back to an empty sequence set on FormatException.
private static ModificationMatcher CreateModificationMatcher(SrmSettings settings, IEnumerable<string> sequences,
int expectedCount = 0, IProgressMonitor progressMonitor = null, IProgressStatus status = null)
{
var modMatcher = new ModificationMatcher();
// We want AutoSelect on so we can generate transition groups, but we want the filter to
// be lenient because we are only using this to match modifications, not generate the
// final transition groups
var settingsMatcher = settings.ChangeTransitionFilter(filter => filter.ChangeAutoSelect(true))
.ChangeTransitionFullScan(fullscan => fullscan.ChangePrecursorIsotopes(FullScanPrecursorIsotopes.None, null, null))
.ChangeTransitionFilter(filter => filter.ChangePeptidePrecursorCharges(Enumerable.Range(TransitionGroup.MIN_PRECURSOR_CHARGE,
TransitionGroup.MAX_PRECURSOR_CHARGE).Select(Adduct.FromChargeProtonated).ToArray()))
;
try
{
var distinctSequences = GetDistinctSequences(sequences, expectedCount, progressMonitor, status);
if (status != null)
status = status.NextSegment();
modMatcher.CreateMatches(settingsMatcher,
distinctSequences,
Properties.Settings.Default.StaticModList,
Properties.Settings.Default.HeavyModList,
progressMonitor, status); // Can't use expected count
}
catch (FormatException)
{
modMatcher.CreateMatches(settingsMatcher,
new string[0],
Properties.Settings.Default.StaticModList,
Properties.Settings.Default.HeavyModList,
progressMonitor, status);
}
return modMatcher;
}
// Returns the unique sequences in first-seen order, reporting progress when a
// monitor is supplied. Returns an empty array on null input or cancellation.
private static IList<string> GetDistinctSequences(IEnumerable<string> sequences, int expectedCount,
IProgressMonitor progressMonitor, IProgressStatus status)
{
if (sequences == null)
return new string[0];
// expectedCount/4 is a capacity guess to reduce re-hashing/re-allocation.
var setSeen = new HashSet<string>(expectedCount/4);
var listSeen = new List<string>(expectedCount/4);
int sequenceCurrent = 0;
foreach (string sequence in sequences)
{
if (progressMonitor != null)
{
sequenceCurrent++;
if (progressMonitor.IsCanceled)
return new string[0];
if (expectedCount > 0)
progressMonitor.UpdateProgress(status = status.UpdatePercentCompleteProgress(progressMonitor, sequenceCurrent, expectedCount));
}
if (!setSeen.Contains(sequence))
{
setSeen.Add(sequence);
listSeen.Add(sequence);
}
}
return listSeen;
}
protected SrmSettings Settings { get; private set; }
// Fields of the row most recently passed to NextRow.
protected string[] Fields { get; private set; }
public IList<string> Lines { get; private set; }
private IFormatProvider FormatProvider { get; set; }
private char Separator { get; set; }
private ModificationMatcher ModMatcher { get; set; }
// Cache of modified sequence text -> matched peptide node (null when no match).
private Dictionary<string, PeptideDocNode> NodeDictionary { get; set; }
public ColumnIndices Indices { get; private set; }
// Column accessors below return -1 when a column is absent; value accessors then
// return 0 or null accordingly.
protected int ProteinColumn { get { return Indices.ProteinColumn; } }
protected int PeptideColumn { get { return Indices.PeptideColumn; } }
protected int LabelTypeColumn { get { return Indices.LabelTypeColumn; } }
protected int FragmentNameColumn { get { return Indices.FragmentNameColumn; } }
private int PrecursorColumn { get { return Indices.PrecursorColumn; } }
protected double PrecursorMz { get { return ColumnMz(Fields, PrecursorColumn, FormatProvider); } }
protected int PrecursorChargeColumn { get { return Indices.PrecursorChargeColumn; } }
protected int? PrecursorCharge { get { return ColumnInt(Fields, PrecursorChargeColumn, FormatProvider); } }
private int ProductColumn { get { return Indices.ProductColumn; } }
public double ProductMz { get { return ColumnMz(Fields, ProductColumn, FormatProvider); } }
private int ProductChargeColumn { get { return Indices.ProductChargeColumn; } }
protected int? ProductCharge { get { return ColumnInt(Fields, ProductChargeColumn, FormatProvider); } }
private int DecoyColumn { get { return Indices.DecoyColumn; } }
public int IrtColumn { get { return Indices.IrtColumn; } }
public double? Irt { get { return ColumnDouble(Fields, IrtColumn, FormatProvider); } }
public int LibraryColumn { get { return Indices.LibraryColumn; } }
public double? LibraryIntensity { get { return ColumnDouble(Fields, LibraryColumn, FormatProvider); } }
protected bool IsDecoy
{
get { return DecoyColumn != -1 && Equals(Fields[DecoyColumn].ToLowerInvariant(), @"true"); }
}
public PeptideModifications GetModifications(SrmDocument document)
{
return ModMatcher.GetDocModifications(document);
}
private double MzMatchTolerance { get { return Settings.TransitionSettings.Instrument.MzMatchTolerance; } }
// Parsed and explained info for the row most recently accepted by NextRow.
public ExTransitionInfo TransitionInfo { get; private set; }
private bool IsHeavyAllowed
{
get { return Settings.PeptideSettings.Modifications.HasHeavyImplicitModifications; }
}
private bool IsHeavyTypeAllowed(IsotopeLabelType labelType)
{
return Settings.TryGetPrecursorCalc(labelType, null) != null;
}
// Parses one row, validating its sequence, label type, precursor and product m/z.
// Returns null on success (TransitionInfo is then valid) or an error to record.
public TransitionImportErrorInfo NextRow(string line, long lineNum)
{
Fields = line.ParseDsvFields(Separator);
if (PeptideColumn == -1)
return new TransitionImportErrorInfo(Resources.MassListRowReader_NextRow_No_peptide_sequence_column_specified, null, lineNum, line);
ExTransitionInfo info = CalcTransitionInfo(lineNum);
if (!FastaSequence.IsExSequence(info.PeptideSequence))
{
return new TransitionImportErrorInfo(string.Format(Resources.MassListRowReader_NextRow_Invalid_peptide_sequence__0__found,
info.PeptideSequence),
PeptideColumn,
lineNum, line);
}
if (!info.DefaultLabelType.IsLight && !IsHeavyTypeAllowed(info.DefaultLabelType))
{
return new TransitionImportErrorInfo(TextUtil.SpaceSeparate(Resources.MassListRowReader_NextRow_Isotope_labeled_entry_found_without_matching_settings_,
Resources.MassListRowReader_NextRow_Check_the_Modifications_tab_in_Transition_Settings),
LabelTypeColumn,
lineNum, line);
}
TransitionImportErrorInfo errorInfo;
info = CalcPrecursorExplanations(info, line, lineNum, out errorInfo);
if (errorInfo != null)
{
return errorInfo;
}
TransitionInfo = CalcTransitionExplanations(info, line, lineNum, out errorInfo);
return errorInfo;
}
// Subclasses interpret the current Fields into an ExTransitionInfo.
protected abstract ExTransitionInfo CalcTransitionInfo(long lineNum);
private ExTransitionInfo CalcPrecursorExplanations(ExTransitionInfo info, string lineText, long lineNum, out TransitionImportErrorInfo errorInfo)
{
// Enumerate all possible variable modifications looking for an explanation
// for the precursor information
errorInfo = null;
double precursorMz = info.PrecursorMz;
int? precursorZ = PrecursorCharge;
double nearestMz = double.MaxValue;
var peptideMods = Settings.PeptideSettings.Modifications;
PeptideDocNode nodeForModPep = null;
string modifiedSequence = info.ModifiedSequence;
if (!Equals(modifiedSequence, info.PeptideSequence))
{
// Cache matched nodes by modified sequence to avoid repeated matching.
if (!NodeDictionary.TryGetValue(modifiedSequence, out nodeForModPep))
{
nodeForModPep = ModMatcher.GetModifiedNode(modifiedSequence, null);
NodeDictionary.Add(modifiedSequence, nodeForModPep);
}
info.ModifiedSequence = nodeForModPep == null ? null : nodeForModPep.RawTextId;
}
var nodesToConsider = nodeForModPep != null ?
new List<PeptideDocNode> {nodeForModPep} :
Peptide.CreateAllDocNodes(Settings, info.PeptideSequence);
foreach (var nodePep in nodesToConsider)
{
var variableMods = nodePep.ExplicitMods;
var defaultLabelType = info.DefaultLabelType;
var precursorMassH = Settings.GetPrecursorMass(defaultLabelType, info.PeptideTarget, variableMods);
int precursorMassShift;
int nearestCharge;
Adduct precursorCharge = CalcPrecursorCharge(precursorMassH, precursorZ, precursorMz, MzMatchTolerance, !nodePep.IsProteomic,
info.IsDecoy, out precursorMassShift, out nearestCharge);
if (!precursorCharge.IsEmpty)
{
info.TransitionExps.Add(new TransitionExp(variableMods, precursorCharge, defaultLabelType,
precursorMassShift));
}
else
{
// Track the closest miss for the error message below.
nearestMz = NearestMz(info.PrecursorMz, nearestMz, precursorMassH, nearestCharge);
}
if (!IsHeavyAllowed || info.IsExplicitLabelType)
continue;
// Also try each heavy label type when no explicit label type was given.
foreach (var labelType in peptideMods.GetHeavyModifications().Select(typeMods => typeMods.LabelType))
{
if (!Settings.HasPrecursorCalc(labelType, variableMods))
{
continue;
}
precursorMassH = Settings.GetPrecursorMass(labelType, info.PeptideTarget, variableMods);
precursorCharge = CalcPrecursorCharge(precursorMassH, precursorZ, precursorMz, MzMatchTolerance, !nodePep.IsProteomic,
info.IsDecoy, out precursorMassShift, out nearestCharge);
if (!precursorCharge.IsEmpty)
{
info.TransitionExps.Add(new TransitionExp(variableMods, precursorCharge, labelType,
precursorMassShift));
}
else
{
nearestMz = NearestMz(info.PrecursorMz, nearestMz, precursorMassH, nearestCharge);
}
}
}
if (info.TransitionExps.Count == 0)
{
// TODO: Consistent central formatting for m/z values
// Use Math.Round() to avoid forcing extra decimal places
nearestMz = Math.Round(nearestMz, MZ_ROUND_DIGITS);
precursorMz = Math.Round(SequenceMassCalc.PersistentMZ(precursorMz), MZ_ROUND_DIGITS);
double deltaMz = Math.Round(Math.Abs(precursorMz - nearestMz), MZ_ROUND_DIGITS);
errorInfo = new TransitionImportErrorInfo(TextUtil.SpaceSeparate(string.Format(Resources.MassListRowReader_CalcPrecursorExplanations_,
precursorMz, nearestMz, deltaMz, info.PeptideSequence),
Resources.MzMatchException_suggestion),
PrecursorColumn,
lineNum, lineText);
}
else if (!Settings.TransitionSettings.Instrument.IsMeasurable(precursorMz))
{
precursorMz = Math.Round(SequenceMassCalc.PersistentMZ(precursorMz), MZ_ROUND_DIGITS);
errorInfo = new TransitionImportErrorInfo(TextUtil.SpaceSeparate(string.Format(Resources.MassListRowReader_CalcPrecursorExplanations_The_precursor_m_z__0__of_the_peptide__1__is_out_of_range_for_the_instrument_settings_,
precursorMz, info.PeptideSequence),
Resources.MassListRowReader_CalcPrecursorExplanations_Check_the_Instrument_tab_in_the_Transition_Settings),
PrecursorColumn,
lineNum, lineText);
}
// If it's within the instrument settings but not measurable, problem must be in the isolation scheme
else if (!Settings.TransitionSettings.IsMeasurablePrecursor(precursorMz))
{
precursorMz = Math.Round(SequenceMassCalc.PersistentMZ(precursorMz), MZ_ROUND_DIGITS);
errorInfo = new TransitionImportErrorInfo(TextUtil.SpaceSeparate(string.Format(Resources.MassListRowReader_CalcPrecursorExplanations_The_precursor_m_z__0__of_the_peptide__1__is_outside_the_range_covered_by_the_DIA_isolation_scheme_,
precursorMz, info.PeptideSequence),
Resources.MassListRowReader_CalcPrecursorExplanations_Check_the_isolation_scheme_in_the_full_scan_settings_),
PrecursorColumn,
lineNum, lineText);
}
return info;
}
// Returns whichever of newMz/nearestMz is closer to precursorMz.
private static double NearestMz(double precursorMz, double nearestMz, TypedMass precursorMassH, int precursorCharge)
{
var newMz = SequenceMassCalc.GetMZ(precursorMassH, precursorCharge);
return Math.Abs(precursorMz - newMz) < Math.Abs(precursorMz - nearestMz)
? newMz
: nearestMz;
}
// Thin wrapper over TransitionCalc.CalcPrecursorCharge.
private static Adduct CalcPrecursorCharge(TypedMass precursorMassH,
int? precursorZ,
double precursorMz,
double tolerance,
bool isCustomIon,
bool isDecoy,
out int massShift,
out int nearestCharge)
{
return TransitionCalc.CalcPrecursorCharge(precursorMassH, precursorZ, precursorMz, tolerance, isCustomIon, isDecoy, out massShift, out nearestCharge);
}
// Assigns a product ion explanation to each precursor explanation, removing
// explanations with no matching product; reports an error when none remain or
// the product m/z is outside instrument limits.
private ExTransitionInfo CalcTransitionExplanations(ExTransitionInfo info, string lineText, long lineNum, out TransitionImportErrorInfo errorInfo)
{
errorInfo = null;
var sequence = info.PeptideTarget;
double productMz = ProductMz;
int? productZ = ProductCharge;
// ToArray() copy allows removal from the original list while iterating.
foreach (var transitionExp in info.TransitionExps.ToArray())
{
var mods = transitionExp.Precursor.VariableMods;
var calc = Settings.GetFragmentCalc(transitionExp.Precursor.LabelType, mods);
var productPrecursorMass = calc.GetPrecursorFragmentMass(sequence);
var productMasses = calc.GetFragmentIonMasses(sequence);
var potentialLosses = TransitionGroup.CalcPotentialLosses(sequence,
Settings.PeptideSettings.Modifications, mods, calc.MassType);
var types = Settings.TransitionSettings.Filter.PeptideIonTypes;
IonType? ionType;
int? ordinal;
TransitionLosses losses;
int massShift;
var productCharge = TransitionCalc.CalcProductCharge(productPrecursorMass,
productZ,
transitionExp.Precursor.PrecursorAdduct,
types,
productMasses,
potentialLosses,
productMz,
MzMatchTolerance,
calc.MassType,
transitionExp.ProductShiftType,
out ionType,
out ordinal,
out losses,
out massShift);
if (!productCharge.IsEmpty && ionType.HasValue && ordinal.HasValue)
{
transitionExp.Product = new ProductExp(productCharge, ionType.Value, ordinal.Value, losses, massShift);
}
else
{
info.TransitionExps.Remove(transitionExp);
}
}
if (info.TransitionExps.Count == 0)
{
productMz = Math.Round(productMz, MZ_ROUND_DIGITS);
// TODO: Consistent central formatting for m/z values
// Use Math.Round() to avoid forcing extra decimal places
errorInfo = new TransitionImportErrorInfo(string.Format(Resources.MassListRowReader_CalcTransitionExplanations_Product_m_z_value__0__in_peptide__1__has_no_matching_product_ion,
productMz, info.PeptideSequence),
ProductColumn,
lineNum, lineText);
}
else if (!Settings.TransitionSettings.Instrument.IsMeasurable(productMz))
{
productMz = Math.Round(productMz, MZ_ROUND_DIGITS);
errorInfo = new TransitionImportErrorInfo(TextUtil.SpaceSeparate(string.Format(Resources.MassListRowReader_CalcTransitionExplanations_The_product_m_z__0__is_out_of_range_for_the_instrument_settings__in_the_peptide_sequence__1_,
productMz, info.PeptideSequence),
Resources.MassListRowReader_CalcPrecursorExplanations_Check_the_Instrument_tab_in_the_Transition_Settings),
ProductColumn,
lineNum, lineText);
}
return info;
}
// Parses an m/z value from a column; 0 means missing/invalid.
private static double ColumnMz(string[] fields, int column, IFormatProvider provider)
{
double result;
// CONSIDER: This does not allow exponents or thousands separators like the default double.Parse(). Should it?
if (column == -1)
{
return 0;
}
if (double.TryParse(fields[column], NumberStyles.Number, provider, out result))
return result;
return 0; // Invalid m/z
}
// Parses a double from a column; null means missing/invalid.
private static double? ColumnDouble(string[] fields, int column, IFormatProvider provider)
{
double result;
if (column != -1 && double.TryParse(fields[column], NumberStyles.Float|NumberStyles.AllowThousands, provider, out result))
return result;
return null;
}
// Parses an int from a column; null means missing/invalid.
private static int? ColumnInt(string[] fields, int column, IFormatProvider provider)
{
int result;
if (column != -1 && int.TryParse(fields[column], NumberStyles.Integer, provider, out result))
return result;
return null;
}
// Scans the fields of a row for a column whose value can be explained as the
// precursor m/z of the given sequence; returns its index (-1 if none) and the
// matching transition explanations.
protected static int FindPrecursor(string[] fields,
string sequence,
string modifiedSequence,
IsotopeLabelType labelType,
int iSequence,
int iDecoy,
double tolerance,
IFormatProvider provider,
SrmSettings settings,
out IList<TransitionExp> transitionExps)
{
PeptideDocNode nodeForModPep = null;
if (!Equals(modifiedSequence, sequence))
{
var modMatcher = CreateModificationMatcher(settings, new[] {modifiedSequence});
nodeForModPep = modMatcher.GetModifiedNode(modifiedSequence, null);
}
var nodesToConsider = Peptide.CreateAllDocNodes(settings, sequence).ToList();
if (nodeForModPep != null)
nodesToConsider.Insert(0, nodeForModPep);
transitionExps = new List<TransitionExp>();
int indexPrec = -1;
foreach (PeptideDocNode nodePep in nodesToConsider)
{
var mods = nodePep.ExplicitMods;
var calc = settings.TryGetPrecursorCalc(labelType, mods);
if (calc == null)
continue;
var precursorMassH = calc.GetPrecursorMass(nodePep.Peptide.Target);
bool isDecoy = iDecoy != -1 && Equals(fields[iDecoy].ToLowerInvariant(), @"true");
for (int i = 0; i < fields.Length; i++)
{
// Once a precursor column is found, only re-test that column.
if (indexPrec != -1 && i != indexPrec)
continue;
if (i == iSequence)
continue;
double precursorMz = ColumnMz(fields, i, provider);
// With increased maximum charge state, avoid guessing very high charge states for smaller values
if (precursorMz == 0 || precursorMz < settings.TransitionSettings.Instrument.MinMz)
continue;
int massShift;
int nearestCharge;
var charge = CalcPrecursorCharge(precursorMassH, null, precursorMz, tolerance, !nodePep.IsProteomic, isDecoy, out massShift, out nearestCharge);
if (!charge.IsEmpty)
{
indexPrec = i;
transitionExps.Add(new TransitionExp(mods, charge, labelType, massShift));
}
}
}
return indexPrec;
}
// Scans the fields of a row for the column most plausibly holding the product
// m/z; returns its index, or -1 when no column matches.
protected static int FindProduct(string[] fields, string seq, IEnumerable<TransitionExp> transitionExps,
int iSequence, int iPrecursor, double tolerance, IFormatProvider provider, SrmSettings settings)
{
double maxProductMz = 0;
int maxIndex = -1;
var sequence = new Target(seq);
var types = settings.TransitionSettings.Filter.PeptideIonTypes;
foreach (var transitionExp in transitionExps)
{
var mods = transitionExp.Precursor.VariableMods;
var calc = settings.GetFragmentCalc(transitionExp.Precursor.LabelType, mods);
var productPrecursorMass = calc.GetPrecursorFragmentMass(sequence);
var productMasses = calc.GetFragmentIonMasses(sequence);
var potentialLosses = TransitionGroup.CalcPotentialLosses(sequence,
settings.PeptideSettings.Modifications, mods, calc.MassType);
for (int i = 0; i < fields.Length; i++)
{
if (i == iSequence || i == iPrecursor)
continue;
double productMz = ColumnMz(fields, i, provider);
if (productMz == 0)
continue;
IonType? ionType;
int? ordinal;
TransitionLosses losses;
int massShift;
var charge = TransitionCalc.CalcProductCharge(productPrecursorMass,
null, // CONSIDER: Use product charge field?
transitionExp.Precursor.PrecursorAdduct,
types,
productMasses,
potentialLosses,
productMz,
tolerance,
calc.MassType,
transitionExp.ProductShiftType,
out ionType,
out ordinal,
out losses,
out massShift);
// Look for the maximum product m/z, or this function may settle for a
// collision energy or retention time that matches a single amino acid
if (!charge.IsEmpty && productMz > maxProductMz)
{
maxProductMz = productMz;
maxIndex = i;
}
}
}
return maxIndex;
}
}
private class GeneralRowReader : MassListRowReader
{
// Constructs a reader whose column indices are already known, supplying the base
// class with the peptide sequences extracted from the lines for modification matching.
public GeneralRowReader(IFormatProvider provider,
char separator,
ColumnIndices indices,
SrmSettings settings,
IList<string> lines,
IProgressMonitor progressMonitor,
IProgressStatus status)
: base(provider, separator, indices, lines, settings, GetSequencesFromLines(lines, separator, indices), progressMonitor, status)
{
}
// Maps a label column value to an isotope label type: the full heavy label name
// or just its first letter (case-insensitive) means heavy; anything else is light.
private static IsotopeLabelType GetLabelType(string typeId)
{
    string lower = typeId.ToLower();
    bool isHeavy = Equals(lower, IsotopeLabelType.HEAVY_NAME) ||
                   Equals(lower, IsotopeLabelType.HEAVY_NAME.Substring(0, 1));
    return isHeavy ? IsotopeLabelType.heavy : IsotopeLabelType.light;
}
// Interprets the current Fields into an ExTransitionInfo using the known columns.
protected override ExTransitionInfo CalcTransitionInfo(long lineNum)
{
    // Protein name is optional.
    string proteinName = ProteinColumn != -1 ? Fields[ProteinColumn] : null;
    string rawPeptide = Fields[PeptideColumn];
    var info = new ExTransitionInfo(proteinName,
        RemoveSequenceNotes(rawPeptide),
        RemoveModifiedSequenceNotes(rawPeptide),
        PrecursorMz, IsDecoy);
    if (LabelTypeColumn != -1)
    {
        // An explicit label column overrides the default label type.
        info.DefaultLabelType = GetLabelType(Fields[LabelTypeColumn]);
        info.IsExplicitLabelType = true;
    }
    return info;
}
private struct PrecursorCandidate
{
public PrecursorCandidate(int sequenceIndex, int precursorMzIdex, string sequence, IList<TransitionExp> transitionExps, int labelIndex) : this()
{
SequenceIndex = sequenceIndex;
PrecursorMzIdex = precursorMzIdex;
Sequence = sequence;
TransitionExps = transitionExps;
LabelIndex = labelIndex;
}
public int SequenceIndex { get; private set; }
public int PrecursorMzIdex { get; private set; }
public string Sequence { get; private set; }
public IList<TransitionExp> TransitionExps { get; private set; }
public int LabelIndex { get; private set; }
}
public static GeneralRowReader Create(IFormatProvider provider, char separator, ColumnIndices indices, SrmSettings settings, IList<string> lines,
bool tolerateErrors, IProgressMonitor progressMonitor, IProgressStatus status)
{
// Split the first line into fields.
Assume.IsTrue(lines.Count > 0);
// Look for sequence column
string[] fieldsFirstRow = null;
PrecursorCandidate[] sequenceCandidates = null;
int bestCandidateIndex = -1;
int iLabelType = -1;
double tolerance = settings.TransitionSettings.Instrument.MzMatchTolerance;
var linesSeen = 0;
status = progressMonitor != null
? (status ?? new ProgressStatus()).ChangeMessage(Resources.MassListImporter_Import_Inspecting_peptide_sequence_information)
: null;
foreach (var line in lines)
{
if (progressMonitor != null)
{
if (progressMonitor.IsCanceled)
{
return null;
}
status = status.UpdatePercentCompleteProgress(progressMonitor, linesSeen++, lines.Count);
}
string[] fields = line.ParseDsvFields(separator);
if (fieldsFirstRow == null)
fieldsFirstRow = fields;
// Choose precursor field candidates from the first row
if (sequenceCandidates == null)
{
iLabelType = FindLabelType(fields, lines, separator);
// If no sequence column found, return null. After this, all errors throw.
var newSeqCandidates = FindSequenceCandidates(fields);
if (newSeqCandidates.Length == 0)
{
if (tolerateErrors)
{
break; // Caller will assign columns by other means
}
return null;
}
var listNewCandidates = new List<PrecursorCandidate>();
foreach (var candidateIndex in newSeqCandidates)
{
string sequence = RemoveSequenceNotes(fields[candidateIndex]);
string modifiedSequence = RemoveModifiedSequenceNotes(fields[candidateIndex]);
var candidateMzIndex = -1;
IList<TransitionExp> transitionExps = null;
var usingLabelTypeColumn = iLabelType != -1;
// Consider the possibility that label column has been misidentified (could be some other reason for a column full
// of the word "light", as in CommandLineAssayImportTest\OpenSWATH_SM4_NoError.csv)
for (var pass = 0; pass < (usingLabelTypeColumn ? 2 : 1) && candidateMzIndex == -1; pass++)
{
IsotopeLabelType labelType;
if (pass == 0)
{
labelType = usingLabelTypeColumn ? GetLabelType(fields[iLabelType]) : IsotopeLabelType.light;
}
else
{
// Perhaps label column was falsely identified
labelType = IsotopeLabelType.light;
usingLabelTypeColumn = false;
}
candidateMzIndex = FindPrecursor(fields, sequence, modifiedSequence, labelType, candidateIndex, indices.DecoyColumn,
tolerance, provider, settings, out transitionExps);
// If no match, and no specific label type, then try heavy.
if (settings.PeptideSettings.Modifications.HasHeavyModifications &&
candidateMzIndex == -1 && !usingLabelTypeColumn)
{
var peptideMods = settings.PeptideSettings.Modifications;
foreach (var typeMods in peptideMods.GetHeavyModifications())
{
if (settings.TryGetPrecursorCalc(typeMods.LabelType, null) != null)
{
candidateMzIndex = FindPrecursor(fields, sequence, modifiedSequence, typeMods.LabelType, candidateIndex, indices.DecoyColumn,
tolerance, provider, settings, out transitionExps);
if (candidateMzIndex != -1)
{
break;
}
}
}
}
}
if (candidateMzIndex != -1)
{
listNewCandidates.Add(new PrecursorCandidate(candidateIndex, candidateMzIndex, sequence, transitionExps, usingLabelTypeColumn ? iLabelType : -1));
}
}
if (listNewCandidates.Count == 0)
{
if (tolerateErrors)
{
break; // Caller will assign columns by other means
}
throw new MzMatchException(Resources.GeneralRowReader_Create_No_valid_precursor_m_z_column_found, 1, -1);
}
sequenceCandidates = listNewCandidates.ToArray();
}
bestCandidateIndex = FindBestCandidate(sequenceCandidates, fields);
// Break if a best candidate was found
if (bestCandidateIndex != -1)
break;
}
if (sequenceCandidates == null)
{
if (!tolerateErrors)
{
return null;
}
}
else
{
if (bestCandidateIndex == -1)
bestCandidateIndex = 0;
var prec = sequenceCandidates[bestCandidateIndex];
int iSequence = prec.SequenceIndex;
int iPrecursor = prec.PrecursorMzIdex;
int iProduct = FindProduct(fieldsFirstRow, prec.Sequence, prec.TransitionExps, prec.SequenceIndex, prec.PrecursorMzIdex,
tolerance, provider, settings);
if (iProduct == -1)
throw new MzMatchException(Resources.GeneralRowReader_Create_No_valid_product_m_z_column_found, 1, -1);
int iProt = indices.ProteinColumn;
if (iProt == -1)
iProt = FindProtein(fieldsFirstRow, iSequence, lines, indices.Headers, provider, separator);
int iPrecursorCharge = indices.PrecursorChargeColumn;
// Explicitly declaring the charge state interferes with downstream logic that matches m/z and peptide
// to plausible peptide modifications
//if (iPrecursorCharge == -1)
// iPrecursorCharge = FindPrecursorCharge(fieldsFirstRow, lines, separator);
int iFragmentName = indices.FragmentNameColumn;
if (iFragmentName == -1)
iFragmentName = FindFragmentName(fieldsFirstRow, lines, separator);
iLabelType = prec.LabelIndex;
indices.AssignDetected(iProt, iSequence, iPrecursor, iProduct, iLabelType, iFragmentName, iPrecursorCharge);
}
return new GeneralRowReader(provider, separator, indices, settings, lines, progressMonitor, status);
}
private static int[] FindSequenceCandidates(string[] fields)
{
var listCandidates = new List<int>();
for (int i = 0; i < fields.Length; i++)
{
var fieldUpper = fields[i].ToUpper(CultureInfo.InvariantCulture);
if (@"TRUE" == fieldUpper || @"FALSE" == fieldUpper)
continue;
string seqPotential = RemoveSequenceNotes(fields[i]);
if (seqPotential.Length < 2)
continue;
if (FastaSequence.IsExSequence(seqPotential))
{
listCandidates.Add(i);
}
}
return listCandidates.ToArray();
}
private static int FindBestCandidate(PrecursorCandidate[] precursorCandidates, string[] fields)
{
Assume.IsTrue(precursorCandidates.Length > 0);
if (precursorCandidates.Length == 1)
return 0;
// If any of the options has modification indicators, return it.
for (int i = 0; i < precursorCandidates.Length; i++)
{
var prec = precursorCandidates[i];
string seq = fields[prec.SequenceIndex];
if (!Equals(seq, RemoveSequenceNotes(seq)))
return i;
}
// Otherwise, it is not possible to distinguish the candidates from each other.
return -1;
}
private static string RemoveSequenceNotes(string seq)
{
seq = RemoveSpectronautQuoting(seq);
string seqClean = FastaSequence.StripModifications(seq);
int dotIndex = seqClean.IndexOf('.');
if (dotIndex != -1 || (dotIndex = seqClean.IndexOf('_')) != -1)
seqClean = seqClean.Substring(0, dotIndex);
seqClean = seqClean.TrimEnd('+');
return seqClean;
}
private static string RemoveSpectronautQuoting(string seq)
{
// Spectronaut adds underscores to the beginning and the end of most of its sequence column text
if (seq.StartsWith(@"_") && seq.EndsWith(@"_"))
seq = seq.Substring(1, seq.Length - 2);
return seq;
}
private static string RemoveModifiedSequenceNotes(string seq)
{
seq = RemoveSpectronautQuoting(seq);
// Find all occurrences of . and _
var dotIndices = new List<int>();
for (int i = 0; i < seq.Length; ++i)
{
if (seq[i] == '.' || seq[i] == '_')
{
dotIndices.Add(i);
}
}
var matches = FastaSequence.RGX_ALL.Matches(seq);
int precedingNtermModLength = 0;
foreach (Match match in matches)
{
int start = match.Groups[0].Index;
int end = start + match.Groups[0].Length - 1;
// Detect the case where an N-terminal modification is specified before the first AA
if (start == 0)
precedingNtermModLength = end + 1;
// Ignore instances of . or _ that are within a modification tag
dotIndices = dotIndices.Where(index => index < start || end < index).ToList();
}
dotIndices.Sort();
// Chop at the first instance of . or _ outside a modification tag
if(dotIndices.Any())
{
seq = seq.Substring(0, dotIndices.First());
}
seq = seq.TrimEnd('+');
// If an N-terminal mod at the start, move it to after the first AA
if (precedingNtermModLength > 0 && precedingNtermModLength < seq.Length)
{
seq = seq.ElementAt(precedingNtermModLength) + seq.Substring(0, precedingNtermModLength) +
seq.Substring(precedingNtermModLength + 1);
}
return FastaSequence.NormalizeNTerminalMod(seq); // Make sure any n-terminal mod gets moved to after the first AA
}
private static readonly string[] EXCLUDE_PROTEIN_VALUES = { @"true", @"false", @"heavy", @"light", @"unit" };
private static int FindProtein(string[] fields, int iSequence, IEnumerable<string> lines, IList<string> headers,
IFormatProvider provider, char separator)
{
// First look for all columns that are non-numeric with more that 2 characters
List<int> listDescriptive = new List<int>();
for (int i = 0; i < fields.Length; i++)
{
if (i == iSequence)
continue;
string fieldValue = fields[i];
double tempDouble;
if (!double.TryParse(fieldValue, NumberStyles.Number, provider, out tempDouble))
{
if (fieldValue.Length > 2 && !EXCLUDE_PROTEIN_VALUES.Contains(fieldValue.ToLowerInvariant()))
listDescriptive.Add(i);
}
}
if (listDescriptive.Count > 0)
{
// Count the distribution of values in all lines for the candidate columns
Dictionary<string, int> sequenceCounts = new Dictionary<string, int>();
Dictionary<string, int>[] valueCounts = new Dictionary<string, int>[listDescriptive.Count];
for (int i = 0; i < valueCounts.Length; i++)
valueCounts[i] = new Dictionary<string, int>();
foreach (string line in lines)
{
string[] fieldsNext = line.ParseDsvFields(separator);
if (iSequence >= fieldsNext.Length)
continue;
AddCount(fieldsNext[iSequence], sequenceCounts);
for (int i = 0; i < valueCounts.Length; i++)
{
int iField = listDescriptive[i];
string key = (iField >= fieldsNext.Length ? string.Empty : fieldsNext[iField]);
AddCount(key, valueCounts[i]);
}
}
for (int i = valueCounts.Length - 1; i >= 0; i--)
{
// Discard any column with empty cells or which is less repetitive
int count;
if (valueCounts[i].TryGetValue(string.Empty, out count) || valueCounts[i].Count > sequenceCounts.Count)
listDescriptive.RemoveAt(i);
}
// If more than one possible value, and there are headers, look for
// one with the word protein in it.
if (headers != null && listDescriptive.Count > 1)
{
foreach (int i in listDescriptive)
{
if (headers[i].ToLowerInvariant().Contains(@"protein")) // : Since many transition list files are generated in English
return i;
}
}
// At this point, just use the first possible value, if one is present
if (listDescriptive.Count > 0)
{
return listDescriptive[0];
}
}
return -1;
}
// Finds the index of the Label Type columns
private static int FindLabelType(string[] fields, IList<string> lines, char separator)
{
var labelCandidates = new List<int>();
// Look for any columns that contain something that looks like a Label Type and add them to a list
for (int i = 0; i < fields.Length; i++)
{
if (ContainsLabelType(fields[i]))
{
labelCandidates.Add(i);
}
}
if (labelCandidates.Count == 0)
{
return -1;
}
var LabelCandidates = labelCandidates.ToArray();
// Confirm that the rest of the column has only entries that look like Label Types and return its index,
// if not move onto the next entry in the array
foreach (var i in LabelCandidates)
{
var allGood = true;
foreach (var line in lines)
{
var fieldsNext = line.ParseDsvFields(separator);
if (i >= fieldsNext.Length || !ContainsLabelType(fieldsNext[i]))
{
allGood = false;
break;
}
}
if (allGood)
{
return i;
}
}
return -1;
}
// Helper method for FindLabelType
private static bool ContainsLabelType(string field)
{
field = field.ToLower(); // Now our detection is case insensitive
if (Equals(field, IsotopeLabelType.LIGHT_NAME.Substring(0, 1)) || // Checks for "L"
(Equals(field, IsotopeLabelType.HEAVY_NAME.Substring(0, 1)) || // Checks for "H"
(Equals(field, IsotopeLabelType.LIGHT_NAME)) || // Checks for "light"
(Equals(field, IsotopeLabelType.HEAVY_NAME)))) // Checks for "heavy"
{
return true;
}
return false;
}
// Finds the index of the Fragment Name Column
private static int FindFragmentName(string[] fields, IList<string> lines, char separator)
{
var fragCandidates = new List<int>();
// Look for any columns that contain something that looks like a Fragment Name and add them to a list
for (int i = 0; i < fields.Length; i++)
{
if (RGX_FRAGMENT_NAME.IsMatch(fields[i]))
{
fragCandidates.Add(i);
}
}
if (fragCandidates.Count == 0)
{
return -1;
}
var FragCandidates = fragCandidates.ToArray();
// Confirm that the rest of the column has only entries that look like Fragment Names and return its index,
// if not move onto the next entry in the array
foreach (int i in FragCandidates)
{
bool allGood = true;
foreach (var line in lines)
{
var fieldsNext = line.ParseDsvFields(separator);
if (!RGX_FRAGMENT_NAME.IsMatch(fieldsNext[i]))
{
allGood = false;
break;
}
}
if (allGood)
{
return i;
}
}
return -1;
}
// N.B. using a regex here for consistency with pwiz_tools\Skyline\SettingsUI\EditOptimizationLibraryDlg.cs(401)
// Regular expression for finding a fragment name. Checks if the first character is a,b,c,x,y, or z and the second character is a digit
private static readonly Regex RGX_FRAGMENT_NAME = new Regex(@"precursor|([abcxyz][\d]+)", RegexOptions.IgnoreCase);
// This detection method for Precursor Charge interferes with downstream logic for guessing peptide modifications
/*private static int FindPrecursorCharge (string[] fields, IList<string> lines, char separator)
{
var listCandidates = new List<int>();
for (int i = 0; i < fields.Length; i++)
{
// If any of the cells in the first row look like precursor charges, we add their index to the list of candidates
if (ContainsPrecursorCharge(fields[i]))
{
listCandidates.Add(i);
}
}
var ListCandidates = listCandidates.ToArray();
// We test every cell in each candidate column and return the first column whose contents consistently meet our criteria
foreach (var i in ListCandidates)
{
var allGood = true;
foreach (var line in lines)
{
var fieldsNext = line.ParseDsvFields(separator);
if (!ContainsPrecursorCharge(fieldsNext[i]))
{
allGood = false;
break;
}
}
if (allGood)
{
return i;
}
}
return -1;
}
// Helper method for FindPrecursorCharge
private static bool ContainsPrecursorCharge(string field)
{
// Checks if we can turn the string into an integer
if (int.TryParse(field, out int j))
{
// Checks if the integer is between the range of possible charges
if (j >= TransitionGroup.MIN_PRECURSOR_CHARGE && j <= TransitionGroup.MAX_PRECURSOR_CHARGE)
{
return true;
}
}
return false;
}*/
private static void AddCount(string key, IDictionary<string, int> dict)
{
int count;
if (dict.TryGetValue(key, out count))
dict[key]++;
else
dict.Add(key, 1);
}
private static IEnumerable<string> GetSequencesFromLines(IEnumerable<string> lines, char separator, ColumnIndices indices)
{
return lines.Select(line => RemoveModifiedSequenceNotes(line.ParseDsvField(separator, indices.PeptideColumn)));
}
}
/// <summary>
/// Row reader for transition lists whose peptide column uses the extended
/// "Protein.Peptide.+.Label" format, parsed with a regular expression built
/// from the document's heavy label type names.
/// </summary>
private class ExPeptideRowReader : MassListRowReader
{
    // Protein.Peptide.+.Label
    private const string REGEX_PEPTIDE_FORMAT = @"^([^. ]+)\.([A-Za-z 0-9_+\-\[\]]+)\..+\.(light|{0})$";

    private ExPeptideRowReader(IFormatProvider provider,
        char separator,
        ColumnIndices indices,
        Regex exPeptideRegex,
        SrmSettings settings,
        IList<string> lines,
        IProgressMonitor progressMonitor,
        IProgressStatus status)
        : base(provider, separator, indices, lines, settings, GetSequencesFromLines(lines, separator, indices, exPeptideRegex), progressMonitor, status)
    {
        ExPeptideRegex = exPeptideRegex;
    }

    // Compiled pattern matching "Protein.Peptide.+.Label" with this document's label names.
    private Regex ExPeptideRegex { get; set; }

    /// <summary>
    /// Parses the extended peptide field of the current row into protein,
    /// sequence and label type. Throws a line/column-numbered IO exception
    /// when the field does not match the expected format.
    /// </summary>
    protected override ExTransitionInfo CalcTransitionInfo(long lineNum)
    {
        string exPeptide = Fields[PeptideColumn];
        Match match = ExPeptideRegex.Match(exPeptide);
        if (!match.Success)
            throw new LineColNumberedIoException(string.Format(Resources.ExPeptideRowReader_CalcTransitionInfo_Invalid_extended_peptide_format__0__, exPeptide), lineNum, PeptideColumn);

        try
        {
            string proteinName = GetProteinName(match);
            string peptideSequence = GetSequence(match);
            string modifiedSequence = GetModifiedSequence(match);

            var info = new ExTransitionInfo(proteinName, peptideSequence, modifiedSequence, PrecursorMz, IsDecoy)
                {
                    DefaultLabelType = GetLabelType(match, Settings),
                    IsExplicitLabelType = true
                };

            return info;
        }
        catch (Exception)
        {
            // Any parsing failure is reported uniformly as an invalid-format error at this line.
            throw new LineColNumberedIoException(
                string.Format(Resources.ExPeptideRowReader_CalcTransitionInfo_Invalid_extended_peptide_format__0__,
                              exPeptide),
                lineNum, PeptideColumn);
        }
    }

    /// <summary>
    /// Attempts to create an ExPeptideRowReader by finding a column matching the
    /// extended peptide format in the first line. Returns null when no such
    /// column exists; throws when the found data is inconsistent with settings.
    /// </summary>
    public static ExPeptideRowReader Create(IFormatProvider provider, char separator, ColumnIndices indices, SrmSettings settings, IList<string> lines,
        IProgressMonitor progressMonitor, IProgressStatus status)
    {
        // Split the first line into fields.
        Debug.Assert(lines.Count > 0);
        string[] fields = lines[0].ParseDsvFields(separator);

        // Create the ExPeptide regular expression
        var modSettings = settings.PeptideSettings.Modifications;
        var heavyTypeNames = from typedMods in modSettings.GetHeavyModifications()
                             select typedMods.LabelType.Name;
        string exPeptideFormat = string.Format(REGEX_PEPTIDE_FORMAT, string.Join(@"|", heavyTypeNames.ToArray()));
        var exPeptideRegex = new Regex(exPeptideFormat);

        // Look for sequence column
        string sequence;
        string modifiedSequence;
        IsotopeLabelType labelType;
        int iExPeptide = FindExPeptide(fields, exPeptideRegex, settings, out sequence, out modifiedSequence, out labelType);
        // If no sequence column found, return null. After this,
        // all errors throw.
        if (iExPeptide == -1)
            return null;

        if (!labelType.IsLight && !modSettings.HasHeavyImplicitModifications)
        {
            var message = TextUtil.LineSeparate(Resources.ExPeptideRowReader_Create_Isotope_labeled_entry_found_without_matching_settings,
                                                Resources.ExPeptideRowReaderCreateCheck_the_Modifications_tab_in_Transition_Settings);
            throw new LineColNumberedIoException(message, 1, iExPeptide);
        }

        double tolerance = settings.TransitionSettings.Instrument.MzMatchTolerance;
        IList<TransitionExp> transitionExps;
        int iPrecursor = FindPrecursor(fields, sequence, modifiedSequence, labelType, iExPeptide, indices.DecoyColumn,
                                       tolerance, provider, settings, out transitionExps);
        if (iPrecursor == -1)
            throw new MzMatchException(Resources.GeneralRowReader_Create_No_valid_precursor_m_z_column_found, 1, -1);

        int iProduct = FindProduct(fields, sequence, transitionExps, iExPeptide, iPrecursor,
                                   tolerance, provider, settings);
        if (iProduct == -1)
            throw new MzMatchException(Resources.GeneralRowReader_Create_No_valid_product_m_z_column_found, 1, -1);

        // The single extended peptide column serves as protein, peptide, label and fragment name column.
        indices.AssignDetected(iExPeptide, iExPeptide, iPrecursor, iProduct, iExPeptide, iExPeptide, iExPeptide);
        return new ExPeptideRowReader(provider, separator, indices, exPeptideRegex, settings, lines, progressMonitor, status);
    }

    /// <summary>
    /// Returns the index of the first field matching the extended peptide regex
    /// with a valid peptide sequence in its second group, along with the parsed
    /// sequence, modified sequence and label type; -1 when none matches.
    /// </summary>
    private static int FindExPeptide(string[] fields, Regex exPeptideRegex, SrmSettings settings,
        out string sequence, out string modifiedSequence, out IsotopeLabelType labelType)
    {
        labelType = IsotopeLabelType.light;

        for (int i = 0; i < fields.Length; i++)
        {
            Match match = exPeptideRegex.Match(fields[i]);
            if (match.Success)
            {
                string sequencePart = GetSequence(match);
                if (FastaSequence.IsExSequence(sequencePart))
                {
                    sequence = sequencePart;
                    modifiedSequence = GetModifiedSequence(match);
                    labelType = GetLabelType(match, settings);
                    return i;
                }
                // Very strange case where there is a match, but it
                // doesn't have a peptide in the second group.
                break;
            }
        }
        sequence = null;
        modifiedSequence = null;
        return -1;
    }

    private static string GetProteinName(Match match)
    {
        return match.Groups[1].Value;
    }

    private static string GetSequence(Match match)
    {
        return FastaSequence.StripModifications(GetModifiedSequence(match));
    }

    private static string GetModifiedSequence(Match match)
    {
        // Underscores in the peptide group stand in for '.' characters.
        return match.Groups[2].Value.Replace('_', '.');
    }

    private static IsotopeLabelType GetLabelType(Match pepExMatch, SrmSettings settings)
    {
        var modSettings = settings.PeptideSettings.Modifications;
        var typedMods = modSettings.GetModificationsByName(pepExMatch.Groups[3].Value);
        return (typedMods != null ? typedMods.LabelType : IsotopeLabelType.light);
    }

    /// <summary>Extracts the modified sequence from each line's extended peptide column.</summary>
    private static IEnumerable<string> GetSequencesFromLines(IEnumerable<string> lines, char separator, ColumnIndices indices, Regex exPeptideRegex)
    {
        return lines.Select(line => GetModifiedSequence(exPeptideRegex.Match(line.ParseDsvFields(separator)[indices.PeptideColumn])));
    }
}
/// <summary>
/// Tests whether the first line of <paramref name="text"/> looks like columnar
/// (delimiter-separated) data. On success reports the detected separator, the
/// number format provider whose decimal separator best fits the values, and a
/// column type (double / FastaSequence / string) for each field.
/// </summary>
public static bool IsColumnar(string text,
    out IFormatProvider provider, out char sep, out Type[] columnTypes)
{
    provider = CultureInfo.InvariantCulture;
    sep = '\0';
    columnTypes = new Type[0];

    int endLine = text.IndexOf('\n');
    string line = (endLine != -1 ? text.Substring(0, endLine) : text);

    // Avoid reporting a crosslink peptide specification as columnar just because they can contain commas
    if (CrosslinkSequenceParser.TryParseCrosslinkLibraryKey(line.Trim(), 0) != null)
        return false;

    // Work out the column separator and the column strings.
    // Tab wins outright; otherwise comma and semicolon compete by field count.
    string[] columns;
    if (TrySplitColumns(line, TextUtil.SEPARATOR_TSV, out columns))
    {
        sep = TextUtil.SEPARATOR_TSV;
    }
    else
    {
        bool hasCommaColumns = TrySplitColumns(line, TextUtil.SEPARATOR_CSV, out columns);
        bool hasSemiColumns = TrySplitColumns(line, TextUtil.SEPARATOR_CSV_INTL, out var semiColumns);
        if (hasCommaColumns && hasSemiColumns)
            sep = columns.Length >= semiColumns.Length ? TextUtil.SEPARATOR_CSV : TextUtil.SEPARATOR_CSV_INTL;
        else if (hasCommaColumns)
            sep = TextUtil.SEPARATOR_CSV;
        else if (hasSemiColumns)
            sep = TextUtil.SEPARATOR_CSV_INTL;

        if (sep == TextUtil.SEPARATOR_CSV_INTL)
            columns = semiColumns;
    }

    if (sep == '\0')
        return false;

    if (sep != TextUtil.SEPARATOR_CSV)
    {
        // Test for the right decimal separator when the list separator is not a comma
        var culture = CultureInfo.CurrentCulture;
        // If the local decimal separator is not a comma, then try that. Otherwise, try a comma.
        if (Equals(culture.NumberFormat.NumberDecimalSeparator,
            CultureInfo.InvariantCulture.NumberFormat.NumberDecimalSeparator))
        {
            // Build a comma-decimal variant of the invariant culture to test against.
            culture = (CultureInfo)CultureInfo.InvariantCulture.Clone();
            var nf = culture.NumberFormat;
            nf.NumberDecimalSeparator = nf.CurrencyDecimalSeparator = nf.PercentDecimalSeparator = @",";
            nf.NumberGroupSeparator = nf.CurrencyGroupSeparator = nf.PercentGroupSeparator = @".";
            culture.TextInfo.ListSeparator = sep.ToString();
        }
        // The decimal separator that appears in the most columns wins
        if (CountDecimals(columns, culture) > CountDecimals(columns, provider))
            provider = culture;
    }

    List<Type> listColumnTypes = new List<Type>();
    bool nonSeqFound = !char.IsWhiteSpace(sep);   // Sequence text is allowed to have white space

    foreach (string value in columns)
    {
        Type columnType = GetColumnType(value.Trim(), provider);
        if (columnType != typeof(FastaSequence))
            nonSeqFound = true;
        listColumnTypes.Add(columnType);
    }
    columnTypes = (nonSeqFound ? listColumnTypes.ToArray() : new Type[0]);
    return nonSeqFound;
}
/// <summary>
/// Counts how many of the values parse as numbers with a fractional part
/// under the given format provider.
/// </summary>
private static int CountDecimals(IEnumerable<string> values, IFormatProvider provider)
{
    return values.Count(value =>
        double.TryParse(value, NumberStyles.Number, provider, out var parsed) &&
        parsed != Math.Round(parsed));
}
/// <summary>
/// Splits a line on the given separator; true when the separator actually
/// occurs in the line (i.e. more than one resulting field).
/// </summary>
private static bool TrySplitColumns(string line, char sep, out string[] columns)
{
    var parts = line.Split(sep);
    columns = parts;
    return parts.Length > 1;
}
/// <summary>
/// Classifies a field value as numeric (double), a peptide sequence
/// (FastaSequence) or plain text (string). Numeric wins over sequence.
/// </summary>
private static Type GetColumnType(string value, IFormatProvider provider)
{
    if (double.TryParse(value, NumberStyles.Number, provider, out _))
        return typeof(double);
    return FastaSequence.IsExSequence(value) ? typeof(FastaSequence) : typeof(string);
}
/// <summary>True when at least one detected column type is numeric (double).</summary>
public static bool HasNumericColumn(Type[] columnTypes)
{
    return columnTypes.Any(colType => colType == typeof(double));
}
}
/// <summary>
/// Known indices of the columns used in importing a transition list.
/// </summary>
public sealed class ColumnIndices
{
    public ColumnIndices(int proteinColumn, int peptideColumn, int precursorColumn, int productColumn)
        : this()
    {
        AssignDetected(proteinColumn, peptideColumn, precursorColumn, productColumn, -1, -1, -1);
    }

    /// <summary>
    /// Records the column indices discovered by a row reader's detection pass;
    /// -1 for any column that was not detected.
    /// </summary>
    public void AssignDetected(int proteinColumn,
        int peptideColumn,
        int precursorColumn,
        int productColumn,
        int labelTypeColumn,
        int fragmentNameColumn,
        int precursorChargeColumn)
    {
        ProteinColumn = proteinColumn;
        PeptideColumn = peptideColumn;
        PrecursorColumn = precursorColumn;
        ProductColumn = productColumn;
        LabelTypeColumn = labelTypeColumn;
        FragmentNameColumn = fragmentNameColumn;
        PrecursorChargeColumn = precursorChargeColumn;
    }

    // Header row text, when headers were present (set by FindColumns).
    public string[] Headers { get; private set; }

    public int ProteinColumn { get; set; }
    public int PeptideColumn { get; set; }
    public int PrecursorColumn { get; set; }
    public int PrecursorChargeColumn { get; set; }
    public int ProductColumn { get; set; }
    public int ProductChargeColumn { get; set; }

    /// <summary>
    /// A column specifying the <see cref="IsotopeLabelType"/> (optional)
    /// </summary>
    public int LabelTypeColumn { get; set; }

    /// <summary>
    /// A column specifying whether a decoy is expected (optional)
    /// </summary>
    public int DecoyColumn { get; set; }

    /// <summary>
    /// A column specifying a fragment name (optional)
    /// </summary>
    public int FragmentNameColumn { get; set; }

    /// <summary>
    /// A column specifying an iRT value
    /// </summary>
    public int IrtColumn { get; set; }

    /// <summary>
    /// A column specifying a spectral library intensity for the transition
    /// </summary>
    public int LibraryColumn { get; set; }

    // All indices start out as -1 ("not found").
    private ColumnIndices()
    {
        ProteinColumn = -1;
        PeptideColumn = -1;
        PrecursorColumn = -1;
        PrecursorChargeColumn = -1;
        ProductColumn = -1;
        ProductChargeColumn = -1;
        DecoyColumn = -1;
        IrtColumn = -1;
        LibraryColumn = -1;
        LabelTypeColumn = -1;
        FragmentNameColumn = -1;
    }

    /// <summary>
    /// Builds indices from a potential header line. Columns are only matched by
    /// name when no field parses as a number (i.e. the line really is a header).
    /// </summary>
    public static ColumnIndices FromLine(string line, char separator, Func<string, Type> getColumnType)
    {
        var ci = new ColumnIndices();
        string[] fields = line.ParseDsvFields(separator);
        if (fields.All(field => getColumnType(field.Trim()) != typeof(double)))
            ci.FindColumns(fields);
        return ci;
    }

    private string FormatHeader(string col)
    {
        // Remove spaces and make lowercase. This matches the format of the names they are tested against
        return col.ToLowerInvariant().Replace(@" ", string.Empty);
    }

    /// <summary>Matches known header names (case/space-insensitive) to column indices.</summary>
    public void FindColumns(string[] headers)
    {
        Headers = headers;
        ProteinColumn = headers.IndexOf(col => ProteinNames.Contains(FormatHeader(col)));
        PrecursorChargeColumn = headers.IndexOf(col => PrecursorChargeNames.Contains(FormatHeader(col)));
        ProductChargeColumn = headers.IndexOf(col => ProductChargeNames.Contains(FormatHeader(col)));
        DecoyColumn = headers.IndexOf(col => DecoyNames.Contains(FormatHeader(col)));
        IrtColumn = headers.IndexOf(col => IrtColumnNames.Contains(FormatHeader(col)));
        LibraryColumn = headers.IndexOf(col => LibraryColumnNames.Contains(FormatHeader(col)));
        LabelTypeColumn = headers.IndexOf(col => LabelTypeNames.Contains(FormatHeader(col)));
        FragmentNameColumn = headers.IndexOf(col => FragmentNameNames.Contains(FormatHeader(col)));
    }

    // Checks all the column indices and resets any that have the given index to -1
    // NOTE(review): ProductChargeColumn is not reset here, unlike every other
    // detected column — confirm whether that omission is intentional.
    public void ResetDuplicateColumns(int index)
    {
        if (DecoyColumn == index)
            DecoyColumn = -1;
        if (IrtColumn == index)
            IrtColumn = -1;
        if (LabelTypeColumn == index)
            LabelTypeColumn = -1;
        if (LibraryColumn == index)
            LibraryColumn = -1;
        if (PeptideColumn == index)
            PeptideColumn = -1;
        if (PrecursorColumn == index)
            PrecursorColumn = -1;
        if (ProductColumn == index)
            ProductColumn = -1;
        if (ProteinColumn == index)
            ProteinColumn = -1;
        if (FragmentNameColumn == index)
            FragmentNameColumn = -1;
        if (PrecursorChargeColumn == index)
            PrecursorChargeColumn = -1;
    }

    /// <summary>
    /// It's not unusual for a single column to hold a few fields worth of info, as in
    /// "744.8 858.39 10 APR.AGLCQTFVYGGCR.y7.light 105 40" where protein, peptide, and label are all stuck together,
    /// so that all three lay claim to a single column. In such cases, prioritize peptide.
    /// </summary>
    public void PrioritizePeptideColumn()
    {
        if (PeptideColumn != -1)
        {
            var save = PeptideColumn;
            ResetDuplicateColumns(PeptideColumn);
            PeptideColumn = save;
        }
    }

    // Recognized (lowercased, space-stripped) header names for each column kind.
    // ReSharper disable StringLiteralTypo
    public static IEnumerable<string> ProteinNames { get { return new[] { @"proteinname", @"protein.name", @"protein", @"proteinid", @"uniprotid" }; } }
    public static IEnumerable<string> PrecursorChargeNames { get { return new[] { @"precursorcharge" }; } }
    public static IEnumerable<string> ProductChargeNames { get { return new[] { @"productcharge" }; } }
    public static IEnumerable<string> IrtColumnNames { get { return new[] { @"irt", @"normalizedretentiontime", @"tr_recalibrated" }; } }
    public static IEnumerable<string> LibraryColumnNames { get { return new[] { @"libraryintensity", @"relativeintensity", @"relative_intensity", @"relativefragmentintensity", @"library_intensity" }; } }
    public static IEnumerable<string> DecoyNames { get { return new[] { @"decoy" }; } }
    public static IEnumerable<string> FragmentNameNames { get { return new[] { @"fragmentname" }; } }
    public static IEnumerable<string> LabelTypeNames { get { return new[] { @"labeltype" }; } }
    // ReSharper restore StringLiteralTypo
}
/// <summary>
/// All possible explanations for a single transition
/// </summary>
public sealed class ExTransitionInfo
{
    public ExTransitionInfo(string proteinName, string peptideSequence, string modifiedSequence, double precursorMz, bool isDecoy)
    {
        ProteinName = proteinName;
        ModifiedSequence = modifiedSequence;
        PeptideTarget = new Target(peptideSequence);
        PrecursorMz = precursorMz;
        IsDecoy = isDecoy;
        // Until a label column says otherwise, assume light.
        DefaultLabelType = IsotopeLabelType.light;
        TransitionExps = new List<TransitionExp>();
    }

    public string ProteinName { get; private set; }
    public Target PeptideTarget { get; private set; }

    // Unmodified sequence text of the peptide target.
    public string PeptideSequence => PeptideTarget.Sequence;

    public string ModifiedSequence { get; set; }
    public double PrecursorMz { get; private set; }

    public bool IsDecoy { get; private set; }

    /// <summary>
    /// The first label type to try in explaining the precursor m/z value
    /// </summary>
    public IsotopeLabelType DefaultLabelType { get; set; }

    /// <summary>
    /// True if only the default label type is allowed
    /// </summary>
    public bool IsExplicitLabelType { get; set; }

    /// <summary>
    /// A list of potential explanations for the Q1 and Q3 m/z values
    /// </summary>
    public List<TransitionExp> TransitionExps { get; private set; }

    // Distinct variable-modification sets across all candidate explanations.
    public IEnumerable<ExplicitMods> PotentialVarMods =>
        TransitionExps.Select(exp => exp.Precursor.VariableMods).Distinct();
}
/// <summary>
/// Explanation for a single transition
/// </summary>
public sealed class TransitionExp
{
    public TransitionExp(ExplicitMods mods, Adduct precursorCharge, IsotopeLabelType labelType, int precursorMassShift)
    {
        Precursor = new PrecursorExp(mods, precursorCharge, labelType, precursorMassShift);
    }

    // A decoy explanation is marked by a non-null precursor mass shift.
    public bool IsDecoy => Precursor.MassShift.HasValue;

    // Decoys allow a product mass shift in either direction; targets allow none.
    public TransitionCalc.MassShiftType ProductShiftType =>
        IsDecoy ? TransitionCalc.MassShiftType.either : TransitionCalc.MassShiftType.none;

    public PrecursorExp Precursor { get; private set; }
    public ProductExp Product { get; set; }
}
public sealed class PrecursorExp
{
    public PrecursorExp(ExplicitMods mods, Adduct precursorAdduct, IsotopeLabelType labelType, int massShift)
    {
        VariableMods = mods;
        PrecursorAdduct = precursorAdduct;
        LabelType = labelType;
        // A zero shift means "no shift" and is stored as null.
        MassShift = massShift != 0 ? massShift : (int?)null;
    }

    public ExplicitMods VariableMods { get; private set; }
    public Adduct PrecursorAdduct { get; private set; }
    public IsotopeLabelType LabelType { get; private set; }
    public int? MassShift { get; private set; }

    #region object overrides

    // Equality ignores MassShift: mods, adduct and label type define identity.
    public bool Equals(PrecursorExp other)
    {
        if (ReferenceEquals(other, null))
            return false;
        if (ReferenceEquals(other, this))
            return true;
        return Equals(VariableMods, other.VariableMods)
               && Equals(PrecursorAdduct, other.PrecursorAdduct)
               && Equals(LabelType, other.LabelType);
    }

    public override bool Equals(object obj)
    {
        if (ReferenceEquals(obj, this))
            return true;
        // Sealed class, so a pattern match is equivalent to an exact type check.
        return obj is PrecursorExp other && Equals(other);
    }

    public override int GetHashCode()
    {
        unchecked
        {
            int hashCode = VariableMods != null ? VariableMods.GetHashCode() : 0;
            hashCode = (hashCode * 397) ^ PrecursorAdduct.GetHashCode();
            hashCode = (hashCode * 397) ^ (LabelType != null ? LabelType.GetHashCode() : 0);
            return hashCode;
        }
    }

    #endregion
}
public sealed class ProductExp
{
public ProductExp(Adduct productAdduct, IonType ionType, int fragmentOrdinal, TransitionLosses losses, int massShift)
{
Adduct = productAdduct;
IonType = ionType;
FragmentOrdinal = fragmentOrdinal;
Losses = losses;
MassShift = null;
if (massShift != 0)
MassShift = massShift;
}
public Adduct Adduct { get; private set; }
public IonType IonType { get; private set; }
public int FragmentOrdinal { get; private set; }
public TransitionLosses Losses { get; private set; }
public int? MassShift { get; private set; }
}
public class MzMatchException : LineColNumberedIoException
{
public MzMatchException(string message, long lineNum, int colNum)
: base(message, TextUtil.LineSeparate(string.Empty, Resources.MzMatchException_suggestion), lineNum, colNum)
{ }
}
public class LineColNumberedIoException : IOException
{
public LineColNumberedIoException(string message, long lineNum, int colIndex)
: base(FormatMessage(message, lineNum, colIndex))
{
PlainMessage = message;
LineNumber = lineNum;
ColumnIndex = colIndex;
}
public LineColNumberedIoException(string message, string suggestion, long lineNum, int colIndex)
: base(TextUtil.LineSeparate(FormatMessage(message, lineNum, colIndex), suggestion))
{
PlainMessage = TextUtil.LineSeparate(message, suggestion);
LineNumber = lineNum;
ColumnIndex = colIndex;
}
public LineColNumberedIoException(string message, long lineNum, int colIndex, Exception inner)
: base(FormatMessage(message, lineNum, colIndex), inner)
{
PlainMessage = message;
LineNumber = lineNum;
ColumnIndex = colIndex;
}
private static string FormatMessage(string message, long lineNum, int colIndex)
{
if (colIndex == -1)
return string.Format(Resources.LineColNumberedIoException_FormatMessage__0___line__1__, message, lineNum);
else
return string.Format(Resources.LineColNumberedIoException_FormatMessage__0___line__1___col__2__, message, lineNum, colIndex + 1);
}
public string PlainMessage { get; private set; }
public long LineNumber { get; private set; }
public int ColumnIndex { get; private set; }
}
public class PeptideGroupBuilder
{
// filename to use if no file has been specified
public const string CLIPBOARD_FILENAME = @"Clipboard";
private readonly StringBuilder _sequence = new StringBuilder();
private readonly List<PeptideDocNode> _peptides;
private readonly Dictionary<int, Adduct> _charges;
private readonly SrmSettings _settings;
private readonly Enzyme _enzyme;
private readonly bool _customName;
private FastaSequence _activeFastaSeq;
private Peptide _activePeptide;
private string _activeModifiedSequence;
private readonly string _sourceFile;
// Order is important to making the variable modification choice deterministic
// when more than one potential set of variable modifications work to explain
// the contents of the active peptide.
private List<ExplicitMods> _activeVariableMods;
private List<PrecursorExp> _activePrecursorExps;
private double _activePrecursorMz;
private readonly List<ExTransitionInfo> _activeTransitionInfos;
private double? _irtValue;
private readonly List<MeasuredRetentionTime> _irtPeptides;
private readonly List<TransitionImportErrorInfo> _peptideGroupErrorInfo;
private readonly List<TransitionGroupLibraryIrtTriple> _groupLibTriples;
private readonly List<SpectrumMzInfo> _librarySpectra;
private readonly List<SpectrumPeaksInfo.MI> _activeLibraryIntensities;
private readonly ModificationMatcher _modMatcher;
private bool _autoManageChildren;
public PeptideGroupBuilder(FastaSequence fastaSequence, SrmSettings settings, string sourceFile)
{
_activeFastaSeq = fastaSequence;
_autoManageChildren = true;
if (fastaSequence != null)
{
BaseName = Name = fastaSequence.Name;
Description = fastaSequence.Description;
Alternatives = fastaSequence.Alternatives.ToArray();
}
_settings = settings;
_enzyme = _settings.PeptideSettings.Enzyme;
_peptides = new List<PeptideDocNode>();
_charges = new Dictionary<int, Adduct>();
_groupLibTriples = new List<TransitionGroupLibraryIrtTriple>();
_activeTransitionInfos = new List<ExTransitionInfo>();
_irtPeptides = new List<MeasuredRetentionTime>();
_librarySpectra = new List<SpectrumMzInfo>();
_activeLibraryIntensities = new List<SpectrumPeaksInfo.MI>();
_peptideGroupErrorInfo = new List<TransitionImportErrorInfo>();
_activeModifiedSequence = null;
_sourceFile = sourceFile;
}
public PeptideGroupBuilder(string line, bool peptideList, SrmSettings settings, string sourceFile)
: this(null, settings, sourceFile)
{
int start = (line.Length > 0 && line[0] == '>' ? 1 : 0);
// If there is a second >, then this is a custom name, and not
// a real FASTA sequence.
if (line.Length > 1 && line[1] == '>')
{
_customName = true;
start++;
}
// Split ID from description at first space or tab
int split = _customName ? -1 : IndexEndId(line);
if (split == -1)
{
BaseName = Name = line.Substring(start);
Description = string.Empty;
}
else
{
BaseName = Name = line.Substring(start, split - start);
string[] descriptions = line.Substring(split + 1).Split((char)1);
Description = descriptions[0];
var listAlternatives = new List<ProteinMetadata>();
for (int i = 1; i < descriptions.Length; i++)
{
string alternative = descriptions[i];
split = IndexEndId(alternative);
if (split == -1)
listAlternatives.Add(new ProteinMetadata(alternative, null));
else
{
listAlternatives.Add(new ProteinMetadata(alternative.Substring(0, split),
alternative.Substring(split + 1)));
}
}
Alternatives = listAlternatives.ToArray();
}
PeptideList = peptideList;
}
public PeptideGroupBuilder(string line, ModificationMatcher modMatcher, SrmSettings settings, string sourceFile)
: this(line, true, settings, sourceFile)
{
_modMatcher = modMatcher;
}
private static int IndexEndId(string line)
{
return line.IndexOfAny(new[] { TextUtil.SEPARATOR_SPACE, TextUtil.SEPARATOR_TSV });
}
/// <summary>
/// Used in the case where the user supplied name may be different
/// from the <see cref="Name"/> property.
/// </summary>
public string BaseName { get; set; }
public List<MeasuredRetentionTime> IrtPeptides { get { return _irtPeptides; } }
public List<SpectrumMzInfo> LibrarySpectra { get { return _librarySpectra; } }
public List<TransitionImportErrorInfo> PeptideGroupErrorInfo { get { return _peptideGroupErrorInfo; } }
public string Name { get; private set; }
public string Description { get; private set; }
public ProteinMetadata[] Alternatives { get; private set; }
public string AA
{
get
{
return _sequence.ToString();
}
set
{
_sequence.Remove(0, _sequence.Length);
_sequence.Append(value);
}
}
public bool PeptideList { get; private set; }
public void AppendSequence(string seqMod)
{
var charge = Transition.GetChargeFromIndicator(seqMod, TransitionGroup.MIN_PRECURSOR_CHARGE, TransitionGroup.MAX_PRECURSOR_CHARGE);
seqMod = Transition.StripChargeIndicators(seqMod, TransitionGroup.MIN_PRECURSOR_CHARGE, TransitionGroup.MAX_PRECURSOR_CHARGE);
var seq = FastaSequence.StripModifications(seqMod);
// Auto manage the children unless there is at least one modified sequence in the fasta
_autoManageChildren = _autoManageChildren && Equals(seq, seqMod);
// Get rid of whitespace
seq = seq.Replace(@" ", string.Empty).Trim();
// Get rid of
if (seq.EndsWith(@"*"))
seq = seq.Substring(0, seq.Length - 1);
if (!PeptideList)
_sequence.Append(seq);
else
{
// If there is a ModificationMatcher, use it to create the DocNode.
PeptideDocNode nodePep;
if (_modMatcher != null)
nodePep = _modMatcher.GetModifiedNode(seqMod);
else
{
Peptide peptide = new Peptide(null, seq, null, null, _enzyme.CountCleavagePoints(seq));
nodePep = new PeptideDocNode(peptide);
}
_peptides.Add(nodePep);
if (!charge.IsEmpty)
_charges.Add(nodePep.Id.GlobalIndex, charge);
}
}
public void AppendTransition(ExTransitionInfo info, double? irt, double? libraryIntensity, double productMz, string lineText, long lineNum)
{
_autoManageChildren = false;
// Treat this like a peptide list from now on.
PeptideList = true;
if (_activeFastaSeq == null && AA.Length > 0)
_activeFastaSeq = new FastaSequence(Name, Description, Alternatives, AA);
string sequence = info.PeptideSequence;
if (_activePeptide != null)
{
if (IsPeptideChanged(info))
{
CompletePeptide(true);
}
else
{
var intersectVariableMods = new List<ExplicitMods>(_activeVariableMods.Intersect(
info.PotentialVarMods));
// If unable to explain the next transition with the existing peptide, but the
// transition has the same precursor m/z as the last, try completing the existing
// peptide, and see if the current precursor can be completed as a new peptide
if (intersectVariableMods.Count == 0 && _activePrecursorMz == info.PrecursorMz)
{
CompletePeptide(false);
intersectVariableMods = new List<ExplicitMods>(info.PotentialVarMods);
foreach (var infoActive in _activeTransitionInfos)
{
intersectVariableMods = new List<ExplicitMods>(intersectVariableMods.Intersect(
infoActive.PotentialVarMods));
}
}
if (intersectVariableMods.Count > 0)
{
_activeVariableMods = intersectVariableMods;
}
else if (_activePrecursorMz == info.PrecursorMz)
{
var precursorMz = Math.Round(info.PrecursorMz, MassListImporter.MZ_ROUND_DIGITS);
var errorInfo = new TransitionImportErrorInfo(string.Format(Resources.PeptideGroupBuilder_AppendTransition_Failed_to_explain_all_transitions_for_0__m_z__1__with_a_single_set_of_modifications,
info.PeptideSequence, precursorMz),
null,
lineNum, lineText);
_peptideGroupErrorInfo.Add(errorInfo);
return;
}
else
{
CompletePeptide(true);
}
}
}
if (_activePeptide == null)
{
int? begin = null;
int? end = null;
if (_activeFastaSeq != null)
{
begin = _activeFastaSeq.Sequence.IndexOf(sequence, StringComparison.Ordinal);
if (begin == -1)
{
// CONSIDER: Use fasta sequence format code currently in SrmDocument to show formatted sequence.
throw new InvalidDataException(string.Format(Resources.PeptideGroupBuilder_AppendTransition_The_peptide__0__was_not_found_in_the_sequence__1__,
sequence, _activeFastaSeq.Name));
}
end = begin + sequence.Length;
}
_activePeptide = new Peptide(_activeFastaSeq, sequence, begin, end, _enzyme.CountCleavagePoints(sequence), info.TransitionExps[0].IsDecoy);
_activeModifiedSequence = info.ModifiedSequence;
_activePrecursorMz = info.PrecursorMz;
_activeVariableMods = new List<ExplicitMods>(info.PotentialVarMods.Distinct());
_activePrecursorExps = new List<PrecursorExp>(info.TransitionExps.Select(exp => exp.Precursor));
}
var intersectPrecursors = new List<PrecursorExp>(_activePrecursorExps.Intersect(
info.TransitionExps.Select(exp => exp.Precursor)));
if (intersectPrecursors.Count > 0)
{
_activePrecursorExps = intersectPrecursors;
}
else if (_activePrecursorMz == info.PrecursorMz)
{
var precursorMz = Math.Round(_activePrecursorMz, MassListImporter.MZ_ROUND_DIGITS);
var errorInfo = new TransitionImportErrorInfo(string.Format(Resources.PeptideGroupBuilder_AppendTransition_Failed_to_explain_all_transitions_for_m_z__0___peptide__1___with_a_single_precursor,
precursorMz, info.PeptideSequence),
null,
lineNum, lineText);
_peptideGroupErrorInfo.Add(errorInfo);
return;
}
else
{
CompleteTransitionGroup();
}
if (_irtValue.HasValue && (irt == null || Math.Abs(_irtValue.Value - irt.Value) > DbIrtPeptide.IRT_MIN_DIFF))
{
var precursorMz = Math.Round(info.PrecursorMz, MassListImporter.MZ_ROUND_DIGITS);
var errorInfo = new TransitionImportErrorInfo(string.Format(Resources.PeptideGroupBuilder_FinalizeTransitionGroups_Two_transitions_of_the_same_precursor___0___m_z__1_____have_different_iRT_values___2__and__3___iRT_values_must_be_assigned_consistently_in_an_imported_transition_list_,
info.PeptideSequence, precursorMz, _irtValue, irt),
null,
lineNum, lineText);
_peptideGroupErrorInfo.Add(errorInfo);
return;
}
if (_activePrecursorMz == 0)
{
_activePrecursorMz = info.PrecursorMz;
_activePrecursorExps = new List<PrecursorExp>(info.TransitionExps.Select(exp => exp.Precursor));
}
_activeTransitionInfos.Add(info);
if (libraryIntensity != null)
{
_activeLibraryIntensities.Add(new SpectrumPeaksInfo.MI { Intensity = (float)libraryIntensity.Value, Mz = productMz });
}
_irtValue = irt;
}
/// <summary>
/// If the bare peptide sequence has changed, definitely start a new peptide
/// If the modified sequence has changed, this is more ambiguous, since
/// Skyline may have just failed to parse the modified sequence. Only start new
/// peptide if modified sequences are parsed and different.
/// </summary>
/// <param name="info">List of transition explanations</param>
/// <returns></returns>
private bool IsPeptideChanged(ExTransitionInfo info)
{
if (info.ModifiedSequence == null && _activeModifiedSequence == null)
return !Equals(info.PeptideSequence, _activePeptide.Sequence);
return !Equals(info.ModifiedSequence, _activeModifiedSequence);
}
private void CompletePeptide(bool andTransitionGroup)
{
if (andTransitionGroup)
CompleteTransitionGroup();
_groupLibTriples.Sort(TransitionGroupLibraryIrtTriple.CompareTriples);
var finalGroupLibTriples = FinalizeTransitionGroups(_groupLibTriples);
var finalTransitionGroups = finalGroupLibTriples.Select(triple => triple.NodeGroup).ToArray();
var docNode = new PeptideDocNode(_activePeptide, _settings, _activeVariableMods[0], null, null,
finalTransitionGroups, false);
var finalLibrarySpectra = new List<SpectrumMzInfo>();
double? peptideIrt = GetPeptideIrt(finalGroupLibTriples);
foreach (var groupLibTriple in finalGroupLibTriples)
{
if (groupLibTriple.SpectrumInfo == null)
continue;
var sequence = groupLibTriple.NodeGroup.TransitionGroup.Peptide.Target;
var mods = docNode.ExplicitMods;
var calcPre = _settings.GetPrecursorCalc(groupLibTriple.SpectrumInfo.Label, mods);
var modifiedSequenceWithIsotopes = calcPre.GetModifiedSequence(sequence, SequenceModFormatType.lib_precision, false);
finalLibrarySpectra.Add(new SpectrumMzInfo
{
SourceFile = _sourceFile ?? CLIPBOARD_FILENAME,
Key = new LibKey(modifiedSequenceWithIsotopes, groupLibTriple.NodeGroup.TransitionGroup.PrecursorAdduct),
Label = groupLibTriple.SpectrumInfo.Label,
PrecursorMz = groupLibTriple.SpectrumInfo.PrecursorMz,
IonMobility = groupLibTriple.SpectrumInfo.IonMobility,
SpectrumPeaks = groupLibTriple.SpectrumInfo.SpectrumPeaks,
RetentionTime = peptideIrt
});
}
_librarySpectra.AddRange(finalLibrarySpectra);
_peptides.Add(docNode);
if (peptideIrt.HasValue)
{
_irtPeptides.Add(new MeasuredRetentionTime(docNode.ModifiedTarget, peptideIrt.Value, true));
}
_groupLibTriples.Clear();
// Keep the same peptide, if the group is not being completed.
// This is an attempt to explain a set of transitions with the same
// peptide, but different variable modifications.
if (andTransitionGroup)
_activePeptide = null;
else
{
// Not valid to keep the same actual peptide. Need a copy.
_activePeptide = new Peptide(_activePeptide.FastaSequence,
_activePeptide.Sequence,
_activePeptide.Begin,
_activePeptide.End,
_activePeptide.MissedCleavages,
_groupLibTriples.Any(pair => pair.NodeGroup.IsDecoy));
}
_irtValue = null;
}
private static double? GetPeptideIrt(IEnumerable<TransitionGroupLibraryIrtTriple> groupTriples)
{
var groupTriplesNonNull = groupTriples.Where(triple => triple.Irt.HasValue).ToList();
if (!groupTriplesNonNull.Any())
{
return null;
}
double weightedSum = groupTriplesNonNull.Select(triple => triple.Irt.Value).Sum();
double norm = groupTriplesNonNull.Count;
return weightedSum / norm;
}
private TransitionGroupLibraryIrtTriple[] FinalizeTransitionGroups(IList<TransitionGroupLibraryIrtTriple> groupTriples)
{
var finalTriples = new List<TransitionGroupLibraryIrtTriple>();
foreach (var groupTriple in groupTriples)
{
int iGroup = finalTriples.Count - 1;
if (iGroup == -1 || !Equals(finalTriples[iGroup].NodeGroup.TransitionGroup, groupTriple.NodeGroup.TransitionGroup))
finalTriples.Add(groupTriple);
else
{
// Check for consistent iRT values
double? irt1 = finalTriples[iGroup].Irt;
double? irt2 = groupTriple.Irt;
bool bothIrtsNull = (irt1 == null && irt2 == null);
if (!bothIrtsNull && (irt1 == null || irt2 == null))
{
for (int i = 0; i < groupTriple.NodeGroup.TransitionCount; ++i)
{
var precursorMz = Math.Round(groupTriple.PrecursorMz, MassListImporter.MZ_ROUND_DIGITS);
var errorInfo = new TransitionImportErrorInfo(string.Format(Resources.PeptideGroupBuilder_FinalizeTransitionGroups_Missing_iRT_value_for_peptide__0___precursor_m_z__1_,
_activePeptide.Sequence, precursorMz),
null, null, null);
_peptideGroupErrorInfo.Add(errorInfo);
}
continue;
}
else if (!bothIrtsNull && Math.Abs(irt1.Value - irt2.Value) > DbIrtPeptide.IRT_MIN_DIFF)
{
// Make sure iRT values are reported in a deterministic order for testing
if (irt1.Value > irt2.Value)
Helpers.Swap(ref irt1, ref irt2);
for (int i = 0; i < groupTriple.NodeGroup.TransitionCount; ++i)
{
var precursorMz = Math.Round(groupTriple.PrecursorMz, MassListImporter.MZ_ROUND_DIGITS);
var errorInfo = new TransitionImportErrorInfo(string.Format(Resources.PeptideGroupBuilder_FinalizeTransitionGroups_Two_transitions_of_the_same_precursor___0___m_z__1_____have_different_iRT_values___2__and__3___iRT_values_must_be_assigned_consistently_in_an_imported_transition_list_,
_activePeptide.Sequence, precursorMz, irt1.Value, irt2.Value),
null, null, null);
_peptideGroupErrorInfo.Add(errorInfo);
}
continue;
}
// Found repeated group, so merge transitions
var spectrumErrors = new List<TransitionImportErrorInfo>();
finalTriples[iGroup].SpectrumInfo = finalTriples[iGroup].SpectrumInfo == null ? groupTriple.SpectrumInfo
: finalTriples[iGroup].SpectrumInfo.CombineSpectrumInfo(groupTriple.SpectrumInfo, out spectrumErrors);
if (spectrumErrors.Any())
{
_peptideGroupErrorInfo.AddRange(spectrumErrors);
continue;
}
finalTriples[iGroup].NodeGroup = (TransitionGroupDocNode)finalTriples[iGroup].NodeGroup.AddAll(groupTriple.NodeGroup.Children);
}
}
var groups = groupTriples.Select(pair => pair.NodeGroup).ToList();
var finalGroups = finalTriples.Select(pair => pair.NodeGroup).ToList();
// If anything changed, make sure transitions are sorted
if (!ArrayUtil.ReferencesEqual(groups, finalGroups))
{
for (int i = 0; i < finalTriples.Count; i++)
{
var nodeGroup = finalTriples[i].NodeGroup;
var arrayTran = CompleteTransitions(nodeGroup.Children.Cast<TransitionDocNode>());
finalTriples[i].NodeGroup = (TransitionGroupDocNode)nodeGroup.ChangeChildrenChecked(arrayTran);
}
}
return finalTriples.ToArray();
}
private void CompleteTransitionGroup()
{
var precursorExp = GetBestPrecursorExp();
var transitionGroup = new TransitionGroup(_activePeptide,
precursorExp.PrecursorAdduct,
precursorExp.LabelType,
false,
precursorExp.MassShift);
var transitions = _activeTransitionInfos.ConvertAll(info =>
{
var productExp = info.TransitionExps.Single(exp => Equals(precursorExp, exp.Precursor)).Product;
var ionType = productExp.IonType;
var ordinal = productExp.FragmentOrdinal;
int offset = Transition.OrdinalToOffset(ionType, ordinal, _activePeptide.Sequence.Length);
int? massShift = productExp.MassShift;
if (massShift == null && precursorExp.MassShift.HasValue)
massShift = 0;
var tran = new Transition(transitionGroup, ionType, offset, 0, productExp.Adduct, massShift);
// m/z and library info calculated later
return new TransitionDocNode(tran, productExp.Losses, TypedMass.ZERO_MONO_MASSH, TransitionDocNode.TransitionQuantInfo.DEFAULT, ExplicitTransitionValues.EMPTY);
});
// m/z calculated later
var newTransitionGroup = new TransitionGroupDocNode(transitionGroup, CompleteTransitions(transitions));
var currentLibrarySpectrum = !_activeLibraryIntensities.Any() ? null :
new SpectrumMzInfo
{
Key = new LibKey(_activePeptide.Sequence, precursorExp.PrecursorAdduct),
PrecursorMz = _activePrecursorMz,
Label = precursorExp.LabelType,
SpectrumPeaks = new SpectrumPeaksInfo(_activeLibraryIntensities.ToArray()),
};
_groupLibTriples.Add(new TransitionGroupLibraryIrtTriple(currentLibrarySpectrum, newTransitionGroup, _irtValue, _activePrecursorMz));
_activePrecursorMz = 0;
_activePrecursorExps.Clear();
_activeTransitionInfos.Clear();
_activeLibraryIntensities.Clear();
_irtValue = null;
}
private PrecursorExp GetBestPrecursorExp()
{
// If there is only one precursor explanation, return it
if (_activePrecursorExps.Count == 1)
return _activePrecursorExps[0];
// Unless the explanation comes from just one transition, then look for most reasonable given settings
int[] fragmentTypeCounts = new int[_activePrecursorExps.Count];
var preferredFragments = new List<IonType>();
foreach (var ionType in _settings.TransitionSettings.Filter.PeptideIonTypes)
{
if (preferredFragments.Contains(ionType))
continue;
preferredFragments.Add(ionType);
// Add ion type pairs together, whether they are both in the settings or not
switch (ionType)
{
case IonType.a: preferredFragments.Add(IonType.x); break;
case IonType.b: preferredFragments.Add(IonType.y); break;
case IonType.c: preferredFragments.Add(IonType.z); break;
case IonType.x: preferredFragments.Add(IonType.a); break;
case IonType.y: preferredFragments.Add(IonType.b); break;
case IonType.z: preferredFragments.Add(IonType.c); break;
}
}
// Count transitions with the preferred types for all possible precursors
foreach (var tranExp in _activeTransitionInfos.SelectMany(info => info.TransitionExps))
{
int i = _activePrecursorExps.IndexOf(tranExp.Precursor);
if (i == -1)
continue;
if (preferredFragments.Contains(tranExp.Product.IonType))
fragmentTypeCounts[i]++;
}
// Return the precursor with the most fragments of the preferred type
var maxExps = fragmentTypeCounts.Max();
return _activePrecursorExps[fragmentTypeCounts.IndexOf(c => c == maxExps)];
}
/// <summary>
/// Remove duplicates and sort a set of transitions.
/// </summary>
/// <param name="transitions">The set of transitions</param>
/// <returns>An array of sorted, distinct transitions</returns>
private static TransitionDocNode[] CompleteTransitions(IEnumerable<TransitionDocNode> transitions)
{
var arrayTran = transitions.Distinct().ToArray();
Array.Sort(arrayTran, TransitionGroup.CompareTransitions);
return arrayTran;
}
public PeptideGroupDocNode ToDocNode()
{
PeptideGroupDocNode nodePepGroup;
SrmSettingsDiff diff = SrmSettingsDiff.ALL;
if (PeptideList)
{
if (_activePeptide != null)
{
CompletePeptide(true);
diff = SrmSettingsDiff.PROPS;
}
nodePepGroup = new PeptideGroupDocNode(_activeFastaSeq ?? new PeptideGroup(_peptides.Any(p => p.IsDecoy)),
Name, Description, _peptides.ToArray());
}
else if (_customName) // name travels in the PeptideGroupDocNode instead of the FastaSequence
{
nodePepGroup = new PeptideGroupDocNode(
new FastaSequence(null, null, Alternatives, _sequence.ToString()),
Name, Description, new PeptideDocNode[0]);
}
else // name travels with the FastaSequence
{
nodePepGroup = new PeptideGroupDocNode(
new FastaSequence(Name, Description, Alternatives, _sequence.ToString()),
null, null, new PeptideDocNode[0]);
}
// If this is a fasta file with no explicitly modified peptides, then apply
// the usual peptide filtering rules. Otherwise, keep all peptides the user input.
if (!_autoManageChildren)
nodePepGroup = (PeptideGroupDocNode) nodePepGroup.ChangeAutoManageChildren(false);
// Materialize children, so that we have accurate accounting of
// peptide and transition counts.
nodePepGroup = nodePepGroup.ChangeSettings(_settings, diff);
List<DocNode> newChildren = new List<DocNode>(nodePepGroup.Children.Count);
foreach (PeptideDocNode nodePep in nodePepGroup.Children)
{
var nodePepAdd = nodePep;
Adduct charge;
if (_charges.TryGetValue(nodePep.Id.GlobalIndex, out charge))
{
var settingsCharge = _settings.ChangeTransitionFilter(f => f.ChangePeptidePrecursorCharges(new[] {charge}));
nodePepAdd = (PeptideDocNode) nodePep.ChangeSettings(settingsCharge, diff)
.ChangeAutoManageChildren(false);
}
newChildren.Add(nodePepAdd);
}
return (PeptideGroupDocNode) nodePepGroup.ChangeChildren(newChildren);
}
}
public class TransitionImportErrorInfo
{
public long? LineNum { get; private set; }
public int? Column { get; private set; }
public string ErrorMessage { get; private set; }
public string LineText { get; private set; }
public TransitionImportErrorInfo(string errorMessage, int? columnIndex, long? lineNum, string lineText)
{
ErrorMessage = errorMessage;
LineText = lineText;
Column = columnIndex + 1; // 1 based column number for reporting to a user
LineNum = lineNum;
}
}
class TransitionGroupLibraryIrtTriple
{
public SpectrumMzInfo SpectrumInfo { get; set; }
public TransitionGroupDocNode NodeGroup { get; set; }
public double? Irt { get; set; }
public double PrecursorMz { get; set; }
public TransitionGroupLibraryIrtTriple(SpectrumMzInfo spectrumInfo, TransitionGroupDocNode nodeGroup, double? irt, double precursorMz)
{
SpectrumInfo = spectrumInfo;
NodeGroup = nodeGroup;
Irt = irt;
PrecursorMz = precursorMz;
}
public static int CompareTriples(TransitionGroupLibraryIrtTriple p1, TransitionGroupLibraryIrtTriple p2)
{
int groupComparison = Peptide.CompareGroups(p1.NodeGroup, p2.NodeGroup);
if (groupComparison != 0)
return groupComparison;
if (!p1.Irt.HasValue)
return p2.Irt.HasValue ? -1 : 0;
if (!p2.Irt.HasValue)
return 1;
return p1.Irt.Value.CompareTo(p2.Irt.Value);
}
}
public class FastaData
{
private FastaData(string name, string sequence)
{
Name = name;
Sequence = sequence;
}
public string Name { get; private set; }
public string Sequence { get; private set; }
public static void AppendSequence(StringBuilder sequence, string line)
{
var seq = FastaSequence.StripModifications(line);
// Get rid of whitespace
seq = seq.Replace(@" ", string.Empty).Trim();
// Get rid of end of sequence indicator
if (seq.EndsWith(@"*"))
seq = seq.Substring(0, seq.Length - 1);
sequence.Append(seq);
}
public static IEnumerable<FastaData> ParseFastaFile(TextReader reader)
{
string line;
string name = string.Empty;
StringBuilder sequence = new StringBuilder();
while ((line = reader.ReadLine()) != null)
{
if (line.StartsWith(@">"))
{
if (!string.IsNullOrEmpty(name))
{
yield return new FastaData(name, sequence.ToString());
sequence.Clear();
}
var split = line.Split(TextUtil.SEPARATOR_SPACE);
// Remove the '>'
name = split[0].Remove(0, 1).Trim();
}
else
{
AppendSequence(sequence, line);
}
}
// Add the last fasta sequence
yield return new FastaData(name, sequence.ToString());
}
}
}
| 1 | 14,348 | Looks like my fault for sure in trying to handle international inputs regardless of locale. I think the correct fix is actually to get rid of TrySplitColumns and use ParseDsvFields instead. It is what gets used in the end, and it already has logic for dealing with quoted fields. | ProteoWizard-pwiz | .cs |
@@ -2733,6 +2733,7 @@ describe('Find', function() {
cursors[0].next(function(err) {
test.equal(null, err);
+ cursors[0].close();
client.close();
done();
}); | 1 | 'use strict';
var test = require('./shared').assert;
var setupDatabase = require('./shared').setupDatabase;
describe('Find', function() {
before(function() {
return setupDatabase(this.configuration);
});
/**
* Test a simple find
* @ignore
*/
it('shouldCorrectlyPerformSimpleFind', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('test_find_simple', function(err, collection) {
var doc1 = null;
// Insert some test documents
collection.insert([{ a: 2 }, { b: 3 }], configuration.writeConcernMax(), function(
err,
r
) {
doc1 = r.ops[0];
// Ensure correct insertion testing via the cursor and the count function
collection.find().toArray(function(err, documents) {
test.equal(2, documents.length);
collection.count(function(err, count) {
test.equal(2, count);
// Fetch values by selection
collection.find({ a: doc1.a }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal(doc1.a, documents[0].a);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
}
});
/**
* Test a simple find chained
* @ignore
*/
it('shouldCorrectlyPerformSimpleChainedFind', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_simple_chained', function(err) {
test.equal(null, err);
db.collection('test_find_simple_chained', function(err, collection) {
var doc1 = null;
// Insert some test documents
collection.insert([{ a: 2 }, { b: 3 }], configuration.writeConcernMax(), function(
err,
r
) {
doc1 = r.ops[0];
// Ensure correct insertion testing via the cursor and the count function
collection.find().toArray(function(err, documents) {
test.equal(2, documents.length);
collection.count(function(err, count) {
test.equal(2, count);
// Fetch values by selection
collection.find({ a: doc1.a }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal(doc1.a, documents[0].a);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
});
}
});
/**
* Test advanced find
* @ignore
*/
it('shouldCorrectlyPerformAdvancedFinds', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_find_advanced');
// Insert some test documents
collection.insert([{ a: 1 }, { a: 2 }, { b: 3 }], configuration.writeConcernMax(), function(
err,
r
) {
var doc1 = r.ops[0],
doc2 = r.ops[1];
// Locate by less than
collection.find({ a: { $lt: 10 } }).toArray(function(err, documents) {
test.equal(2, documents.length);
// Check that the correct documents are returned
var results = [];
// Check that we have all the results we want
documents.forEach(function(doc) {
if (doc.a === 1 || doc.a === 2) results.push(1);
});
test.equal(2, results.length);
// Locate by greater than
collection.find({ a: { $gt: 1 } }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal(2, documents[0].a);
// Locate by less than or equal to
collection.find({ a: { $lte: 1 } }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal(1, documents[0].a);
// Locate by greater than or equal to
collection.find({ a: { $gte: 1 } }).toArray(function(err, documents) {
test.equal(2, documents.length);
// Check that the correct documents are returned
var results = [];
// Check that we have all the results we want
documents.forEach(function(doc) {
if (doc.a === 1 || doc.a === 2) results.push(1);
});
test.equal(2, results.length);
// Locate by between
collection.find({ a: { $gt: 1, $lt: 3 } }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal(2, documents[0].a);
// Locate in clause
collection.find({ a: { $in: [1, 2] } }).toArray(function(err, documents) {
test.equal(2, documents.length);
// Check that the correct documents are returned
var results = [];
// Check that we have all the results we want
documents.forEach(function(doc) {
if (doc.a === 1 || doc.a === 2) results.push(1);
});
test.equal(2, results.length);
// Locate in _id clause
collection
.find({ _id: { $in: [doc1['_id'], doc2['_id']] } })
.toArray(function(err, documents) {
test.equal(2, documents.length);
// Check that the correct documents are returned
var results = [];
// Check that we have all the results we want
documents.forEach(function(doc) {
if (doc.a === 1 || doc.a === 2) results.push(1);
});
test.equal(2, results.length);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
});
});
});
}
});
/**
* Test sorting of results
* @ignore
*/
it('shouldCorrectlyPerformFindWithSort', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_sorting', function(err) {
test.equal(null, err);
db.collection('test_find_sorting', function(err, collection) {
// Insert some test documents
collection.insert(
[{ a: 1, b: 2 }, { a: 2, b: 1 }, { a: 3, b: 2 }, { a: 4, b: 1 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Test sorting (ascending)
collection
.find({ a: { $lt: 10 } }, { sort: [['a', 1]] })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(1, documents[0].a);
test.equal(2, documents[1].a);
test.equal(3, documents[2].a);
test.equal(4, documents[3].a);
// Test sorting (descending)
collection
.find({ a: { $lt: 10 } }, { sort: [['a', -1]] })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(4, documents[0].a);
test.equal(3, documents[1].a);
test.equal(2, documents[2].a);
test.equal(1, documents[3].a);
// Test sorting (descending), sort is hash
collection
.find({ a: { $lt: 10 } }, { sort: { a: -1 } })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(4, documents[0].a);
test.equal(3, documents[1].a);
test.equal(2, documents[2].a);
test.equal(1, documents[3].a);
// Sorting using array of names, assumes ascending order
collection
.find({ a: { $lt: 10 } }, { sort: ['a'] })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(1, documents[0].a);
test.equal(2, documents[1].a);
test.equal(3, documents[2].a);
test.equal(4, documents[3].a);
// Sorting using single name, assumes ascending order
collection
.find({ a: { $lt: 10 } }, { sort: 'a' })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(1, documents[0].a);
test.equal(2, documents[1].a);
test.equal(3, documents[2].a);
test.equal(4, documents[3].a);
// Sorting using single name, assumes ascending order, sort is hash
collection
.find({ a: { $lt: 10 } }, { sort: { a: 1 } })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(1, documents[0].a);
test.equal(2, documents[1].a);
test.equal(3, documents[2].a);
test.equal(4, documents[3].a);
collection
.find({ a: { $lt: 10 } }, { sort: ['b', 'a'] })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(2, documents[0].a);
test.equal(4, documents[1].a);
test.equal(1, documents[2].a);
test.equal(3, documents[3].a);
// Sorting using empty array, no order guarantee should not blow up
collection
.find({ a: { $lt: 10 } }, { sort: [] })
.toArray(function(err, documents) {
test.equal(4, documents.length);
/* NONACTUAL */
// Sorting using ordered hash
collection
.find({ a: { $lt: 10 } }, { sort: { a: -1 } })
.toArray(function(err, documents) {
// Fail test if not an error
test.equal(4, documents.length);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
});
});
});
}
);
});
});
});
}
});
/**
* Test the limit function of the db
* @ignore
*/
it('shouldCorrectlyPerformFindWithLimit', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_limits', function(err) {
test.equal(null, err);
db.collection('test_find_limits', function(err, collection) {
// Insert some test documents
collection.insert(
[{ a: 1 }, { b: 2 }, { c: 3 }, { d: 4 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Test limits
collection.find({}, { limit: 1 }).toArray(function(err, documents) {
test.equal(1, documents.length);
collection.find({}, { limit: 2 }).toArray(function(err, documents) {
test.equal(2, documents.length);
collection.find({}, { limit: 3 }).toArray(function(err, documents) {
test.equal(3, documents.length);
collection.find({}, { limit: 4 }).toArray(function(err, documents) {
test.equal(4, documents.length);
collection.find({}, {}).toArray(function(err, documents) {
test.equal(4, documents.length);
collection.find({}, { limit: 99 }).toArray(function(err, documents) {
test.equal(4, documents.length);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
}
);
});
});
});
}
});
/**
* Test find by non-quoted values (issue #128)
* @ignore
*/
it('shouldCorrectlyFindWithNonQuotedValues', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_non_quoted_values', function(err) {
test.equal(null, err);
db.collection('test_find_non_quoted_values', function(err, collection) {
// insert test document
collection.insert(
[{ a: 19, b: 'teststring', c: 59920303 }, { a: '19', b: 'teststring', c: 3984929 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection.find({ a: 19 }).toArray(function(err, documents) {
test.equal(1, documents.length);
client.close();
done();
});
}
);
});
});
});
}
});
/**
* Test for querying embedded document using dot-notation (issue #126)
* @ignore
*/
it('shouldCorrectlyFindEmbeddedDocument', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_embedded_document', function(err) {
test.equal(null, err);
db.collection('test_find_embedded_document', function(err, collection) {
// insert test document
collection.insert(
[
{ a: { id: 10, value: 'foo' }, b: 'bar', c: { id: 20, value: 'foobar' } },
{ a: { id: 11, value: 'foo' }, b: 'bar2', c: { id: 20, value: 'foobar' } }
],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// test using integer value
collection.find({ 'a.id': 10 }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal('bar', documents[0].b);
// test using string value
collection.find({ 'a.value': 'foo' }).toArray(function(err, documents) {
// should yield 2 documents
test.equal(2, documents.length);
test.equal('bar', documents[0].b);
test.equal('bar2', documents[1].b);
client.close();
done();
});
});
}
);
});
});
});
}
});
/**
* Find no records
* @ignore
*/
it('shouldCorrectlyFindNoRecords', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_one_no_records', function(err) {
test.equal(null, err);
db.collection('test_find_one_no_records', function(err, collection) {
test.equal(null, err);
collection.find({ a: 1 }, {}).toArray(function(err, documents) {
test.equal(0, documents.length);
// Let's close the db
client.close();
done();
});
});
});
});
}
});
  /**
   * Test counting documents matched by server-side $where JavaScript clauses.
   * @ignore
   */
it('shouldCorrectlyPerformFindByWhere', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var Code = configuration.require.Code;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_where', function(err, collection) {
collection.insert(
[{ a: 1 }, { a: 2 }, { a: 3 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection.count(function(err, count) {
test.equal(null, err);
test.equal(3, count);
// Let's test usage of the $where statement
collection.find({ $where: new Code('this.a > 2') }).count(function(err, count) {
test.equal(null, err);
test.equal(1, count);
collection
.find({ $where: new Code('this.a > i', { i: 1 }) })
.count(function(err, count) {
test.equal(null, err);
test.equal(2, count);
// Let's close the db
client.close();
done();
});
});
});
}
);
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyPerformFindsWithHintTurnedOn', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_hint', function(err, collection) {
collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
db.createIndex(
collection.collectionName,
'a',
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection.find({ a: 1 }, { hint: 'a' }).toArray(function(err) {
test.ok(err != null);
collection.find({ a: 1 }, { hint: ['a'] }).toArray(function(err, items) {
test.equal(1, items.length);
collection.find({ a: 1 }, { hint: { a: 1 } }).toArray(function(err, items) {
test.equal(1, items.length);
// Modify hints
collection.hint = 'a_1';
test.equal('a_1', collection.hint);
collection.find({ a: 1 }).toArray(function(err, items) {
test.equal(1, items.length);
collection.hint = ['a'];
test.equal(1, collection.hint['a']);
collection.find({ a: 1 }).toArray(function(err, items) {
test.equal(1, items.length);
collection.hint = { a: 1 };
test.equal(1, collection.hint['a']);
collection.find({ a: 1 }).toArray(function(err, items) {
test.equal(1, items.length);
collection.hint = null;
test.ok(collection.hint == null);
collection.find({ a: 1 }).toArray(function(err, items) {
test.equal(1, items.length);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
});
}
);
});
});
});
}
});
  /**
   * Test finding a saved document by its ObjectID, including an ObjectID
   * re-created from the hex string representation.
   * @ignore
   */
it('shouldCorrectlyPerformFindByObjectID', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_by_oid', function(err, collection) {
collection.save({ hello: 'mike' }, configuration.writeConcernMax(), function(err, r) {
var docs = r.ops[0];
test.ok(
docs._id instanceof ObjectID ||
Object.prototype.toString.call(docs._id) === '[object ObjectID]'
);
collection.findOne({ _id: docs._id }, function(err, doc) {
test.equal('mike', doc.hello);
var id = doc._id.toString();
collection.findOne({ _id: new ObjectID(id) }, function(err, doc) {
test.equal('mike', doc.hello);
// Let's close the db
client.close();
done();
});
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyReturnDocumentWithOriginalStructure', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_by_oid_with_subdocs', function(err, collection) {
var c1 = { _id: new ObjectID(), comments: [], title: 'number 1' };
var c2 = { _id: new ObjectID(), comments: [], title: 'number 2' };
var doc = {
numbers: [],
owners: [],
comments: [c1, c2],
_id: new ObjectID()
};
collection.insert(doc, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
collection.findOne({ _id: doc._id }, { w: 1, fields: undefined }, function(err, doc) {
if (err) console.error('error', err);
test.equal(2, doc.comments.length);
test.equal('number 1', doc.comments[0].title);
test.equal('number 2', doc.comments[1].title);
client.close();
done();
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyRetrieveSingleRecord', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var p_client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: false
});
p_client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_should_correctly_retrieve_one_record', function(err, collection) {
collection.insert({ a: 0 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
db.collection('test_should_correctly_retrieve_one_record', function(
err,
usercollection
) {
usercollection.findOne({ a: 0 }, function(err) {
test.equal(null, err);
p_client.close();
done();
});
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyHandleError', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_one_error_handling', function(err, collection) {
// Try to fetch an object using a totally invalid and wrong hex string... what we're interested in here
// is the error handling of the findOne Method
try {
collection.findOne(
{ _id: ObjectID.createFromHexString('5e9bd59248305adf18ebc15703a1') },
function() {}
);
} catch (err) {
client.close();
done();
}
});
});
}
});
/**
* Test field select with options
* @ignore
*/
it('shouldCorrectlyPerformFindWithOptions', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_field_select_with_options', function(err) {
test.equal(null, err);
db.collection('test_field_select_with_options', function(err, collection) {
var docCount = 25,
docs = [];
// Insert some test documents
while (docCount--) docs.push({ a: docCount, b: docCount });
collection.insert(docs, configuration.writeConcernMax(), function(err, retDocs) {
docs = retDocs;
collection
.find({}, { limit: 3, sort: [['a', -1]], projection: { a: 1 } })
.toArray(function(err, documents) {
test.equal(3, documents.length);
documents.forEach(function(doc, idx) {
test.equal(undefined, doc.b); // making sure field select works
test.equal(24 - idx, doc.a); // checking limit sort object with field select
});
client.close();
done();
});
});
});
});
});
}
});
/**
* Test findAndModify a document
* @ignore
*/
it('shouldCorrectlyFindAndModifyDocument', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_and_modify_a_document_1', function(err, collection) {
// Test return new document on change
collection.insert({ a: 1, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 1 },
[['a', 1]],
{ $set: { b: 3 } },
{ new: true },
function(err, updated_doc) {
test.equal(1, updated_doc.value.a);
test.equal(3, updated_doc.value.b);
// Test return old document on change
collection.insert({ a: 2, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 2 },
[['a', 1]],
{ $set: { b: 3 } },
configuration.writeConcernMax(),
function(err, result) {
test.equal(2, result.value.a);
test.equal(2, result.value.b);
// Test remove object on change
collection.insert({ a: 3, b: 2 }, configuration.writeConcernMax(), function(
err
) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 3 },
[],
{ $set: { b: 3 } },
{ remove: true },
function(err, updated_doc) {
test.equal(3, updated_doc.value.a);
test.equal(2, updated_doc.value.b);
// Let's upsert!
collection.findAndModify(
{ a: 4 },
[],
{ $set: { b: 3 } },
{ new: true, upsert: true },
function(err, updated_doc) {
test.equal(4, updated_doc.value.a);
test.equal(3, updated_doc.value.b);
// Test selecting a subset of fields
collection.insert(
{ a: 100, b: 101 },
configuration.writeConcernMax(),
function(err, r) {
test.equal(null, err);
collection.findAndModify(
{ a: 100 },
[],
{ $set: { b: 5 } },
{ new: true, fields: { b: 1 } },
function(err, updated_doc) {
test.equal(2, Object.keys(updated_doc.value).length);
test.equal(
r.ops[0]['_id'].toHexString(),
updated_doc.value._id.toHexString()
);
test.equal(5, updated_doc.value.b);
test.equal('undefined', typeof updated_doc.value.a);
client.close();
done();
}
);
}
);
}
);
}
);
});
}
);
});
}
);
});
});
});
}
});
/**
* Test findAndModify a document with fields
* @ignore
*/
it('shouldCorrectlyFindAndModifyDocumentAndReturnSelectedFieldsOnly', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_and_modify_a_document_2', function(err, collection) {
// Test return new document on change
collection.insert({ a: 1, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 1 },
[['a', 1]],
{ $set: { b: 3 } },
{ new: true, fields: { a: 1 } },
function(err, updated_doc) {
test.equal(2, Object.keys(updated_doc.value).length);
test.equal(1, updated_doc.value.a);
client.close();
done();
}
);
});
});
});
}
});
/**
* @ignore
*/
it('ShouldCorrectlyLocatePostAndIncValues', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('shouldCorrectlyExecuteFindOneWithAnInSearchTag', function(
err,
collection
) {
// Test return new document on change
collection.insert(
{
title: 'Tobi',
author: 'Brian',
newTitle: 'Woot',
meta: { visitors: 0 }
},
configuration.writeConcernMax(),
function(err, r) {
// Fetch the id
var id = r.ops[0]._id;
collection.update(
{ _id: id },
{ $inc: { 'meta.visitors': 1 } },
configuration.writeConcernMax(),
function(err, r) {
test.equal(1, r.result.n);
test.equal(null, err);
collection.findOne({ _id: id }, function(err, item) {
test.equal(1, item.meta.visitors);
client.close();
done();
});
}
);
}
);
});
});
}
});
/**
* Test findAndModify a document
* @ignore
*/
it('Should Correctly Handle FindAndModify Duplicate Key Error', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('FindAndModifyDuplicateKeyError', function(err, collection) {
collection.ensureIndex(['name', 1], { unique: true, w: 1 }, function(err) {
test.equal(null, err);
// Test return new document on change
collection.insert(
[{ name: 'test1' }, { name: 'test2' }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ name: 'test1' },
[],
{ $set: { name: 'test2' } },
{},
function(err, updated_doc) {
test.equal(null, updated_doc);
test.ok(err != null);
client.close();
done();
}
);
}
);
});
});
});
}
});
/**
* @ignore
*/
it('Should correctly return null when attempting to modify a non-existing document', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('AttemptToFindAndModifyNonExistingDocument', function(err, collection) {
// Let's modify the document in place
collection.findAndModify({ name: 'test1' }, [], { $set: { name: 'test2' } }, {}, function(
err,
updated_doc
) {
test.equal(null, updated_doc.value);
test.ok(err == null || err.errmsg.match('No matching object found'));
client.close();
done();
});
});
});
}
});
/**
* @ignore
*/
it('Should correctly handle chained skip and limit on find with toArray', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('skipAndLimitOnFindWithToArray', function(err, collection) {
collection.insert(
[{ a: 1 }, { b: 2 }, { c: 3 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection
.find()
.skip(1)
.limit(-1)
.toArray(function(err, items) {
test.equal(null, err);
test.equal(1, items.length);
test.equal(2, items[0].b);
client.close();
done();
});
}
);
});
});
}
});
/**
* @ignore
*/
it('Should correctly handle chained skip and negative limit on find with toArray', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('skipAndNegativeLimitOnFindWithToArray', function(err, collection) {
collection.insert(
[{ a: 1 }, { b: 2 }, { c: 3 }, { d: 4 }, { e: 5 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection
.find()
.skip(1)
.limit(-3)
.toArray(function(err, items) {
test.equal(null, err);
test.equal(3, items.length);
test.equal(2, items[0].b);
test.equal(3, items[1].c);
test.equal(4, items[2].d);
client.close();
done();
});
}
);
});
});
}
});
/**
* @ignore
*/
it('Should correctly pass timeout options to cursor', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('timeoutFalse', function(err, collection) {
collection.find({}, { timeout: false }, function(err, cursor) {
test.equal(false, cursor.s.cmd.noCursorTimeout);
collection.find({}, { timeout: true }, function(err, cursor) {
test.equal(true, cursor.s.cmd.noCursorTimeout);
collection.find({}, {}, function(err, cursor) {
test.ok(!cursor.s.cmd.noCursorTimeout);
client.close();
done();
});
});
});
});
});
}
});
/**
* Test findAndModify a document with strict mode enabled
* @ignore
*/
it('shouldCorrectlyFindAndModifyDocumentWithDBStrict', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var p_client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: false
});
p_client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('shouldCorrectlyFindAndModifyDocumentWithDBStrict', function(
err,
collection
) {
// Test return old document on change
collection.insert({ a: 2, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 2 },
[['a', 1]],
{ $set: { b: 3 } },
{ new: true },
function(err, result) {
test.equal(2, result.value.a);
test.equal(3, result.value.b);
p_client.close();
done();
}
);
});
});
});
}
});
/**
* Test findAndModify a document that fails in first step before safe
* @ignore
*/
it('shouldCorrectlyFindAndModifyDocumentThatFailsInFirstStep', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('shouldCorrectlyFindAndModifyDocumentThatFailsInFirstStep', function(
err,
collection
) {
// Set up an index to force duplicate index erro
collection.ensureIndex([['failIndex', 1]], { unique: true, w: 1 }, function(err) {
test.equal(null, err);
// Setup a new document
collection.insert(
{ a: 2, b: 2, failIndex: 2 },
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Let's attempt to upsert with a duplicate key error
collection.findAndModify(
{ c: 2 },
[['a', 1]],
{ a: 10, b: 10, failIndex: 2 },
{ w: 1, upsert: true },
function(err, result) {
test.equal(null, result);
test.ok(err.errmsg.match('duplicate key'));
client.close();
done();
}
);
}
);
});
});
});
}
});
/**
* @ignore
*/
it('Should correctly return new modified document', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('Should_correctly_return_new_modified_document', function(
err,
collection
) {
var id = new ObjectID();
var doc = { _id: id, a: 1, b: 1, c: { a: 1, b: 1 } };
collection.insert(doc, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Find and modify returning the new object
collection.findAndModify(
{ _id: id },
[],
{ $set: { 'c.c': 100 } },
{ new: true },
function(err, item) {
test.equal(doc._id.toString(), item.value._id.toString());
test.equal(doc.a, item.value.a);
test.equal(doc.b, item.value.b);
test.equal(doc.c.a, item.value.c.a);
test.equal(doc.c.b, item.value.c.b);
test.equal(100, item.value.c.c);
client.close();
done();
}
);
});
});
});
}
});
/**
* Should correctly execute findAndModify that is breaking in prod
* @ignore
*/
it('shouldCorrectlyExecuteFindAndModify', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('shouldCorrectlyExecuteFindAndModify', function(err, collection) {
var self = { _id: new ObjectID() };
var _uuid = 'sddffdss';
collection.findAndModify(
{ _id: self._id, 'plays.uuid': _uuid },
[],
{ $set: { 'plays.$.active': true } },
{ new: true, fields: { plays: 0, results: 0 }, safe: true },
function(err) {
test.equal(null, err);
client.close();
done();
}
);
});
});
}
});
  /**
   * Test range ($gt) queries over 64-bit Long id values.
   * @ignore
   */
it('Should correctly return record with 64-bit id', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID,
Long = configuration.require.Long;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('should_correctly_return_record_with_64bit_id', function(
err,
collection
) {
var _lowerId = new ObjectID();
var _higherId = new ObjectID();
var lowerId = new Long.fromString('133118461172916224', 10);
var higherId = new Long.fromString('133118461172916225', 10);
var lowerDoc = { _id: _lowerId, id: lowerId };
var higherDoc = { _id: _higherId, id: higherId };
collection.insert([lowerDoc, higherDoc], configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Select record with id of 133118461172916225 using $gt directive
collection.find({ id: { $gt: lowerId } }, {}).toArray(function(err, arr) {
test.ok(err == null);
test.equal(
arr.length,
1,
'Selecting record via $gt directive on 64-bit integer should return a record with higher Id'
);
test.equal(
arr[0].id.toString(),
'133118461172916225',
'Returned Id should be equal to 133118461172916225'
);
client.close();
done();
});
});
});
});
}
});
/**
* @ignore
*/
it('Should Correctly find a Document using findOne excluding _id field', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var ObjectID = configuration.require.ObjectID;
    var p_client = configuration.newClient(configuration.writeConcernMax(), {
      poolSize: 1,
      auto_reconnect: false
    });
    p_client.connect(function(err, client) {
      var db = client.db(configuration.db);
      db.createCollection(
        'Should_Correctly_find_a_Document_using_findOne_excluding__id_field',
        function(err, collection) {
          var doc = { _id: new ObjectID(), a: 1, c: 2 };
          // insert doc
          collection.insert(doc, configuration.writeConcernMax(), function(err) {
            test.equal(null, err);

            // Get one document, excluding the _id field
            collection.findOne({ a: 1 }, { fields: { _id: 0 } }, function(err, item) {
              test.equal(undefined, item._id);
              test.equal(1, item.a);
              test.equal(2, item.c);

              // The same `fields` exclusion must also be honoured by find().toArray()
              collection.find({ a: 1 }, { fields: { _id: 0 } }).toArray(function(err, items) {
                var item = items[0];
                test.equal(undefined, item._id);
                test.equal(1, item.a);
                test.equal(2, item.c);
                p_client.close();
                done();
              });
            });
          });
        }
      );
    });
  }
});
/**
* @ignore
*/
it('Should correctly execute find queries with selector set to null', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var ObjectID = configuration.require.ObjectID;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      db.createCollection(
        'Should_correctly_execute_find_and_findOne_queries_in_the_same_way',
        function(err, collection) {
          var doc = { _id: new ObjectID(), a: 1, c: 2, comments: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] };
          // insert doc
          collection.insert(doc, configuration.writeConcernMax(), function(err) {
            test.equal(null, err);

            // $slice: -5 projects only the last five entries of `comments`
            collection
              .find({ _id: doc._id })
              .project({ comments: { $slice: -5 } })
              .toArray(function(err, docs) {
                test.equal(5, docs[0].comments.length);
                client.close();
                done();
              });
          });
        }
      );
    });
  }
});
/**
* @ignore
*/
it('shouldCorrectlyHandlerErrorForFindAndModifyWhenNoRecordExists', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      db.createCollection(
        'shouldCorrectlyHandlerErrorForFindAndModifyWhenNoRecordExists',
        function(err, collection) {
          // With no matching document and no upsert, findAndModify must
          // succeed with a null `value` rather than produce an error.
          collection.findAndModify({ a: 1 }, [], { $set: { b: 3 } }, { new: true }, function(
            err,
            updated_doc
          ) {
            test.equal(null, err);
            test.equal(null, updated_doc.value);

            client.close();
            done();
          });
        }
      );
    });
  }
});
/**
* @ignore
*/
it('shouldCorrectlyExecuteFindAndModifyShouldGenerateCorrectBSON', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var ObjectID = configuration.require.ObjectID;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Build a nested transaction document to exercise dotted-path selectors
      var transaction = {};
      transaction.document = {};
      transaction.document.type = 'documentType';
      transaction.document.id = new ObjectID();
      transaction.transactionId = new ObjectID();
      transaction.amount = 12.3333;

      var transactions = [];
      transactions.push(transaction);
      // Wrapping object
      var wrapingObject = {
        funds: {
          remaining: 100.5
        },
        transactions: transactions
      };

      db.createCollection('shouldCorrectlyExecuteFindAndModify', function(err, collection) {
        test.equal(null, err);
        collection.insert(wrapingObject, configuration.writeConcernMax(), function(err, r) {
          test.equal(null, err);

          collection.findOne(
            {
              _id: r.ops[0]._id,
              'funds.remaining': { $gte: 3.0 },
              'transactions.id': { $ne: transaction.transactionId }
            },
            function(err, item) {
              test.ok(item != null);

              // The identical dotted-path selector must serialize to the same
              // BSON when used for findAndModify as it did for findOne
              collection.findAndModify(
                {
                  _id: r.ops[0]._id,
                  'funds.remaining': { $gte: 3.0 },
                  'transactions.id': { $ne: transaction.transactionId }
                },
                [],
                { $push: { transactions: transaction } },
                { new: true, safe: true },
                function(err) {
                  test.equal(null, err);
                  client.close();
                  done();
                }
              );
            }
          );
        });
      });
    });
  }
});
/**
* @ignore
*/
it('shouldCorrectlyExecuteMultipleFindsInParallel', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var p_client = configuration.newClient(configuration.writeConcernMax(), {
      poolSize: 1,
      auto_reconnect: false
    });
    p_client.connect(function(err, client) {
      var db = client.db(configuration.db);
      db.createCollection('tasks', function(err, collection) {
        // Both parallel count() callbacks bump this counter; the test only
        // completes once the second one has fired.
        var numberOfOperations = 0;

        // Test return old document on change
        collection.insert({ a: 2, b: 2 }, configuration.writeConcernMax(), function(err) {
          test.equal(null, err);
          collection
            .find(
              {
                user_id: '4e9fc8d55883d90100000003',
                lc_status: { $ne: 'deleted' },
                owner_rating: { $exists: false }
              },
              { skip: 0, limit: 10, sort: { updated: -1 } }
            )
            .count(function(err) {
              test.equal(null, err);
              numberOfOperations = numberOfOperations + 1;
              if (numberOfOperations === 2) {
                done();
                p_client.close();
              }
            });

          // Second, identical count issued concurrently on the same pool
          collection
            .find(
              {
                user_id: '4e9fc8d55883d90100000003',
                lc_status: { $ne: 'deleted' },
                owner_rating: { $exists: false }
              },
              { skip: 0, limit: 10, sort: { updated: -1 } }
            )
            .count(function(err) {
              test.equal(null, err);
              numberOfOperations = numberOfOperations + 1;
              if (numberOfOperations === 2) {
                done();
                p_client.close();
              }
            });
        });
      });
    });
  }
});
/**
* @ignore
*/
it('shouldCorrectlyReturnErrorFromMongodbOnFindAndModifyForcedError', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var ObjectID = configuration.require.ObjectID;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      db.createCollection(
        'shouldCorrectlyReturnErrorFromMongodbOnFindAndModifyForcedError',
        function(err, collection) {
          var q = { x: 1 };
          // NOTE(review): the replacement doc carries its own _id, which
          // presumably conflicts with the matched document's _id — the
          // callback deliberately ignores `err` and only checks completion.
          var set = { y: 2, _id: new ObjectID() };
          var opts = { new: true, upsert: true };
          // Original doc
          var doc = { _id: new ObjectID(), x: 1 };

          // Insert original doc
          collection.insert(doc, configuration.writeConcernMax(), function(err) {
            test.equal(null, err);
            collection.findAndModify(q, [], set, opts, function(/* err */) {
              client.close();
              done();
            });
          });
        }
      );
    });
  }
});
/**
* @ignore
*/
it('shouldCorrectlyExecuteFindAndModifyUnderConcurrentLoad', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var ObjectID = configuration.require.ObjectID;
    var p_client = configuration.newClient(configuration.writeConcernMax(), {
      poolSize: 1,
      auto_reconnect: false
    });
    // Controls the background insert loop on collection2; flipped to false
    // once the duplicate-key path on collection1 has completed.
    var running = true;

    p_client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Create a collection
      db.collection('collection1', function(err, collection) {
        // Wait a bit and then execute something that will throw a duplicate error
        setTimeout(function() {
          var id = new ObjectID();

          collection.insert({ _id: id, a: 1 }, configuration.writeConcernMax(), function(err) {
            test.equal(null, err);

            // Inserting the same _id again must surface a duplicate-key error
            collection.insert({ _id: id, a: 1 }, configuration.writeConcernMax(), function(err) {
              test.ok(err !== null);
              running = false;
              done();
              p_client.close();
            });
          });
        }, 200);
      });

      db.collection('collection2', function(err, collection) {
        // Keep hammering in inserts
        var insert = function() {
          process.nextTick(function() {
            collection.insert({ a: 1 });
            if (running) process.nextTick(insert);
          });
        };

        // BUG FIX: the hammering loop was defined but never started, so the
        // test exercised no concurrent load at all. Kick it off here; it
        // stops itself once `running` goes false.
        insert();
      });
    });
  }
});
/**
* @ignore
*/
it('shouldCorrectlyIterateOverCollection', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var p_client = configuration.newClient(configuration.writeConcernMax(), {
      poolSize: 1,
      auto_reconnect: false
    });
    // Counts documents yielded by cursor.each(); must reach 500 at the end
    var numberOfSteps = 0;

    // Open db connection
    p_client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Create a collection
      var collection = db.collection('shouldCorrectlyIterateOverCollection');
      // Recursively insert `l` identical documents, then invoke callback
      var insertF = function(l, callback) {
        collection.insert(
          { a: 1, b: 2, c: { d: 3, f: 'sfdsffffffffffffffffffffffffffffff' } },
          function() {
            l = l - 1;

            if (l > 0) return insertF(l, callback);

            callback();
          }
        );
      };

      // Insert 500 documents one at a time, then iterate them all
      insertF(500, function() {
        var cursor = collection.find({}, {});
        cursor.count(function(err) {
          test.equal(null, err);

          // each() yields every document, then a final null to signal the end
          cursor.each(function(err, obj) {
            if (obj == null) {
              p_client.close();
              test.equal(500, numberOfSteps);
              done();
            } else {
              numberOfSteps = numberOfSteps + 1;
            }
          });
        });
      });
    });
  }
});
/**
* @ignore
*/
it('shouldCorrectlyErrorOutFindAndModifyOnDuplicateRecord', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var p_client = configuration.newClient(configuration.writeConcernMax(), {
      poolSize: 1,
      auto_reconnect: false
    });
    p_client.connect(function(err, client) {
      var db = client.db(configuration.db);
      test.equal(err, null);

      db.createCollection('shouldCorrectlyErrorOutFindAndModifyOnDuplicateRecord', function(
        err,
        collection
      ) {
        test.equal(err, null);

        // Test return old document on change
        collection.insert(
          [{ login: 'user1' }, { login: 'user2' }],
          configuration.writeConcernMax(),
          function(err, r) {
            test.equal(err, null);
            var id = r.ops[1]._id;
            // Set a unique index on login so renaming user2 -> user1 collides
            collection.ensureIndex('login', { unique: true, w: 1 }, function(err) {
              test.equal(null, err);

              // Attempt to modify document; must fail with a duplicate-key error
              collection.findAndModify(
                { _id: id },
                [],
                { $set: { login: 'user1' } },
                {},
                function(err) {
                  test.ok(err !== null);
                  p_client.close();
                  done();
                }
              );
            });
          }
        );
      });
    });
  }
});
/**
* An example of using find with a very large in parameter
*
* @ignore
*/
it('shouldPerformSimpleFindInArray', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Create a collection we want to drop later
      db.createCollection('simple_find_in_array', function(err, collection) {
        test.equal(null, err);

        var docs = [];
        for (var i = 0; i < 100; i++) docs.push({ a: i });

        // Insert some test documentations
        collection.insert(docs, configuration.writeConcernMax(), function(err) {
          test.equal(null, err);

          // BUG FIX: previously the integers 0..99 were pushed onto `docs`
          // itself, mixing the inserted documents into the $in list. Build a
          // dedicated values array instead; the matched set is unchanged.
          var values = [];
          for (var j = 0; j < 100; j++) values.push(j);

          // Find all documents whose `a` field is in the list of values
          collection.find({ a: { $in: values } }).toArray(function(err, items) {
            test.equal(null, err);
            test.equal(100, items.length);
            client.close();
            done();
          });
        });
      });
    });
  }
});
it('shouldReturnInstanceofErrorWithBadFieldSelection', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      test.equal(null, err);

      var col = db.collection('bad_field_selection');
      col.insert(
        [{ a: 1, b: 1 }, { a: 2, b: 2 }, { a: 3, b: 3 }],
        configuration.writeConcernMax(),
        function(err) {
          test.equal(null, err);

          // Mixing inclusion (_id: 1) with exclusion (b: 0) in one projection
          // is invalid and must surface as an Error instance
          col.find({}, { skip: 1, limit: 1, fields: { _id: 1, b: 0 } }).toArray(function(err) {
            test.ok(err instanceof Error);
            client.close();
            done();
          });
        }
      );
    });
  }
});
/**
* A simple query using find and fields
*/
it('shouldPerformASimpleLimitSkipFindWithFields', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Create a collection we want to drop later
      db.createCollection('simple_find_with_fields', function(err, collection) {
        test.equal(null, err);

        // Insert a bunch of documents for the testing
        collection.insert(
          [{ a: 1, b: 1 }, { a: 2, b: 2 }, { a: 3, b: 3 }],
          configuration.writeConcernMax(),
          function(err) {
            test.equal(null, err);

            // Project only `b`; the excluded `a` must come back undefined
            collection
              .find({ a: 2 })
              .project({ b: 1 })
              .toArray(function(err, docs) {
                test.equal(null, err);
                test.equal(1, docs.length);
                test.equal(undefined, docs[0].a);
                test.equal(2, docs[0].b);

                // Repeat the same query to verify the projection is stable
                collection
                  .find({ a: 2 })
                  .project({ b: 1 })
                  .toArray(function(err, docs) {
                    test.equal(null, err);
                    test.equal(1, docs.length);
                    test.equal(undefined, docs[0].a);
                    test.equal(2, docs[0].b);

                    client.close();
                    done();
                  });
              });
          }
        );
      });
    });
  }
});
/**
* A simple query using find and fields
*/
it('shouldPerformASimpleLimitSkipFindWithFields2', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Create a collection we want to drop later
      db.createCollection('simple_find_with_fields_2', function(err, collection) {
        test.equal(null, err);

        // Insert a bunch of documents for the testing
        collection.insert(
          [{ a: 1, b: 1 }, { a: 2, b: 2 }, { a: 3, b: 3 }],
          configuration.writeConcernMax(),
          function(err) {
            test.equal(null, err);

            // Project only `b`; the excluded `a` must come back undefined
            collection
              .find({ a: 2 })
              .project({ b: 1 })
              .toArray(function(err, docs) {
                test.equal(null, err);
                test.equal(1, docs.length);
                test.equal(undefined, docs[0].a);
                test.equal(2, docs[0].b);

                client.close();
                done();
              });
          }
        );
      });
    });
  }
});
/**
* A simple query with a different batchSize
*/
it('shouldPerformQueryWithBatchSizeDifferentToStandard', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Create a collection we want to drop later
      db.createCollection('shouldPerformQueryWithBatchSizeDifferentToStandard', function(
        err,
        collection
      ) {
        test.equal(null, err);

        var docs = [];
        for (var i = 0; i < 1000; i++) {
          docs.push({ a: i });
        }

        // Insert a bunch of documents for the testing
        collection.insert(docs, configuration.writeConcernMax(), function(err) {
          test.equal(null, err);

          // Fetch all 1000 documents using a non-default batchSize
          collection.find({}, { batchSize: 1000 }).toArray(function(err, docs) {
            test.equal(null, err);
            test.equal(1000, docs.length);

            client.close();
            done();
          });
        });
      });
    });
  }
});
/**
* A simple query with negative limit
*/
it('shouldCorrectlyPerformNegativeLimit', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Create a collection we want to drop later
      db.collection('shouldCorrectlyPerformNegativeLimit', function(err, collection) {
        var docs = [];
        for (var i = 0; i < 1000; i++) {
          docs.push({
            a: 1,
            b:
              'helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld'
          });
        }

        // Insert a bunch of documents
        collection.insert(docs, configuration.writeConcernMax(), function(err) {
          test.equal(null, err);

          // A negative limit requests a single batch of |limit| documents
          collection
            .find({})
            .limit(-10)
            .toArray(function(err, docs) {
              test.equal(null, err);
              test.equal(10, docs.length);

              client.close();
              done();
            });
        });
      });
    });
  }
});
/**
* Should perform an exhaust find query
*/
it('shouldCorrectlyExecuteExhaustQuery', {
  metadata: { requires: { topology: ['single', 'replicaset'] } },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var Binary = configuration.require.Binary;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Create a collection we want to drop later
      db.collection('shouldCorrectlyExecuteExhaustQuery', function(err, collection) {
        test.equal(null, err);

        var docs1 = [];
        for (var i = 0; i < 1000; i++) {
          docs1.push({
            a: 1,
            b:
              'helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld',
            c: new Binary(new Buffer(1024))
          });
        }

        // Insert a bunch of documents
        collection.insert(docs1, configuration.writeConcernMax(), function(err) {
          test.equal(null, err);

          // BUG FIX: `docs2` used to be (re)declared inside the loop body,
          // resetting it on every iteration so the second batch only ever
          // held one document. Declare it once so it really contains 1000.
          var docs2 = [];
          for (var i = 0; i < 1000; i++) {
            docs2.push({
              a: 1,
              b:
                'helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld',
              c: new Binary(new Buffer(1024))
            });
          }

          collection.insert(docs2, configuration.writeConcernMax(), function(err) {
            test.equal(null, err);

            // Perform a simple find and return all the documents
            collection.find({}, { exhaust: true }).toArray(function(err, docs3) {
              test.equal(null, err);
              test.equal(docs1.length + docs2.length, docs3.length);

              client.close();
              done();
            });
          });
        });
      });
    });
  }
});
it('Readpreferences should work fine when using a single server instance', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      test.equal(null, err);
      var docs = [];

      // A single large-ish document is enough for this smoke test
      for (var i = 0; i < 1; i++) {
        docs.push({
          a: 1,
          b:
            'helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld'
        });
      }

      // Create a collection we want to drop later
      db.collection('Readpreferencesshouldworkfine', function(err, collection) {
        // Insert a bunch of documents
        collection.insert(docs, configuration.writeConcernMax(), function(err) {
          test.equal(null, err);
          // Perform a simple find and return all the documents
          collection.find({}, { exhaust: true }).toArray(function(err, docs2) {
            test.equal(null, err);
            test.equal(docs.length, docs2.length);

            client.close();
            done();
          });
        });
      });
    });
  }
});
it('Each should not hang on iterating over no results', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // Iterating an empty collection with each() must still terminate,
  // signalled by a single callback invocation with a null item.
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      test.equal(null, err);

      // Use a collection that has never been written to
      db.collection('noresultAvailableForEachToIterate', function(err, emptyCollection) {
        emptyCollection.find({}).each(function(err, item) {
          // each() signals exhaustion by yielding a null item
          test.equal(null, item);

          client.close();
          done();
        });
      });
    });
  }
});
it('shouldCorrectlyFindDocumentsByRegExp', {
  metadata: { requires: { topology: ['single', 'replicaset'] } },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Serialized regexes contain extra trailing chars. Sometimes these trailing chars contain / which makes
      // the original regex invalid, and leads to segmentation fault.
      db.createCollection('test_regex_serialization', function(err, collection) {
        collection.insert(
          { keywords: ['test', 'segmentation', 'fault', 'regex', 'serialization', 'native'] },
          configuration.writeConcernMax(),
          function(err) {
            test.equal(null, err);
            var count = 20,
              run = function(i) {
                // search by regex
                collection.findOne(
                  { keywords: { $all: [/ser/, /test/, /seg/, /fault/, /nat/] } },
                  function(err, item) {
                    test.equal(6, item.keywords.length);
                    // Only the final (i === 0) callback finishes the test
                    if (i === 0) {
                      client.close();
                      done();
                    }
                  }
                );
              };
            // loop a few times to catch the / in trailing chars case
            while (count--) {
              run(count);
            }
          }
        );
      });
    });
  }
});
it('shouldCorrectlyDoFindMinMax', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // NOTE: `min`/`max` here are ordinary document field names, not cursor
      // bounds; the test checks they project correctly through both APIs.
      db.createCollection('shouldCorrectlyDoFindMinMax', function(err, collection) {
        collection.insert(
          { _id: 123, name: 'some name', min: 1, max: 10 },
          configuration.writeConcernMax(),
          function(err) {
            test.equal(null, err);

            // Projection via the fluent .project() helper
            collection
              .find({ _id: { $in: ['some', 'value', 123] } })
              .project({ _id: 1, max: 1 })
              .toArray(function(err, docs) {
                test.equal(null, err);
                test.equal(10, docs[0].max);

                // Same projection expressed through the `fields` option
                collection
                  .find({ _id: { $in: ['some', 'value', 123] } }, { fields: { _id: 1, max: 1 } })
                  .toArray(function(err, docs) {
                    test.equal(null, err);
                    test.equal(10, docs[0].max);

                    client.close();
                    done();
                  });
              });
          }
        );
      });
    });
  }
});
it('Should correctly execute parallelCollectionScan with multiple cursors using each', {
  // Add a tag that our runner can trigger on
  // in this case we are setting that node needs to be higher than 0.10.X to run
  metadata: { requires: { mongodb: '>2.5.5', topology: ['single', 'replicaset'] } },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      var docs = [];

      // Insert some documents
      for (var i = 0; i < 1000; i++) {
        docs.push({ a: i });
      }

      // Get the collection
      var collection = db.collection('parallelCollectionScan_2');
      // Insert 1000 documents in a batch
      collection.insert(docs, function(err) {
        test.equal(null, err);
        var results = [];
        var numCursors = 3;

        // Execute parallelCollectionScan command
        collection.parallelCollectionScan({ numCursors: numCursors }, function(err, cursors) {
          test.equal(null, err);
          test.ok(cursors != null);
          test.ok(cursors.length > 0);
          test.ok(cursors.length <= numCursors);
          // Number of cursors still draining; the test completes at zero
          var left = cursors.length;

          for (var i = 0; i < cursors.length; i++) {
            cursors[i].each(function(err, item) {
              test.equal(err, null);

              // Add item to list
              if (item) results.push(item);
              // Finished each
              if (item == null) {
                left = left - 1;

                // No more cursors let's ensure we got all results
                if (left === 0) {
                  test.equal(docs.length, results.length);

                  // Ensure all cursors are closed
                  for (var j = 0; j < cursors.length; j++) {
                    test.equal(true, cursors[j].isClosed());
                  }

                  client.close();
                  return done();
                }
              }
            });
          }
        });
      });
    });
  }
});
it('Should correctly execute parallelCollectionScan with multiple cursors using next', {
  // Add a tag that our runner can trigger on
  // in this case we are setting that node needs to be higher than 0.10.X to run
  metadata: { requires: { mongodb: '>2.5.5', topology: ['single', 'replicaset'] } },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      var docs = [];

      // Insert some documents
      for (var i = 0; i < 1000; i++) {
        docs.push({ a: i });
      }

      // Get the collection
      var collection = db.collection('parallelCollectionScan_3');
      // Insert 1000 documents in a batch
      collection.insert(docs, function(err) {
        test.equal(null, err);
        var results = [];
        var numCursors = 3;

        // Execute parallelCollectionScan command
        collection.parallelCollectionScan({ numCursors: numCursors }, function(err, cursors) {
          test.equal(null, err);
          test.ok(cursors != null);
          test.ok(cursors.length > 0);
          test.ok(cursors.length <= numCursors);
          var left = cursors.length;

          // Drain one cursor completely, then fold its docs into the total
          var iterate = _cursor => {
            _cursor.toArray().then(_docs => {
              results = results.concat(_docs);
              left--;

              // No more cursors let's ensure we got all results
              test.equal(true, _cursor.isClosed());
              if (left === 0) {
                test.equal(docs.length, results.length);
                client.close();
                return done();
              }
            });
          };

          for (var i = 0; i < cursors.length; i++) {
            iterate(cursors[i]);
          }
        });
      });
    });
  }
});
it('Should correctly execute parallelCollectionScan with single cursor and close', {
  // Add a tag that our runner can trigger on
  // in this case we are setting that node needs to be higher than 0.10.X to run
  metadata: { requires: { mongodb: '>2.5.5', topology: ['single', 'replicaset'] } },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      var docs = [];

      // Insert some documents
      for (var i = 0; i < 1000; i++) {
        docs.push({ a: i });
      }

      // Get the collection
      var collection = db.collection('parallelCollectionScan_4');
      // Insert 1000 documents in a batch
      collection.insert(docs, function(err) {
        test.equal(null, err);
        var numCursors = 1;

        // Execute parallelCollectionScan command
        collection.parallelCollectionScan({ numCursors: numCursors }, function(err, cursors) {
          test.equal(null, err);
          test.ok(cursors != null);
          test.ok(cursors.length > 0);

          // Closing the cursor explicitly must mark it as closed
          cursors[0].close(function(err, result) {
            test.equal(null, err);
            test.ok(result != null);
            test.equal(true, cursors[0].isClosed());

            client.close();
            done();
          });
        });
      });
    });
  }
});
it('Should correctly execute parallelCollectionScan with single cursor streaming', {
  // Add a tag that our runner can trigger on
  // in this case we are setting that node needs to be higher than 0.10.X to run
  metadata: { requires: { mongodb: '>2.5.5', topology: ['single', 'replicaset'] } },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      var docs = [];

      // Insert some documents
      for (var i = 0; i < 1000; i++) {
        docs.push({ a: i });
      }

      // Get the collection
      var collection = db.collection('parallelCollectionScan_5');
      // Insert 1000 documents in a batch
      collection.insert(docs, function(err) {
        test.equal(null, err);
        var results = [];
        var numCursors = 1;

        // Execute parallelCollectionScan command
        collection.parallelCollectionScan({ numCursors: numCursors }, function(err, cursors) {
          test.equal(null, err);
          test.ok(cursors != null);
          test.ok(cursors.length > 0);

          // Consume the cursor as a readable stream
          cursors[0].on('data', function(data) {
            results.push(data);
          });

          // 'end' fires after the last document; the cursor must then be closed
          cursors[0].on('end', function() {
            test.equal(docs.length, results.length);
            test.equal(true, cursors[0].isClosed());
            client.close();
            done();
          });
        });
      });
    });
  }
});
it('Should correctly sort using text search on 2.6 or higher in find', {
  // Add a tag that our runner can trigger on
  // in this case we are setting that node needs to be higher than 0.10.X to run
  metadata: {
    requires: {
      mongodb: '>2.5.5',
      topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger']
    }
  },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      // Get the collection
      var collection = db.collection('textSearchWithSort');
      collection.ensureIndex({ s: 'text' }, function(err) {
        test.equal(null, err);

        collection.insert(
          [{ s: 'spam' }, { s: 'spam eggs and spam' }, { s: 'sausage and eggs' }],
          function(err) {
            test.equal(null, err);

            // Sort by textScore relevance: the document mentioning 'spam'
            // twice must rank first
            collection
              .find(
                { $text: { $search: 'spam' } },
                { fields: { _id: false, s: true, score: { $meta: 'textScore' } } }
              )
              .sort({ score: { $meta: 'textScore' } })
              .toArray(function(err, items) {
                test.equal(null, err);
                test.equal('spam eggs and spam', items[0].s);

                client.close();
                done();
              });
          }
        );
      });
    });
  }
});
it('shouldNotMutateUserOptions', {
  metadata: {
    requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
  },

  // Verify that an options object passed to find() comes back untouched.
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      var collection = db.collection('shouldNotMutateUserOptions');
      var userOptions = { raw: 'TEST' };
      collection.find({}, userOptions, function(error) {
        test.equal(null, error);
        // The driver must not inject cursor settings into the caller's object
        test.equal(undefined, userOptions.skip);
        test.equal(undefined, userOptions.limit);
        test.equal('TEST', userOptions.raw);

        client.close();
        done();
      });
    });
  }
});
it(
  'Should correctly execute parallelCollectionScan with single cursor emitting raw buffers and close',
  {
    // Add a tag that our runner can trigger on
    // in this case we are setting that node needs to be higher than 0.10.X to run
    metadata: { requires: { mongodb: '>2.5.5', topology: ['single', 'replicaset'] } },

    // The actual test we wish to run
    test: function(done) {
      var configuration = this.configuration;
      var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
      client.connect(function(err, client) {
        var db = client.db(configuration.db);
        var docs = [];

        // Insert some documents
        for (var i = 0; i < 1000; i++) {
          docs.push({ a: i });
        }

        // Get the collection
        var collection = db.collection('parallelCollectionScan_4');
        // Insert 1000 documents in a batch
        collection.insert(docs, function(err) {
          test.equal(null, err);
          var numCursors = 1;

          // Execute parallelCollectionScan command; raw: true makes the
          // cursor emit undeserialized BSON buffers
          collection.parallelCollectionScan({ numCursors: numCursors, raw: true }, function(
            err,
            cursors
          ) {
            test.equal(null, err);
            test.ok(cursors != null);
            test.ok(cursors.length > 0);

            // A single next() in raw mode must succeed without error
            cursors[0].next(function(err) {
              test.equal(null, err);
              client.close();
              done();
            });
          });
        });
      });
    }
  }
);
it('Should simulate closed cursor', {
  // Add a tag that our runner can trigger on
  // in this case we are setting that node needs to be higher than 0.10.X to run
  metadata: { requires: { mongodb: '>2.5.5', topology: ['single', 'replicaset'] } },

  // The actual test we wish to run
  test: function(done) {
    var configuration = this.configuration;
    var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
    client.connect(function(err, client) {
      var db = client.db(configuration.db);
      var docs = [];

      // Insert some documents
      for (var i = 0; i < 1000; i++) {
        docs.push({ a: i });
      }

      // Get the collection
      var collection = db.collection('parallelCollectionScan_4');
      // Insert 1000 documents in a batch
      collection.insert(docs, function(err) {
        test.equal(null, err);

        // Get the cursor
        var cursor = collection.find({}).batchSize(2);
        // Get next document
        cursor.next(function(err, doc) {
          test.equal(null, err);
          test.ok(doc != null);

          // Mess with state forcing a call to isDead on the cursor
          // (reaches into the driver's private cursor state — brittle by design)
          cursor.s.state = 2;

          // A next() on the force-closed cursor must now fail
          cursor.next(function(err) {
            test.ok(err !== null);
            client.close();
            done();
          });
        });
      });
    });
  }
});
/**
* Find and modify should allow for a write Concern without failing
* @ignore
*/
it('should correctly execute a findAndModifyWithAWriteConcern', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_and_modify_a_document_3', function(err, collection) {
// Test return new document on change
collection.insert({ a: 1, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 1 },
[['a', 1]],
{ $set: { b: 3 } },
{ new: true },
function(err, updated_doc) {
test.equal(1, updated_doc.value.a);
test.equal(3, updated_doc.value.b);
client.close();
done();
}
);
});
});
});
}
});
/**
* Test a simple find
* @ignore
*/
it('should execute query using batchSize of 0', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('test_find_simple_batchsize_0', function(err, collection) {
// Insert some test documents
collection.insert(
[{ a: 2 }, { b: 3 }, { b: 4 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Ensure correct insertion testing via the cursor and the count function
collection
.find()
.batchSize(-5)
.toArray(function(err, documents) {
test.equal(null, err);
test.equal(3, documents.length);
// Let's close the db
client.close();
done();
});
}
);
});
});
}
});
/**
* Test a simple find
* @ignore
*/
it('should execute query using limit of 0', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('test_find_simple_limit_0', function(err, collection) {
test.equal(null, err);
// Insert some test documents
collection.insert(
[{ a: 2 }, { b: 3 }, { b: 4 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Ensure correct insertion testing via the cursor and the count function
collection
.find()
.limit(-5)
.toArray(function(err, documents) {
test.equal(null, err);
test.equal(3, documents.length);
// Let's close the db
client.close();
done();
});
}
);
});
});
}
});
/**
* Test a simple find
* @ignore
*/
it('should execute query using $elemMatch', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('elem_match_test', function(err, collection) {
test.equal(null, err);
// Insert some test documents
collection.insert(
[{ _id: 1, results: [82, 85, 88] }, { _id: 2, results: [75, 88, 89] }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Ensure correct insertion testing via the cursor and the count function
collection
.find({ results: { $elemMatch: { $gte: 80, $lt: 85 } } })
.toArray(function(err, documents) {
test.equal(null, err);
test.deepEqual([{ _id: 1, results: [82, 85, 88] }], documents);
// Let's close the db
client.close();
done();
});
}
);
});
});
}
});
/**
* Test a simple find
* @ignore
*/
it('should execute query using limit of 101', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('test_find_simple_limit_101', function(err, collection) {
test.equal(null, err);
function clone(obj) {
var o = {};
for (var name in obj) o[name] = obj[name];
return o;
}
var template = {
linkid: '12633170',
advertisercid: '4612127',
websitename: 'Car Rental 8',
destinationurl: 'https://www.carrental8.com/en/',
who: '8027061-12633170-1467924618000',
href: 'http://www.tkqlhce.com',
src: 'http://www.awltovhc.com',
r1: 3,
r2: 44,
r3: 24,
r4: 58
};
var docs = [];
for (var i = 0; i < 1000; i++) {
docs.push(clone(template));
}
// Insert some test documents
collection.insertMany(docs, configuration.writeConcernMax(), function(err, r) {
test.equal(null, err);
test.ok(r);
// Ensure correct insertion testing via the cursor and the count function
collection
.find()
.limit(200)
.toArray(function(err, documents) {
test.equal(null, err);
test.equal(200, documents.length);
// Let's close the db
client.close();
done();
});
});
});
});
}
});
/**
* Test a simple find
* @ignore
*/
it('Should correctly apply db level options to find cursor', {
metadata: { requires: { topology: ['single'] } },
// The actual test we wish to run
test: function(done) {
var listener = require('../..').instrument(function(err) {
test.equal(null, err);
});
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
MongoClient.connect(
configuration.url(),
{
ignoreUndefined: true
},
function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_find_simple_cursor_inheritance');
// Insert some test documents
collection.insert([{ a: 2 }, { b: 3, c: undefined }], function(err) {
test.equal(null, err);
// Ensure correct insertion testing via the cursor and the count function
var cursor = collection.find({ c: undefined });
test.equal(true, cursor.s.options.ignoreUndefined);
cursor.toArray(function(err, documents) {
// console.dir(documents)
test.equal(2, documents.length);
// process.exit(0)
listener.uninstrument();
// Let's close the db
client.close();
done();
});
});
}
);
}
});
});
| 1 | 14,197 | similarly here, are the these cursors not being closed when the client is closed? | mongodb-node-mongodb-native | js |
@@ -22,7 +22,7 @@ import (
)
var (
- supportedPolicies = make(map[string]func() Policy)
+ supportedPolicies = make(map[string]func(string) Policy)
)
type staticUpstream struct { | 1 | package proxy
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"crypto/tls"
"github.com/mholt/caddy/caddyfile"
"github.com/mholt/caddy/caddyhttp/httpserver"
)
var (
supportedPolicies = make(map[string]func() Policy)
)
type staticUpstream struct {
from string
upstreamHeaders http.Header
downstreamHeaders http.Header
stop chan struct{} // Signals running goroutines to stop.
wg sync.WaitGroup // Used to wait for running goroutines to stop.
Hosts HostPool
Policy Policy
KeepAlive int
FailTimeout time.Duration
TryDuration time.Duration
TryInterval time.Duration
MaxConns int64
HealthCheck struct {
Client http.Client
Path string
Interval time.Duration
Timeout time.Duration
Host string
Port string
ContentString string
}
WithoutPathPrefix string
IgnoredSubPaths []string
insecureSkipVerify bool
MaxFails int32
}
// NewStaticUpstreams parses the configuration input and sets up
// static upstreams for the proxy middleware. The host string parameter,
// if not empty, is used for setting the upstream Host header for the
// health checks if the upstream header config requires it.
func NewStaticUpstreams(c caddyfile.Dispenser, host string) ([]Upstream, error) {
var upstreams []Upstream
for c.Next() {
upstream := &staticUpstream{
from: "",
stop: make(chan struct{}),
upstreamHeaders: make(http.Header),
downstreamHeaders: make(http.Header),
Hosts: nil,
Policy: &Random{},
MaxFails: 1,
TryInterval: 250 * time.Millisecond,
MaxConns: 0,
KeepAlive: http.DefaultMaxIdleConnsPerHost,
}
if !c.Args(&upstream.from) {
return upstreams, c.ArgErr()
}
var to []string
for _, t := range c.RemainingArgs() {
parsed, err := parseUpstream(t)
if err != nil {
return upstreams, err
}
to = append(to, parsed...)
}
for c.NextBlock() {
switch c.Val() {
case "upstream":
if !c.NextArg() {
return upstreams, c.ArgErr()
}
parsed, err := parseUpstream(c.Val())
if err != nil {
return upstreams, err
}
to = append(to, parsed...)
default:
if err := parseBlock(&c, upstream); err != nil {
return upstreams, err
}
}
}
if len(to) == 0 {
return upstreams, c.ArgErr()
}
upstream.Hosts = make([]*UpstreamHost, len(to))
for i, host := range to {
uh, err := upstream.NewHost(host)
if err != nil {
return upstreams, err
}
upstream.Hosts[i] = uh
}
if upstream.HealthCheck.Path != "" {
upstream.HealthCheck.Client = http.Client{
Timeout: upstream.HealthCheck.Timeout,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: upstream.insecureSkipVerify},
},
}
// set up health check upstream host if we have one
if host != "" {
hostHeader := upstream.upstreamHeaders.Get("Host")
if strings.Contains(hostHeader, "{host}") {
upstream.HealthCheck.Host = strings.Replace(hostHeader, "{host}", host, -1)
}
}
upstream.wg.Add(1)
go func() {
defer upstream.wg.Done()
upstream.HealthCheckWorker(upstream.stop)
}()
}
upstreams = append(upstreams, upstream)
}
return upstreams, nil
}
func (u *staticUpstream) From() string {
return u.from
}
func (u *staticUpstream) NewHost(host string) (*UpstreamHost, error) {
if !strings.HasPrefix(host, "http") &&
!strings.HasPrefix(host, "unix:") {
host = "http://" + host
}
uh := &UpstreamHost{
Name: host,
Conns: 0,
Fails: 0,
FailTimeout: u.FailTimeout,
Unhealthy: 0,
UpstreamHeaders: u.upstreamHeaders,
DownstreamHeaders: u.downstreamHeaders,
CheckDown: func(u *staticUpstream) UpstreamHostDownFunc {
return func(uh *UpstreamHost) bool {
if atomic.LoadInt32(&uh.Unhealthy) != 0 {
return true
}
if atomic.LoadInt32(&uh.Fails) >= u.MaxFails {
return true
}
return false
}
}(u),
WithoutPathPrefix: u.WithoutPathPrefix,
MaxConns: u.MaxConns,
}
baseURL, err := url.Parse(uh.Name)
if err != nil {
return nil, err
}
uh.ReverseProxy = NewSingleHostReverseProxy(baseURL, uh.WithoutPathPrefix, u.KeepAlive)
if u.insecureSkipVerify {
uh.ReverseProxy.UseInsecureTransport()
}
return uh, nil
}
func parseUpstream(u string) ([]string, error) {
if !strings.HasPrefix(u, "unix:") {
colonIdx := strings.LastIndex(u, ":")
protoIdx := strings.Index(u, "://")
if colonIdx != -1 && colonIdx != protoIdx {
us := u[:colonIdx]
ue := ""
portsEnd := len(u)
if nextSlash := strings.Index(u[colonIdx:], "/"); nextSlash != -1 {
portsEnd = colonIdx + nextSlash
ue = u[portsEnd:]
}
ports := u[len(us)+1 : portsEnd]
if separators := strings.Count(ports, "-"); separators == 1 {
portsStr := strings.Split(ports, "-")
pIni, err := strconv.Atoi(portsStr[0])
if err != nil {
return nil, err
}
pEnd, err := strconv.Atoi(portsStr[1])
if err != nil {
return nil, err
}
if pEnd <= pIni {
return nil, fmt.Errorf("port range [%s] is invalid", ports)
}
hosts := []string{}
for p := pIni; p <= pEnd; p++ {
hosts = append(hosts, fmt.Sprintf("%s:%d%s", us, p, ue))
}
return hosts, nil
}
}
}
return []string{u}, nil
}
func parseBlock(c *caddyfile.Dispenser, u *staticUpstream) error {
switch c.Val() {
case "policy":
if !c.NextArg() {
return c.ArgErr()
}
policyCreateFunc, ok := supportedPolicies[c.Val()]
if !ok {
return c.ArgErr()
}
u.Policy = policyCreateFunc()
case "fail_timeout":
if !c.NextArg() {
return c.ArgErr()
}
dur, err := time.ParseDuration(c.Val())
if err != nil {
return err
}
u.FailTimeout = dur
case "max_fails":
if !c.NextArg() {
return c.ArgErr()
}
n, err := strconv.Atoi(c.Val())
if err != nil {
return err
}
if n < 1 {
return c.Err("max_fails must be at least 1")
}
u.MaxFails = int32(n)
case "try_duration":
if !c.NextArg() {
return c.ArgErr()
}
dur, err := time.ParseDuration(c.Val())
if err != nil {
return err
}
u.TryDuration = dur
case "try_interval":
if !c.NextArg() {
return c.ArgErr()
}
interval, err := time.ParseDuration(c.Val())
if err != nil {
return err
}
u.TryInterval = interval
case "max_conns":
if !c.NextArg() {
return c.ArgErr()
}
n, err := strconv.ParseInt(c.Val(), 10, 64)
if err != nil {
return err
}
u.MaxConns = n
case "health_check":
if !c.NextArg() {
return c.ArgErr()
}
u.HealthCheck.Path = c.Val()
// Set defaults
if u.HealthCheck.Interval == 0 {
u.HealthCheck.Interval = 30 * time.Second
}
if u.HealthCheck.Timeout == 0 {
u.HealthCheck.Timeout = 60 * time.Second
}
case "health_check_interval":
var interval string
if !c.Args(&interval) {
return c.ArgErr()
}
dur, err := time.ParseDuration(interval)
if err != nil {
return err
}
u.HealthCheck.Interval = dur
case "health_check_timeout":
var interval string
if !c.Args(&interval) {
return c.ArgErr()
}
dur, err := time.ParseDuration(interval)
if err != nil {
return err
}
u.HealthCheck.Timeout = dur
case "health_check_port":
if !c.NextArg() {
return c.ArgErr()
}
port := c.Val()
n, err := strconv.Atoi(port)
if err != nil {
return err
}
if n < 0 {
return c.Errf("invalid health_check_port '%s'", port)
}
u.HealthCheck.Port = port
case "health_check_contains":
if !c.NextArg() {
return c.ArgErr()
}
u.HealthCheck.ContentString = c.Val()
case "header_upstream":
var header, value string
if !c.Args(&header, &value) {
// When removing a header, the value can be optional.
if !strings.HasPrefix(header, "-") {
return c.ArgErr()
}
}
u.upstreamHeaders.Add(header, value)
case "header_downstream":
var header, value string
if !c.Args(&header, &value) {
// When removing a header, the value can be optional.
if !strings.HasPrefix(header, "-") {
return c.ArgErr()
}
}
u.downstreamHeaders.Add(header, value)
case "transparent":
u.upstreamHeaders.Add("Host", "{host}")
u.upstreamHeaders.Add("X-Real-IP", "{remote}")
u.upstreamHeaders.Add("X-Forwarded-For", "{remote}")
u.upstreamHeaders.Add("X-Forwarded-Proto", "{scheme}")
case "websocket":
u.upstreamHeaders.Add("Connection", "{>Connection}")
u.upstreamHeaders.Add("Upgrade", "{>Upgrade}")
case "without":
if !c.NextArg() {
return c.ArgErr()
}
u.WithoutPathPrefix = c.Val()
case "except":
ignoredPaths := c.RemainingArgs()
if len(ignoredPaths) == 0 {
return c.ArgErr()
}
u.IgnoredSubPaths = ignoredPaths
case "insecure_skip_verify":
u.insecureSkipVerify = true
case "keepalive":
if !c.NextArg() {
return c.ArgErr()
}
n, err := strconv.Atoi(c.Val())
if err != nil {
return err
}
if n < 0 {
return c.ArgErr()
}
u.KeepAlive = n
default:
return c.Errf("unknown property '%s'", c.Val())
}
return nil
}
func (u *staticUpstream) healthCheck() {
for _, host := range u.Hosts {
hostURL := host.Name
if u.HealthCheck.Port != "" {
hostURL = replacePort(host.Name, u.HealthCheck.Port)
}
hostURL += u.HealthCheck.Path
unhealthy := func() bool {
// set up request, needed to be able to modify headers
// possible errors are bad HTTP methods or un-parsable urls
req, err := http.NewRequest("GET", hostURL, nil)
if err != nil {
return true
}
// set host for request going upstream
if u.HealthCheck.Host != "" {
req.Host = u.HealthCheck.Host
}
r, err := u.HealthCheck.Client.Do(req)
if err != nil {
return true
}
defer func() {
io.Copy(ioutil.Discard, r.Body)
r.Body.Close()
}()
if r.StatusCode < 200 || r.StatusCode >= 400 {
return true
}
if u.HealthCheck.ContentString == "" { // don't check for content string
return false
}
// TODO ReadAll will be replaced if deemed necessary
// See https://github.com/mholt/caddy/pull/1691
buf, err := ioutil.ReadAll(r.Body)
if err != nil {
return true
}
if bytes.Contains(buf, []byte(u.HealthCheck.ContentString)) {
return false
}
return true
}()
if unhealthy {
atomic.StoreInt32(&host.Unhealthy, 1)
} else {
atomic.StoreInt32(&host.Unhealthy, 0)
}
}
}
func (u *staticUpstream) HealthCheckWorker(stop chan struct{}) {
ticker := time.NewTicker(u.HealthCheck.Interval)
u.healthCheck()
for {
select {
case <-ticker.C:
u.healthCheck()
case <-stop:
ticker.Stop()
return
}
}
}
func (u *staticUpstream) Select(r *http.Request) *UpstreamHost {
pool := u.Hosts
if len(pool) == 1 {
if !pool[0].Available() {
return nil
}
return pool[0]
}
allUnavailable := true
for _, host := range pool {
if host.Available() {
allUnavailable = false
break
}
}
if allUnavailable {
return nil
}
if u.Policy == nil {
return (&Random{}).Select(pool, r)
}
return u.Policy.Select(pool, r)
}
func (u *staticUpstream) AllowedPath(requestPath string) bool {
for _, ignoredSubPath := range u.IgnoredSubPaths {
if httpserver.Path(path.Clean(requestPath)).Matches(path.Join(u.From(), ignoredSubPath)) {
return false
}
}
return true
}
// GetTryDuration returns u.TryDuration.
func (u *staticUpstream) GetTryDuration() time.Duration {
return u.TryDuration
}
// GetTryInterval returns u.TryInterval.
func (u *staticUpstream) GetTryInterval() time.Duration {
return u.TryInterval
}
func (u *staticUpstream) GetHostCount() int {
return len(u.Hosts)
}
// Stop sends a signal to all goroutines started by this staticUpstream to exit
// and waits for them to finish before returning.
func (u *staticUpstream) Stop() error {
close(u.stop)
u.wg.Wait()
return nil
}
// RegisterPolicy adds a custom policy to the proxy.
func RegisterPolicy(name string, policy func() Policy) {
supportedPolicies[name] = policy
}
func replacePort(originalURL string, newPort string) string {
parsedURL, err := url.Parse(originalURL)
if err != nil {
return originalURL
}
// handles 'localhost' and 'localhost:8080'
parsedHost, _, err := net.SplitHostPort(parsedURL.Host)
if err != nil {
parsedHost = parsedURL.Host
}
parsedURL.Host = net.JoinHostPort(parsedHost, newPort)
return parsedURL.String()
}
| 1 | 11,146 | Or maybe `func(args ...string) Policy` to make it dynamic in case of any future policy that may require more than one args. | caddyserver-caddy | go |
@@ -36,6 +36,7 @@ func serviceLoggedIn(ctx context.Context, config Config, session SessionInfo,
log.CWarningf(ctx,
"Failed to enable existing journals: %v", err)
}
+ jServer.MakeFBOsForExistingJournals(ctx)
}
err := config.MakeDiskBlockCacheIfNotExists()
if err != nil { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"github.com/keybase/client/go/libkb"
"golang.org/x/net/context"
)
// EnableAdminFeature returns true if admin features should be enabled
// for the currently-logged-in user.
func EnableAdminFeature(ctx context.Context, runMode libkb.RunMode, config Config) bool {
if runMode == libkb.DevelRunMode {
// All users in devel mode are admins.
return true
}
const sessionID = 0
session, err := config.KeybaseService().CurrentSession(ctx, sessionID)
if err != nil {
return false
}
return libkb.IsKeybaseAdmin(session.UID)
}
// serviceLoggedIn should be called when a new user logs in. It
// shouldn't be called again until after serviceLoggedOut is called.
func serviceLoggedIn(ctx context.Context, config Config, session SessionInfo,
bws TLFJournalBackgroundWorkStatus) {
log := config.MakeLogger("")
if jServer, err := GetJournalServer(config); err == nil {
err := jServer.EnableExistingJournals(
ctx, session.UID, session.VerifyingKey, bws)
if err != nil {
log.CWarningf(ctx,
"Failed to enable existing journals: %v", err)
}
}
err := config.MakeDiskBlockCacheIfNotExists()
if err != nil {
log.CWarningf(ctx, "serviceLoggedIn: Failed to enable disk cache: "+
"%+v", err)
}
mdServer := config.MDServer()
if mdServer != nil {
mdServer.RefreshAuthToken(ctx)
}
bServer := config.BlockServer()
if bServer != nil {
bServer.RefreshAuthToken(ctx)
}
config.KBFSOps().RefreshCachedFavorites(ctx)
config.KBFSOps().PushStatusChange()
}
// serviceLoggedOut should be called when the current user logs out.
func serviceLoggedOut(ctx context.Context, config Config) {
if jServer, err := GetJournalServer(config); err == nil {
jServer.shutdownExistingJournals(ctx)
}
config.ResetCaches()
config.UserHistory().Clear()
config.Chat().ClearCache()
mdServer := config.MDServer()
if mdServer != nil {
mdServer.RefreshAuthToken(ctx)
}
bServer := config.BlockServer()
if bServer != nil {
bServer.RefreshAuthToken(ctx)
}
config.KBFSOps().RefreshCachedFavorites(ctx)
config.KBFSOps().PushStatusChange()
// Clear any cached MD for all private TLFs, as they shouldn't be
// readable by a logged out user. We assume that a logged-out
// call always comes before a logged-in call.
config.KBFSOps().ClearPrivateFolderMD(ctx)
}
| 1 | 19,793 | Should this happen even if the above failed? | keybase-kbfs | go |
@@ -1,5 +1,5 @@
/*
-Copyright 2017 The OpenEBS Authors.
+Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. | 1 | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"context"
"fmt"
"os"
)
// CheckError prints err to stderr and exits with code 1 if err is not nil. Otherwise, it is a
// no-op.
func CheckError(err error) {
if err != nil {
if err != context.Canceled {
fmt.Fprintf(os.Stderr, fmt.Sprintf("An error occurred: %v\n", err))
}
os.Exit(1)
}
}
| 1 | 10,764 | Please dont change this . | openebs-maya | go |
@@ -382,7 +382,7 @@ void nano::active_transactions::cleanup_election (nano::unique_lock<nano::mutex>
node.network.publish_filter.clear (block);
}
}
- node.logger.try_log (boost::str (boost::format ("Election erased for root %1%") % election.qualified_root.to_string ()));
+ node.logger.try_log (boost::str (boost::format ("Election erased for root %1%, confirmed: %2$b") % election.qualified_root.to_string () % election.confirmed ()));
}
std::vector<std::shared_ptr<nano::election>> nano::active_transactions::list_active (std::size_t max_a) | 1 | #include <nano/lib/threading.hpp>
#include <nano/node/active_transactions.hpp>
#include <nano/node/confirmation_height_processor.hpp>
#include <nano/node/confirmation_solicitor.hpp>
#include <nano/node/election.hpp>
#include <nano/node/node.hpp>
#include <nano/node/repcrawler.hpp>
#include <nano/secure/store.hpp>
#include <boost/format.hpp>
#include <boost/variant/get.hpp>
#include <numeric>
using namespace std::chrono;
std::size_t constexpr nano::active_transactions::max_active_elections_frontier_insertion;
constexpr std::chrono::minutes nano::active_transactions::expired_optimistic_election_info_cutoff;
nano::active_transactions::active_transactions (nano::node & node_a, nano::confirmation_height_processor & confirmation_height_processor_a) :
scheduler{ node_a.scheduler }, // Move dependencies requiring this circular reference
confirmation_height_processor{ confirmation_height_processor_a },
node{ node_a },
generator{ node_a.config, node_a.ledger, node_a.wallets, node_a.vote_processor, node_a.history, node_a.network, node_a.stats, false },
final_generator{ node_a.config, node_a.ledger, node_a.wallets, node_a.vote_processor, node_a.history, node_a.network, node_a.stats, true },
election_time_to_live{ node_a.network_params.network.is_dev_network () ? 0s : 2s },
thread ([this] () {
nano::thread_role::set (nano::thread_role::name::request_loop);
request_loop ();
})
{
// Register a callback which will get called after a block is cemented
confirmation_height_processor.add_cemented_observer ([this] (std::shared_ptr<nano::block> const & callback_block_a) {
this->block_cemented_callback (callback_block_a);
});
// Register a callback which will get called if a block is already cemented
confirmation_height_processor.add_block_already_cemented_observer ([this] (nano::block_hash const & hash_a) {
this->block_already_cemented_callback (hash_a);
});
nano::unique_lock<nano::mutex> lock (mutex);
condition.wait (lock, [&started = started] { return started; });
}
nano::active_transactions::~active_transactions ()
{
stop ();
}
bool nano::active_transactions::insert_election_from_frontiers_confirmation (std::shared_ptr<nano::block> const & block_a, nano::account const & account_a, nano::uint128_t previous_balance_a, nano::election_behavior election_behavior_a)
{
bool inserted{ false };
nano::unique_lock<nano::mutex> lock (mutex);
if (roots.get<tag_root> ().find (block_a->qualified_root ()) == roots.get<tag_root> ().end ())
{
std::function<void (std::shared_ptr<nano::block> const &)> election_confirmation_cb;
if (election_behavior_a == nano::election_behavior::optimistic)
{
election_confirmation_cb = [this] (std::shared_ptr<nano::block> const & block_a) {
--optimistic_elections_count;
};
}
auto insert_result = insert_impl (lock, block_a, previous_balance_a, election_behavior_a, election_confirmation_cb);
inserted = insert_result.inserted;
if (inserted)
{
insert_result.election->transition_active ();
if (insert_result.election->optimistic ())
{
++optimistic_elections_count;
}
}
}
return inserted;
}
nano::frontiers_confirmation_info nano::active_transactions::get_frontiers_confirmation_info ()
{
// Limit maximum count of elections to start
auto rep_counts (node.wallets.reps ());
bool representative (node.config.enable_voting && rep_counts.voting > 0);
bool half_princpal_representative (representative && rep_counts.have_half_rep ());
/* Check less frequently for regular nodes in auto mode */
bool agressive_mode (half_princpal_representative || node.config.frontiers_confirmation == nano::frontiers_confirmation_mode::always);
auto is_dev_network = node.network_params.network.is_dev_network ();
auto roots_size = size ();
auto check_time_exceeded = std::chrono::steady_clock::now () >= next_frontier_check;
auto max_elections = max_active_elections_frontier_insertion;
auto low_active_elections = roots_size < max_elections;
bool wallets_check_required = (!skip_wallets || !priority_wallet_cementable_frontiers.empty ()) && !agressive_mode;
// Minimise dropping real-time transactions, set the number of frontiers added to a factor of the maximum number of possible active elections
auto max_active = node.config.active_elections_size / 20;
if (roots_size <= max_active && (check_time_exceeded || wallets_check_required || (!is_dev_network && low_active_elections && agressive_mode)))
{
// When the number of active elections is low increase max number of elections for setting confirmation height.
if (max_active > roots_size + max_elections)
{
max_elections = max_active - roots_size;
}
}
else
{
max_elections = 0;
}
return nano::frontiers_confirmation_info{ max_elections, agressive_mode };
}
void nano::active_transactions::set_next_frontier_check (bool agressive_mode_a)
{
auto request_interval (std::chrono::milliseconds (node.network_params.network.request_interval_ms));
auto rel_time_next_frontier_check = request_interval * (agressive_mode_a ? 20 : 60);
// Decrease check time for dev network
int dev_network_factor = node.network_params.network.is_dev_network () ? 1000 : 1;
next_frontier_check = steady_clock::now () + (rel_time_next_frontier_check / dev_network_factor);
}
void nano::active_transactions::confirm_prioritized_frontiers (nano::transaction const & transaction_a, uint64_t max_elections_a, uint64_t & elections_count_a)
{
nano::unique_lock<nano::mutex> lk (mutex);
auto start_elections_for_prioritized_frontiers = [&transaction_a, &elections_count_a, max_elections_a, &lk, this] (prioritize_num_uncemented & cementable_frontiers) {
while (!cementable_frontiers.empty () && !this->stopped && elections_count_a < max_elections_a && optimistic_elections_count < max_optimistic ())
{
auto cementable_account_front_it = cementable_frontiers.get<tag_uncemented> ().begin ();
auto cementable_account = *cementable_account_front_it;
cementable_frontiers.get<tag_uncemented> ().erase (cementable_account_front_it);
if (expired_optimistic_election_infos.get<tag_account> ().count (cementable_account.account) == 0)
{
lk.unlock ();
nano::account_info info;
auto error = this->node.store.account.get (transaction_a, cementable_account.account, info);
if (!error)
{
if (!this->confirmation_height_processor.is_processing_block (info.head))
{
nano::confirmation_height_info confirmation_height_info;
this->node.store.confirmation_height.get (transaction_a, cementable_account.account, confirmation_height_info);
if (info.block_count > confirmation_height_info.height)
{
auto block (this->node.store.block.get (transaction_a, info.head));
auto previous_balance (this->node.ledger.balance (transaction_a, block->previous ()));
auto inserted_election = this->insert_election_from_frontiers_confirmation (block, cementable_account.account, previous_balance, nano::election_behavior::optimistic);
if (inserted_election)
{
++elections_count_a;
}
}
}
}
lk.lock ();
}
}
};
start_elections_for_prioritized_frontiers (priority_wallet_cementable_frontiers);
start_elections_for_prioritized_frontiers (priority_cementable_frontiers);
}
// Observer invoked once a block has been cemented (its confirmation height raised).
// Fires the appropriate block/account observers, hands the winning election's status
// to the recently-cemented history, and activates successor elections when applicable.
void nano::active_transactions::block_cemented_callback (std::shared_ptr<nano::block> const & block_a)
{
auto transaction = node.store.tx_begin_read ();
boost::optional<nano::election_status_type> election_status_type;
if (!confirmation_height_processor.is_processing_added_block (block_a->hash ()))
{
election_status_type = confirm_block (transaction, block_a);
}
else
{
// This block was explicitly added to the confirmation height_processor
election_status_type = nano::election_status_type::active_confirmed_quorum;
}
if (election_status_type.is_initialized ())
{
if (election_status_type == nano::election_status_type::inactive_confirmation_height)
{
// No active election existed for this block; synthesize a minimal election_status
// (zero tally/duration, block count 1) purely so observers still get notified
nano::account account{};
nano::uint128_t amount (0);
bool is_state_send (false);
bool is_state_epoch (false);
nano::account pending_account{};
node.process_confirmed_data (transaction, block_a, block_a->hash (), account, amount, is_state_send, is_state_epoch, pending_account);
node.observers.blocks.notify (nano::election_status{ block_a, 0, 0, std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()), std::chrono::duration_values<std::chrono::milliseconds>::zero (), 0, 1, 0, nano::election_status_type::inactive_confirmation_height }, {}, account, amount, is_state_send, is_state_epoch);
}
else
{
auto hash (block_a->hash ());
// Look up (and consume) the election recorded for this winner by add_election_winner_details
nano::unique_lock<nano::mutex> election_winners_lk (election_winner_details_mutex);
auto existing (election_winner_details.find (hash));
if (existing != election_winner_details.end ())
{
auto election = existing->second;
election_winner_details.erase (hash);
election_winners_lk.unlock ();
if (election->confirmed () && election->winner ()->hash () == hash)
{
// Snapshot the status under the election mutex, then release it before
// calling into the node (process_confirmed_data / receive_confirmed)
nano::unique_lock<nano::mutex> election_lk (election->mutex);
auto status_l = election->status;
election_lk.unlock ();
add_recently_cemented (status_l);
auto destination (block_a->link ().is_zero () ? block_a->destination () : block_a->link ().as_account ());
node.receive_confirmed (transaction, hash, destination);
nano::account account{};
nano::uint128_t amount (0);
bool is_state_send (false);
bool is_state_epoch (false);
nano::account pending_account{};
node.process_confirmed_data (transaction, block_a, hash, account, amount, is_state_send, is_state_epoch, pending_account);
// Re-acquire to stamp the final status type and request count before notifying observers
election_lk.lock ();
election->status.type = *election_status_type;
election->status.confirmation_request_count = election->confirmation_request_count;
status_l = election->status;
election_lk.unlock ();
auto votes (election->votes_with_weight ());
node.observers.blocks.notify (status_l, votes, account, amount, is_state_send, is_state_epoch);
if (amount > 0)
{
node.observers.account_balance.notify (account, false);
if (!pending_account.is_zero ())
{
node.observers.account_balance.notify (pending_account, true);
}
}
}
}
}
auto const & account (!block_a->account ().is_zero () ? block_a->account () : block_a->sideband ().account);
debug_assert (!account.is_zero ());
// Flip the final-votes canary once the designated canary account reaches its trigger height
if (!node.ledger.cache.final_votes_confirmation_canary.load () && account == node.network_params.ledger.final_votes_canary_account && block_a->sideband ().height >= node.network_params.ledger.final_votes_canary_height)
{
node.ledger.cache.final_votes_confirmation_canary.store (true);
}
// Next-block activations are done after cementing hardcoded bootstrap count to allow confirming very large chains without interference
bool const cemented_bootstrap_count_reached{ node.ledger.cache.cemented_count >= node.ledger.bootstrap_weight_max_blocks };
// Next-block activations are only done for blocks with previously active elections
bool const was_active{ *election_status_type == nano::election_status_type::active_confirmed_quorum || *election_status_type == nano::election_status_type::active_confirmation_height };
if (cemented_bootstrap_count_reached && was_active)
{
// Start or vote for the next unconfirmed block
scheduler.activate (account, transaction);
// Start or vote for the next unconfirmed block in the destination account
auto const & destination (node.ledger.block_destination (transaction, *block_a));
if (!destination.is_zero () && destination != account)
{
scheduler.activate (destination, transaction);
}
}
}
}
// Remember which election produced this winning block so block_cemented_callback
// can retrieve it once the block is cemented.
void nano::active_transactions::add_election_winner_details (nano::block_hash const & hash_a, std::shared_ptr<nano::election> const & election_a)
{
	nano::lock_guard<nano::mutex> winners_guard{ election_winner_details_mutex };
	election_winner_details.emplace (hash_a, election_a);
}
// Forget the winning election recorded for this block hash, if any.
void nano::active_transactions::remove_election_winner_details (nano::block_hash const & hash_a)
{
	nano::lock_guard<nano::mutex> winners_guard{ election_winner_details_mutex };
	election_winner_details.erase (hash_a);
}
// Invoked when the confirmation height processor finds the block already cemented.
// Depending on timing, election_winner_details may still hold an entry for this
// hash: a block can win an election and have its confirmed-observer run before the
// hash is added to election_winner_details. Since the callbacks have already been
// performed in that case, simply dropping the stale entry is safe.
void nano::active_transactions::block_already_cemented_callback (nano::block_hash const & hash_a)
{
	remove_election_winner_details (hash_a);
}
int64_t nano::active_transactions::vacancy () const
{
nano::lock_guard<nano::mutex> lock{ mutex };
auto result = static_cast<int64_t> (node.config.active_elections_size) - static_cast<int64_t> (roots.size ());
return result;
}
// One pass of the request loop: snapshot the active elections, solicit
// confirmations/votes for each, and erase elections whose time-to-live expired.
// Called with lock_a held; releases it while soliciting and re-acquires before returning.
void nano::active_transactions::request_confirm (nano::unique_lock<nano::mutex> & lock_a)
{
debug_assert (lock_a.owns_lock ());
std::size_t const this_loop_target_l (roots.size ());
auto const elections_l{ list_active_impl (this_loop_target_l) };
lock_a.unlock ();
nano::confirmation_solicitor solicitor (node.network, node.config);
solicitor.prepare (node.rep_crawler.principal_representatives (std::numeric_limits<std::size_t>::max ()));
nano::vote_generator_session generator_session (generator);
nano::vote_generator_session final_generator_session (generator);
auto const election_ttl_cutoff_l (std::chrono::steady_clock::now () - election_time_to_live);
std::size_t unconfirmed_count_l (0);
nano::timer<std::chrono::milliseconds> elapsed (nano::timer_state::started);
/*
 * Loop through active elections in descending order of proof-of-work difficulty, requesting confirmation
 *
 * Only up to a certain amount of elections are queued for confirmation request and block rebroadcasting. The remaining elections can still be confirmed if votes arrive
 * Elections extending the soft config.active_elections_size limit are flushed after a certain time-to-live cutoff
 * Flushed elections are later re-activated via frontier confirmation
 */
for (auto const & election_l : elections_l)
{
bool const confirmed_l (election_l->confirmed ());
unconfirmed_count_l += !confirmed_l;
// transition_time returns true when the election should be erased
if (election_l->transition_time (solicitor))
{
if (election_l->optimistic () && election_l->failed ())
{
// Failed optimistic elections that actually requested confirmation are
// remembered for later pessimistic retry
if (election_l->confirmation_request_count != 0)
{
// Locks active mutex
add_expired_optimistic_election (*election_l);
}
--optimistic_elections_count;
}
// Locks active mutex, cleans up the election and erases it from the main container
if (!confirmed_l)
{
node.stats.inc (nano::stat::type::election, nano::stat::detail::election_drop_expired);
}
erase (election_l->qualified_root);
}
}
// Send out the batched confirmation requests and generated votes
solicitor.flush ();
generator_session.flush ();
final_generator_session.flush ();
lock_a.lock ();
if (node.config.logging.timing_logging ())
{
node.logger.try_log (boost::str (boost::format ("Processed %1% elections (%2% were already confirmed) in %3% %4%") % this_loop_target_l % (this_loop_target_l - unconfirmed_count_l) % elapsed.value ().count () % elapsed.unit ()));
}
}
// Remove an election and all of its blocks from the active containers, then
// notify observers. Called with lock_a (the active mutex) held; the lock is
// released before observer notification and NOT re-acquired.
void nano::active_transactions::cleanup_election (nano::unique_lock<nano::mutex> & lock_a, nano::election const & election)
{
if (!election.confirmed ())
{
node.stats.inc (nano::stat::type::election, nano::stat::detail::election_drop_all);
}
// Snapshot the election's blocks so they can be processed after the lock is released
auto blocks_l = election.blocks ();
for (auto const & [hash, block] : blocks_l)
{
auto erased (blocks.erase (hash));
(void)erased;
debug_assert (erased == 1);
erase_inactive_votes_cache (hash);
}
roots.get<tag_root> ().erase (roots.get<tag_root> ().find (election.qualified_root));
lock_a.unlock ();
// A slot freed up; let the scheduler know
vacancy_update ();
for (auto const & [hash, block] : blocks_l)
{
// Notify observers about dropped elections & blocks lost confirmed elections
if (!election.confirmed () || hash != election.winner ()->hash ())
{
node.observers.active_stopped.notify (hash);
}
if (!election.confirmed ())
{
// Clear from publish filter
node.network.publish_filter.clear (block);
}
}
node.logger.try_log (boost::str (boost::format ("Election erased for root %1%") % election.qualified_root.to_string ()));
}
// Thread-safe wrapper around list_active_impl; takes the active mutex itself.
std::vector<std::shared_ptr<nano::election>> nano::active_transactions::list_active (std::size_t max_a)
{
	nano::lock_guard<nano::mutex> active_guard{ mutex };
	return list_active_impl (max_a);
}
// Collect up to max_a elections in the container's random-access order.
// Caller must already hold the active mutex.
std::vector<std::shared_ptr<nano::election>> nano::active_transactions::list_active_impl (std::size_t max_a) const
{
	std::vector<std::shared_ptr<nano::election>> elections_l;
	elections_l.reserve (std::min (max_a, roots.size ()));
	auto const & random_order = roots.get<tag_random_access> ();
	for (auto it = random_order.begin (); it != random_order.end () && elections_l.size () < max_a; ++it)
	{
		elections_l.push_back (it->election);
	}
	return elections_l;
}
// Track an optimistic election that expired without confirming so its account can
// later be retried pessimistically (see confirm_expired_frontiers_pessimistically).
// Called with the active mutex unlocked; locks it internally.
void nano::active_transactions::add_expired_optimistic_election (nano::election const & election_a)
{
	nano::lock_guard<nano::mutex> guard (mutex);
	// Determine the account of the winning block; fall back to sideband data when
	// the block type does not carry the account itself
	auto account = election_a.status.winner->account ();
	if (account.is_zero ())
	{
		account = election_a.status.winner->sideband ().account;
	}
	auto it = expired_optimistic_election_infos.get<tag_account> ().find (account);
	if (it != expired_optimistic_election_infos.get<tag_account> ().end ())
	{
		// Already tracked: refresh the expiry time and mark it startable again
		expired_optimistic_election_infos.get<tag_account> ().modify (it, [] (auto & expired_optimistic_election) {
			expired_optimistic_election.expired_time = std::chrono::steady_clock::now ();
			expired_optimistic_election.election_started = false;
		});
	}
	else
	{
		expired_optimistic_election_infos.emplace (std::chrono::steady_clock::now (), account);
	}
	// Expire the oldest one if a maximum is reached
	// (std::size_t avoids a signed/unsigned comparison against the container size)
	auto const max_expired_optimistic_election_infos = std::size_t{ 10000 };
	if (expired_optimistic_election_infos.size () > max_expired_optimistic_election_infos)
	{
		expired_optimistic_election_infos.get<tag_expired_time> ().erase (expired_optimistic_election_infos.get<tag_expired_time> ().begin ());
	}
	expired_optimistic_election_infos_size = expired_optimistic_election_infos.size ();
}
// Maximum number of concurrent optimistic elections: unlimited while still below
// the hardcoded bootstrap block count, throttled to 50 afterwards.
unsigned nano::active_transactions::max_optimistic ()
{
	if (node.ledger.cache.cemented_count < node.ledger.bootstrap_weight_max_blocks)
	{
		return std::numeric_limits<unsigned>::max ();
	}
	return 50u;
}
// Periodic frontier-confirmation pass: prioritize accounts by uncemented block
// count, then start elections for the highest-priority frontiers. Called with
// lock_a held; releases it during ledger traversal and re-acquires before returning.
void nano::active_transactions::frontiers_confirmation (nano::unique_lock<nano::mutex> & lock_a)
{
// Spend some time prioritizing accounts with the most uncemented blocks to reduce voting traffic
auto request_interval = std::chrono::milliseconds (node.network_params.network.request_interval_ms);
// Spend longer searching ledger accounts when there is a low amount of elections going on
auto low_active = roots.size () < 1000;
auto time_to_spend_prioritizing_ledger_accounts = request_interval / (low_active ? 20 : 100);
auto time_to_spend_prioritizing_wallet_accounts = request_interval / 250;
auto time_to_spend_confirming_pessimistic_accounts = time_to_spend_prioritizing_ledger_accounts;
lock_a.unlock ();
auto transaction = node.store.tx_begin_read ();
// Dev networks use a fixed, short prioritization budget
prioritize_frontiers_for_confirmation (transaction, node.network_params.network.is_dev_network () ? std::chrono::milliseconds (50) : time_to_spend_prioritizing_ledger_accounts, time_to_spend_prioritizing_wallet_accounts);
auto frontiers_confirmation_info = get_frontiers_confirmation_info ();
if (frontiers_confirmation_info.can_start_elections ())
{
uint64_t elections_count (0);
// Optimistic elections first, then pessimistic retries, sharing the same budget
confirm_prioritized_frontiers (transaction, frontiers_confirmation_info.max_elections, elections_count);
confirm_expired_frontiers_pessimistically (transaction, frontiers_confirmation_info.max_elections, elections_count);
set_next_frontier_check (frontiers_confirmation_info.aggressive_mode);
}
lock_a.lock ();
}
/*
 * This function takes the expired_optimistic_election_infos generated from failed elections from frontiers confirmations and starts
 * confirming blocks at cemented height + 1 (cemented frontier successor) for an account only if all dependent blocks already
 * confirmed.
 */
void nano::active_transactions::confirm_expired_frontiers_pessimistically (nano::transaction const & transaction_a, uint64_t max_elections_a, uint64_t & elections_count_a)
{
	// Note: a previously-declared account iterator pair and timer here were unused
	// (shadowed by the loop below) and have been removed.
	nano::confirmation_height_info confirmation_height_info;
	// Loop through any expired optimistic elections which have not been started yet. This tag stores already started ones first
	std::vector<nano::account> elections_started_for_account;
	for (auto i = expired_optimistic_election_infos.get<tag_election_started> ().lower_bound (false); i != expired_optimistic_election_infos.get<tag_election_started> ().end ();)
	{
		if (stopped || elections_count_a >= max_elections_a)
		{
			break;
		}
		auto const & account{ i->account };
		nano::account_info account_info;
		bool should_delete{ true };
		if (!node.store.account.get (transaction_a, account, account_info))
		{
			node.store.confirmation_height.get (transaction_a, account, confirmation_height_info);
			if (account_info.block_count > confirmation_height_info.height)
			{
				should_delete = false;
				// The next block to confirm is the cemented frontier's successor
				// (or the open block when nothing is cemented yet)
				std::shared_ptr<nano::block> previous_block;
				std::shared_ptr<nano::block> block;
				if (confirmation_height_info.height == 0)
				{
					block = node.store.block.get (transaction_a, account_info.open_block);
				}
				else
				{
					previous_block = node.store.block.get (transaction_a, confirmation_height_info.frontier);
					block = node.store.block.get (transaction_a, previous_block->sideband ().successor);
				}
				// Only start an election when all dependent blocks are already confirmed
				if (block && !node.confirmation_height_processor.is_processing_block (block->hash ()) && node.ledger.dependents_confirmed (transaction_a, *block))
				{
					nano::uint128_t previous_balance{ 0 };
					if (previous_block && previous_block->balance ().is_zero ())
					{
						previous_balance = previous_block->sideband ().balance.number ();
					}
					auto inserted_election = insert_election_from_frontiers_confirmation (block, account, previous_balance, nano::election_behavior::normal);
					if (inserted_election)
					{
						++elections_count_a;
					}
					elections_started_for_account.push_back (i->account);
				}
			}
		}
		if (should_delete)
		{
			// This account is confirmed already or doesn't exist.
			i = expired_optimistic_election_infos.get<tag_election_started> ().erase (i);
			expired_optimistic_election_infos_size = expired_optimistic_election_infos.size ();
		}
		else
		{
			++i;
		}
	}
	// Mark the accounts whose elections started so the tag_election_started index
	// moves them behind the not-yet-started entries on the next pass
	for (auto const & account : elections_started_for_account)
	{
		auto it = expired_optimistic_election_infos.get<tag_account> ().find (account);
		debug_assert (it != expired_optimistic_election_infos.get<tag_account> ().end ());
		expired_optimistic_election_infos.get<tag_account> ().modify (it, [] (auto & expired_optimistic_election_info_a) {
			expired_optimistic_election_info_a.election_started = true;
		});
	}
}
// Frontier confirmation runs only when enabled by config, the confirmation
// height processor backlog has room, and there is still something uncemented.
bool nano::active_transactions::should_do_frontiers_confirmation () const
{
	if (node.config.frontiers_confirmation == nano::frontiers_confirmation_mode::disabled)
	{
		return false;
	}
	if (confirmation_height_processor.awaiting_processing_size () > confirmed_frontiers_max_pending_size)
	{
		return false;
	}
	return node.ledger.cache.block_count != node.ledger.cache.cemented_count;
}
// Main election servicing thread: repeatedly runs request_confirm, then sleeps
// until the next request interval (or wakes early on stop).
void nano::active_transactions::request_loop ()
{
nano::unique_lock<nano::mutex> lock (mutex);
started = true;
lock.unlock ();
// Wake any thread waiting in stop() for startup to complete
condition.notify_all ();
// The wallets and active_transactions objects are mutually dependent, so we need a fully
// constructed node before proceeding.
this->node.node_initialized_latch.wait ();
lock.lock ();
while (!stopped && !node.flags.disable_request_loop)
{
// If many votes are queued, ensure at least the currently active ones finish processing
lock.unlock ();
if (node.vote_processor.half_full ())
{
node.vote_processor.flush_active ();
}
lock.lock ();
auto const stamp_l = std::chrono::steady_clock::now ();
// request_confirm temporarily releases the lock internally
request_confirm (lock);
if (!stopped)
{
// Sleep for the remainder of the interval, but never less than half an
// interval even if request_confirm overran
auto const min_sleep_l = std::chrono::milliseconds (node.network_params.network.request_interval_ms / 2);
auto const wakeup_l = std::max (stamp_l + std::chrono::milliseconds (node.network_params.network.request_interval_ms), std::chrono::steady_clock::now () + min_sleep_l);
condition.wait_until (lock, wakeup_l, [&wakeup_l, &stopped = stopped] { return stopped || std::chrono::steady_clock::now () >= wakeup_l; });
}
}
}
// Insert or update an account in a prioritized-frontiers container, ordered by
// uncemented block count. The container has a fixed capacity; when full, the new
// account replaces the least-uncemented entry only if it has more uncemented blocks.
// Returns true only when a genuinely new entry was inserted (not on update/replace).
bool nano::active_transactions::prioritize_account_for_confirmation (nano::active_transactions::prioritize_num_uncemented & cementable_frontiers_a, std::size_t & cementable_frontiers_size_a, nano::account const & account_a, nano::account_info const & info_a, uint64_t confirmation_height_a)
{
auto inserted_new{ false };
if (info_a.block_count > confirmation_height_a && !confirmation_height_processor.is_processing_block (info_a.head))
{
auto num_uncemented = info_a.block_count - confirmation_height_a;
nano::lock_guard<nano::mutex> guard (mutex);
auto it = cementable_frontiers_a.get<tag_account> ().find (account_a);
if (it != cementable_frontiers_a.get<tag_account> ().end ())
{
if (it->blocks_uncemented != num_uncemented)
{
// Account already exists and there is now a different uncemented block count so update it in the container
cementable_frontiers_a.get<tag_account> ().modify (it, [num_uncemented] (nano::cementable_account & info) {
info.blocks_uncemented = num_uncemented;
});
}
}
else
{
debug_assert (cementable_frontiers_size_a <= max_priority_cementable_frontiers);
if (cementable_frontiers_size_a == max_priority_cementable_frontiers)
{
// The maximum amount of frontiers stored has been reached. Check if the current frontier
// has more uncemented blocks than the lowest uncemented frontier in the collection if so replace it.
// tag_uncemented is ordered, so the last entry is the least-uncemented one
auto least_uncemented_frontier_it = cementable_frontiers_a.get<tag_uncemented> ().end ();
--least_uncemented_frontier_it;
if (num_uncemented > least_uncemented_frontier_it->blocks_uncemented)
{
cementable_frontiers_a.get<tag_uncemented> ().erase (least_uncemented_frontier_it);
cementable_frontiers_a.get<tag_account> ().emplace (account_a, num_uncemented);
}
}
else
{
inserted_new = true;
cementable_frontiers_a.get<tag_account> ().emplace (account_a, num_uncemented);
}
}
// Keep the caller's cached size in sync (read outside the mutex elsewhere)
cementable_frontiers_size_a = cementable_frontiers_a.size ();
}
return inserted_new;
}
// Walk wallet accounts first, then ledger accounts, adding those with uncemented
// blocks to the priority containers used by frontier confirmation. Traversal is
// time-budgeted and resumes from where the previous call left off
// (next_wallet_id_accounts / next_frontier_account).
void nano::active_transactions::prioritize_frontiers_for_confirmation (nano::transaction const & transaction_a, std::chrono::milliseconds ledger_account_traversal_max_time_a, std::chrono::milliseconds wallet_account_traversal_max_time_a)
{
// Don't try to prioritize when there are a large number of pending confirmation heights as blocks can be cemented in the meantime, making the prioritization less reliable
if (confirmation_height_processor.awaiting_processing_size () < confirmed_frontiers_max_pending_size)
{
std::size_t priority_cementable_frontiers_size;
std::size_t priority_wallet_cementable_frontiers_size;
{
nano::lock_guard<nano::mutex> guard (mutex);
priority_cementable_frontiers_size = priority_cementable_frontiers.size ();
priority_wallet_cementable_frontiers_size = priority_wallet_cementable_frontiers.size ();
}
nano::timer<std::chrono::milliseconds> wallet_account_timer (nano::timer_state::started);
// Remove any old expired optimistic elections so they are no longer excluded in subsequent checks
auto expired_cutoff_it (expired_optimistic_election_infos.get<tag_expired_time> ().lower_bound (std::chrono::steady_clock::now () - expired_optimistic_election_info_cutoff));
expired_optimistic_election_infos.get<tag_expired_time> ().erase (expired_optimistic_election_infos.get<tag_expired_time> ().begin (), expired_cutoff_it);
expired_optimistic_election_infos_size = expired_optimistic_election_infos.size ();
// Stop iterating once the optimistic election budget (max_optimistic) would be exhausted
auto num_new_inserted{ 0u };
auto should_iterate = [this, &num_new_inserted] () {
auto max_optimistic_l = max_optimistic ();
return !stopped && (max_optimistic_l > optimistic_elections_count && max_optimistic_l - optimistic_elections_count > num_new_inserted);
};
if (!skip_wallets)
{
// Prioritize wallet accounts first
{
nano::lock_guard<nano::mutex> lock (node.wallets.mutex);
auto wallet_transaction (node.wallets.tx_begin_read ());
auto const & items = node.wallets.items;
if (items.empty ())
{
skip_wallets = true;
}
for (auto item_it = items.cbegin (); item_it != items.cend () && should_iterate (); ++item_it)
{
// Skip this wallet if it has been traversed already while there are others still awaiting
if (wallet_ids_already_iterated.find (item_it->first) != wallet_ids_already_iterated.end ())
{
continue;
}
nano::account_info info;
auto & wallet (item_it->second);
nano::lock_guard<std::recursive_mutex> wallet_lock (wallet->store.mutex);
// Resume from this wallet's saved cursor (or start at special_count for a new wallet)
auto & next_wallet_frontier_account = next_wallet_id_accounts.emplace (item_it->first, wallet_store::special_count).first->second;
auto i (wallet->store.begin (wallet_transaction, next_wallet_frontier_account));
auto n (wallet->store.end ());
for (; i != n && should_iterate (); ++i)
{
auto const & account (i->first);
// Skip accounts with a pending pessimistic retry or that do not exist in the ledger
if (expired_optimistic_election_infos.get<tag_account> ().count (account) == 0 && !node.store.account.get (transaction_a, account, info))
{
nano::confirmation_height_info confirmation_height_info;
node.store.confirmation_height.get (transaction_a, account, confirmation_height_info);
// If it exists in normal priority collection delete from there.
auto it = priority_cementable_frontiers.find (account);
if (it != priority_cementable_frontiers.end ())
{
nano::lock_guard<nano::mutex> guard (mutex);
priority_cementable_frontiers.erase (it);
priority_cementable_frontiers_size = priority_cementable_frontiers.size ();
}
auto insert_newed = prioritize_account_for_confirmation (priority_wallet_cementable_frontiers, priority_wallet_cementable_frontiers_size, account, info, confirmation_height_info.height);
if (insert_newed)
{
++num_new_inserted;
}
if (wallet_account_timer.since_start () >= wallet_account_traversal_max_time_a)
{
break;
}
}
// Advance the cursor so the next call resumes after this account
next_wallet_frontier_account = account.number () + 1;
}
// Go back to the beginning when we have reached the end of the wallet accounts for this wallet
if (i == n)
{
wallet_ids_already_iterated.emplace (item_it->first);
next_wallet_id_accounts.at (item_it->first) = wallet_store::special_count;
// Skip wallet accounts when they have all been traversed
if (std::next (item_it) == items.cend ())
{
wallet_ids_already_iterated.clear ();
skip_wallets = true;
}
}
}
}
}
// Second phase: traverse ledger accounts, resuming from next_frontier_account
nano::timer<std::chrono::milliseconds> timer (nano::timer_state::started);
auto i (node.store.account.begin (transaction_a, next_frontier_account));
auto n (node.store.account.end ());
for (; i != n && should_iterate (); ++i)
{
auto const & account (i->first);
auto const & info (i->second);
// Wallet accounts were already handled in the first phase
if (priority_wallet_cementable_frontiers.find (account) == priority_wallet_cementable_frontiers.end ())
{
if (expired_optimistic_election_infos.get<tag_account> ().count (account) == 0)
{
nano::confirmation_height_info confirmation_height_info;
node.store.confirmation_height.get (transaction_a, account, confirmation_height_info);
auto insert_newed = prioritize_account_for_confirmation (priority_cementable_frontiers, priority_cementable_frontiers_size, account, info, confirmation_height_info.height);
if (insert_newed)
{
++num_new_inserted;
}
}
}
next_frontier_account = account.number () + 1;
if (timer.since_start () >= ledger_account_traversal_max_time_a)
{
break;
}
}
// Go back to the beginning when we have reached the end of the accounts and start with wallet accounts next time
if (i == n)
{
next_frontier_account = 0;
skip_wallets = false;
}
}
}
// Shut down the request loop thread and vote generators, then clear all
// active elections. Waits for startup to complete first so the thread is
// joinable in a well-defined state.
void nano::active_transactions::stop ()
{
nano::unique_lock<nano::mutex> lock (mutex);
if (!started)
{
// request_loop sets started and notifies; wait so we never join a half-constructed thread
condition.wait (lock, [&started = started] { return started; });
}
stopped = true;
lock.unlock ();
condition.notify_all ();
if (thread.joinable ())
{
thread.join ();
}
generator.stop ();
final_generator.stop ();
// Clear containers under the lock after all workers have exited
lock.lock ();
roots.clear ();
}
// Insert a new election for block_a (or return the existing one for the same
// qualified root). Called with lock_a (the active mutex) held; the lock is
// released before returning. Recently-confirmed roots are not re-elected.
nano::election_insertion_result nano::active_transactions::insert_impl (nano::unique_lock<nano::mutex> & lock_a, std::shared_ptr<nano::block> const & block_a, boost::optional<nano::uint128_t> const & previous_balance_a, nano::election_behavior election_behavior_a, std::function<void (std::shared_ptr<nano::block> const &)> const & confirmation_action_a)
{
debug_assert (lock_a.owns_lock ());
debug_assert (block_a->has_sideband ());
nano::election_insertion_result result;
if (!stopped)
{
auto root (block_a->qualified_root ());
auto existing (roots.get<tag_root> ().find (root));
if (existing == roots.get<tag_root> ().end ())
{
if (recently_confirmed.get<tag_root> ().find (root) == recently_confirmed.get<tag_root> ().end ())
{
result.inserted = true;
auto hash (block_a->hash ());
auto epoch (block_a->sideband ().details.epoch);
// Resolve the previous balance from the ledger when the caller did not supply one
nano::uint128_t previous_balance (previous_balance_a.value_or (0));
debug_assert (!(previous_balance_a.value_or (0) > 0 && block_a->previous ().is_zero ()));
if (!previous_balance_a.is_initialized () && !block_a->previous ().is_zero ())
{
auto transaction (node.store.tx_begin_read ());
if (node.store.block.exists (transaction, block_a->previous ()))
{
previous_balance = node.ledger.balance (transaction, block_a->previous ());
}
}
result.election = nano::make_shared<nano::election> (
node, block_a, confirmation_action_a, [&node = node] (auto const & rep_a) {
// Representative is defined as online if replying to live votes or rep_crawler queries
node.online_reps.observe (rep_a);
},
election_behavior_a);
roots.get<tag_root> ().emplace (nano::active_transactions::conflict_info{ root, result.election, epoch, previous_balance });
blocks.emplace (hash, result.election);
// Grab any cached inactive votes before dropping the lock, then apply them unlocked
auto const cache = find_inactive_votes_cache_impl (hash);
lock_a.unlock ();
result.election->insert_inactive_votes_cache (cache);
node.stats.inc (nano::stat::type::election, nano::stat::detail::election_start);
vacancy_update ();
}
}
else
{
result.election = existing->election;
}
// The lock may still be held on the duplicate-root / recently-confirmed paths
if (lock_a.owns_lock ())
{
lock_a.unlock ();
}
// Votes are generated for inserted or ongoing elections
if (result.election)
{
result.election->generate_votes ();
}
}
return result;
}
// Validate a vote and apply it to the current election if one exists
// Returns vote (processed), replay (all hashes already voted/confirmed), or
// indeterminate (no matching elections and not provably a replay).
nano::vote_code nano::active_transactions::vote (std::shared_ptr<nano::vote> const & vote_a)
{
nano::vote_code result{ nano::vote_code::indeterminate };
// If all hashes were recently confirmed then it is a replay
unsigned recently_confirmed_counter (0);
// Collect matching elections under the lock; apply the votes after releasing it
std::vector<std::pair<std::shared_ptr<nano::election>, nano::block_hash>> process;
{
nano::unique_lock<nano::mutex> lock (mutex);
for (auto vote_block : vote_a->blocks)
{
auto & recently_confirmed_by_hash (recently_confirmed.get<tag_hash> ());
// which() distinguishes the boost::variant alternatives: true = bare hash, false = full block
if (vote_block.which ())
{
auto const & block_hash (boost::get<nano::block_hash> (vote_block));
auto existing (blocks.find (block_hash));
if (existing != blocks.end ())
{
process.emplace_back (existing->second, block_hash);
}
else if (recently_confirmed_by_hash.count (block_hash) == 0)
{
// No election yet: remember the vote in case the block arrives later
add_inactive_votes_cache (lock, block_hash, vote_a->account, vote_a->timestamp ());
}
else
{
++recently_confirmed_counter;
}
}
else
{
auto block (boost::get<std::shared_ptr<nano::block>> (vote_block));
auto existing (roots.get<tag_root> ().find (block->qualified_root ()));
if (existing != roots.get<tag_root> ().end ())
{
process.emplace_back (existing->election, block->hash ());
}
else if (recently_confirmed_by_hash.count (block->hash ()) == 0)
{
add_inactive_votes_cache (lock, block->hash (), vote_a->account, vote_a->timestamp ());
}
else
{
++recently_confirmed_counter;
}
}
}
}
if (!process.empty ())
{
bool replay (false);
bool processed (false);
for (auto const & [election, block_hash] : process)
{
auto const result_l = election->vote (vote_a->account, vote_a->timestamp (), block_hash);
processed = processed || result_l.processed;
replay = replay || result_l.replay;
}
// Republish vote if it is new and the node does not host a principal representative (or close to)
if (processed)
{
auto const reps (node.wallets.reps ());
if (!reps.have_half_rep () && !reps.exists (vote_a->account))
{
node.network.flood_vote (vote_a, 0.5f);
}
}
result = replay ? nano::vote_code::replay : nano::vote_code::vote;
}
else if (recently_confirmed_counter == vote_a->blocks.size ())
{
result = nano::vote_code::replay;
}
return result;
}
// True when an election exists for the given qualified root.
bool nano::active_transactions::active (nano::qualified_root const & root_a)
{
	nano::lock_guard<nano::mutex> guard (mutex);
	auto const & by_root = roots.get<tag_root> ();
	return by_root.find (root_a) != by_root.end ();
}
// True when this exact block participates in an active election: both its
// qualified root and its hash must be tracked.
bool nano::active_transactions::active (nano::block const & block_a)
{
	nano::lock_guard<nano::mutex> guard (mutex);
	auto const & by_root = roots.get<tag_root> ();
	bool root_active = by_root.find (block_a.qualified_root ()) != by_root.end ();
	return root_active && blocks.find (block_a.hash ()) != blocks.end ();
}
// Look up the election for a qualified root; returns nullptr when none exists.
std::shared_ptr<nano::election> nano::active_transactions::election (nano::qualified_root const & root_a) const
{
	nano::lock_guard<nano::mutex> guard (mutex);
	std::shared_ptr<nano::election> found;
	auto it = roots.get<tag_root> ().find (root_a);
	if (it != roots.get<tag_root> ().end ())
	{
		found = it->election;
	}
	return found;
}
// Return the current winning block of the election tracking hash_a, or nullptr.
// The active mutex is released before querying the election to avoid holding
// both locks at once.
std::shared_ptr<nano::block> nano::active_transactions::winner (nano::block_hash const & hash_a) const
{
	std::shared_ptr<nano::block> winner_l;
	nano::unique_lock<nano::mutex> lock (mutex);
	auto it = blocks.find (hash_a);
	if (it != blocks.end ())
	{
		auto election_l = it->second;
		lock.unlock ();
		winner_l = election_l->winner ();
	}
	return winner_l;
}
std::deque<nano::election_status> nano::active_transactions::list_recently_cemented ()
{
nano::lock_guard<nano::mutex> lock (mutex);
return recently_cemented;
}
// Append a cemented election's status to the history, evicting the oldest
// entry once the configured history size is exceeded.
void nano::active_transactions::add_recently_cemented (nano::election_status const & status_a)
{
	nano::lock_guard<nano::mutex> guard (mutex);
	recently_cemented.push_back (status_a);
	while (recently_cemented.size () > node.config.confirmation_history_size)
	{
		recently_cemented.pop_front ();
	}
}
// Record a confirmed root/hash pair so duplicate elections and replayed votes
// can be detected; the container is bounded by recently_confirmed_size.
void nano::active_transactions::add_recently_confirmed (nano::qualified_root const & root_a, nano::block_hash const & hash_a)
{
	nano::lock_guard<nano::mutex> guard (mutex);
	auto & by_sequence = recently_confirmed.get<tag_sequence> ();
	by_sequence.emplace_back (root_a, hash_a);
	if (recently_confirmed.size () > recently_confirmed_size)
	{
		by_sequence.pop_front ();
	}
}
// Drop a hash from the recently-confirmed set (e.g. on rollback).
void nano::active_transactions::erase_recently_confirmed (nano::block_hash const & hash_a)
{
	nano::lock_guard<nano::mutex> guard (mutex);
	recently_confirmed.get<tag_hash> ().erase (hash_a);
}
// Convenience overload: erase the election identified by this block's qualified root.
void nano::active_transactions::erase (nano::block const & block_a)
{
	erase (block_a.qualified_root ());
}
// Erase the election for the given qualified root, if one exists.
// cleanup_election releases the lock before notifying observers.
void nano::active_transactions::erase (nano::qualified_root const & root_a)
{
	nano::unique_lock<nano::mutex> lock (mutex);
	auto it = roots.get<tag_root> ().find (root_a);
	if (it != roots.get<tag_root> ().end ())
	{
		cleanup_election (lock, *it->election);
	}
}
// Remove a single block hash from the hash → election map; the hash is
// expected to be present (asserted in debug builds).
void nano::active_transactions::erase_hash (nano::block_hash const & hash_a)
{
	nano::unique_lock<nano::mutex> lock (mutex);
	[[maybe_unused]] auto removed = blocks.erase (hash_a);
	debug_assert (removed == 1);
}
// Drop the front election in random-access order to relieve overflow pressure.
void nano::active_transactions::erase_oldest ()
{
	nano::unique_lock<nano::mutex> lock (mutex);
	if (roots.empty ())
	{
		return;
	}
	node.stats.inc (nano::stat::type::election, nano::stat::detail::election_drop_overflow);
	auto front_entry = roots.get<tag_random_access> ().front ();
	cleanup_election (lock, *front_entry.election);
}
bool nano::active_transactions::empty ()
{
nano::lock_guard<nano::mutex> lock (mutex);
return roots.empty ();
}
std::size_t nano::active_transactions::size ()
{
nano::lock_guard<nano::mutex> lock (mutex);
return roots.size ();
}
// Offer block_a to an existing election for the same qualified root (a fork).
// Returns false when the election accepted the block as a new fork candidate;
// true when no election exists or the election rejected it.
bool nano::active_transactions::publish (std::shared_ptr<nano::block> const & block_a)
{
nano::unique_lock<nano::mutex> lock (mutex);
auto existing (roots.get<tag_root> ().find (block_a->qualified_root ()));
auto result (true);
if (existing != roots.get<tag_root> ().end ())
{
auto election (existing->election);
// election->publish takes the election's own mutex; drop ours first
lock.unlock ();
result = election->publish (block_a);
if (!result)
{
// The fork was accepted: index it and feed it any cached inactive votes
lock.lock ();
blocks.emplace (block_a->hash (), election);
auto const cache = find_inactive_votes_cache_impl (block_a->hash ());
lock.unlock ();
election->insert_inactive_votes_cache (cache);
node.stats.inc (nano::stat::type::election, nano::stat::detail::election_block_conflict);
}
}
return result;
}
// Returns the type of election status requiring callbacks calling later
// Confirms the active election for block_a (if its current winner matches),
// or reports how the block reached confirmation:
//   - inactive_confirmation_height: no election was tracking this hash
//   - active_confirmation_height:   election existed and is confirmed here
//   - active_confirmed_quorum:      election had already confirmed via quorum
//   - none (uninitialized):         election exists but this block is not its winner
boost::optional<nano::election_status_type> nano::active_transactions::confirm_block (nano::transaction const & transaction_a, std::shared_ptr<nano::block> const & block_a)
{
auto hash (block_a->hash ());
nano::unique_lock<nano::mutex> lock (mutex);
auto existing (blocks.find (hash));
boost::optional<nano::election_status_type> status_type;
if (existing != blocks.end ())
{
// Release the active mutex before taking the election mutex (lock ordering)
lock.unlock ();
nano::unique_lock<nano::mutex> election_lock (existing->second->mutex);
if (existing->second->status.winner && existing->second->status.winner->hash () == hash)
{
if (!existing->second->confirmed ())
{
existing->second->confirm_once (election_lock, nano::election_status_type::active_confirmation_height);
status_type = nano::election_status_type::active_confirmation_height;
}
else
{
#ifndef NDEBUG
// A quorum-confirmed winner must have been registered via add_election_winner_details
nano::unique_lock<nano::mutex> election_winners_lk (election_winner_details_mutex);
debug_assert (election_winner_details.find (hash) != election_winner_details.cend ());
#endif
status_type = nano::election_status_type::active_confirmed_quorum;
}
}
else
{
status_type = boost::optional<nano::election_status_type>{};
}
}
else
{
status_type = nano::election_status_type::inactive_confirmation_height;
}
return status_type;
}
std::size_t nano::active_transactions::priority_cementable_frontiers_size ()
{
nano::lock_guard<nano::mutex> guard (mutex);
return priority_cementable_frontiers.size ();
}
std::size_t nano::active_transactions::priority_wallet_cementable_frontiers_size ()
{
nano::lock_guard<nano::mutex> guard (mutex);
return priority_wallet_cementable_frontiers.size ();
}
std::size_t nano::active_transactions::inactive_votes_cache_size ()
{
nano::lock_guard<nano::mutex> guard (mutex);
return inactive_votes_cache.size ();
}
// Records an inactive vote (a vote for a block without an active election) from a principal representative.
// Caller must hold the active_transactions mutex via lock_a; the lock may be temporarily released inside
// inactive_votes_bootstrap_check and is re-acquired before this function returns.
void nano::active_transactions::add_inactive_votes_cache (nano::unique_lock<nano::mutex> & lock_a, nano::block_hash const & hash_a, nano::account const & representative_a, uint64_t const timestamp_a)
{
	// Check principal representative status
	if (node.ledger.weight (representative_a) > node.minimum_principal_weight ())
	{
		/** It is important that the new vote is added to the cache before calling inactive_votes_bootstrap_check
		 * This guarantees consistency when a vote is received while also receiving the corresponding block
		 */
		auto & inactive_by_hash (inactive_votes_cache.get<tag_hash> ());
		auto existing (inactive_by_hash.find (hash_a));
		if (existing != inactive_by_hash.end ())
		{
			if (existing->needs_eval ())
			{
				// Entry already cached: append this voter if not previously seen, refreshing the arrival time.
				auto is_new (false);
				inactive_by_hash.modify (existing, [representative_a, timestamp_a, &is_new] (nano::inactive_cache_information & info) {
					auto it = std::find_if (info.voters.begin (), info.voters.end (), [&representative_a] (auto const & item_a) { return item_a.first == representative_a; });
					is_new = (it == info.voters.end ());
					if (is_new)
					{
						info.arrival = std::chrono::steady_clock::now ();
						info.voters.emplace_back (representative_a, timestamp_a);
					}
				});
				if (is_new)
				{
					// Only a genuinely new voter can change the tally, so only then re-evaluate thresholds.
					auto const old_status = existing->status;
					auto const status = inactive_votes_bootstrap_check (lock_a, existing->voters, hash_a, existing->status);
					if (status != old_status)
					{
						// The lock has since been released
						// Re-find the entry: it may have been erased or replaced while unlocked.
						existing = inactive_by_hash.find (hash_a);
						if (existing != inactive_by_hash.end ())
						{
							inactive_by_hash.modify (existing, [status] (nano::inactive_cache_information & info) {
								info.status = status;
							});
						}
					}
				}
			}
		}
		else
		{
			// First vote seen for this hash: create a fresh cache entry before evaluating thresholds.
			auto & inactive_by_arrival (inactive_votes_cache.get<tag_arrival> ());
			nano::inactive_cache_status default_status{};
			inactive_by_arrival.emplace (nano::inactive_cache_information{ std::chrono::steady_clock::now (), hash_a, representative_a, timestamp_a, default_status });
			auto const status (inactive_votes_bootstrap_check (lock_a, representative_a, hash_a, default_status));
			if (status != default_status)
			{
				// The lock has since been released
				existing = inactive_by_hash.find (hash_a);
				if (existing != inactive_by_hash.end ())
				{
					inactive_by_hash.modify (existing, [status] (nano::inactive_cache_information & info) {
						info.status = status;
					});
				}
			}
			// Evict the oldest entry when the cache exceeds its configured capacity.
			if (inactive_votes_cache.size () > node.flags.inactive_votes_cache_size)
			{
				inactive_by_arrival.erase (inactive_by_arrival.begin ());
			}
		}
	}
}
// Starts an election for the given block if its cached inactive votes have already
// crossed the election-start threshold.
void nano::active_transactions::trigger_inactive_votes_cache_election (std::shared_ptr<nano::block> const & block_a)
{
	nano::unique_lock<nano::mutex> guard{ mutex };
	auto const cached_status = find_inactive_votes_cache_impl (block_a->hash ()).status;
	if (cached_status.election_started)
	{
		insert_impl (guard, block_a);
	}
}
// Locking wrapper around find_inactive_votes_cache_impl.
nano::inactive_cache_information nano::active_transactions::find_inactive_votes_cache (nano::block_hash const & hash_a)
{
	nano::lock_guard<nano::mutex> lk{ mutex };
	return find_inactive_votes_cache_impl (hash_a);
}
// Looks up the cached inactive-vote entry for a hash, returning a default-constructed
// entry when none exists. Caller must hold the active_transactions mutex.
nano::inactive_cache_information nano::active_transactions::find_inactive_votes_cache_impl (nano::block_hash const & hash_a)
{
	auto & by_hash (inactive_votes_cache.get<tag_hash> ());
	auto const it = by_hash.find (hash_a);
	if (it == by_hash.end ())
	{
		return nano::inactive_cache_information{};
	}
	return *it;
}
// Removes any cached inactive-vote entry for the given hash.
// NOTE(review): unlike the other cache accessors in this file, this does not acquire the mutex —
// presumably callers already hold it; confirm before calling from new sites.
void nano::active_transactions::erase_inactive_votes_cache (nano::block_hash const & hash_a)
{
	inactive_votes_cache.get<tag_hash> ().erase (hash_a);
}
// Threshold evaluation for a single voter's ledger weight.
// Precondition: lock_a is held on entry; it is released here and re-acquired by the impl before returning.
nano::inactive_cache_status nano::active_transactions::inactive_votes_bootstrap_check (nano::unique_lock<nano::mutex> & lock_a, nano::account const & voter_a, nano::block_hash const & hash_a, nano::inactive_cache_status const & previously_a)
{
	debug_assert (lock_a.owns_lock ());
	lock_a.unlock ();
	return inactive_votes_bootstrap_check_impl (lock_a, node.ledger.weight (voter_a), 1, hash_a, previously_a);
}
// Threshold evaluation for the accumulated weight of all cached voters for a hash.
// Precondition: lock_a is held on entry; it is released while tallying (weight lookups may be slow)
// and re-acquired by the impl before returning.
nano::inactive_cache_status nano::active_transactions::inactive_votes_bootstrap_check (nano::unique_lock<nano::mutex> & lock_a, std::vector<std::pair<nano::account, uint64_t>> const & voters_a, nano::block_hash const & hash_a, nano::inactive_cache_status const & previously_a)
{
	/** Perform checks on accumulated tally from inactive votes
	 * These votes are generally either for unconfirmed blocks or old confirmed blocks
	 * That check is made after hitting a tally threshold, and always as late and as few times as possible
	 */
	debug_assert (lock_a.owns_lock ());
	lock_a.unlock ();
	// NOTE(review): tally relies on value-initialisation to zero by nano::uint128_t's default constructor — confirm.
	nano::uint128_t tally;
	// Sum the current ledger weight of every recorded voter for this hash.
	for (auto const & [voter, timestamp] : voters_a)
	{
		tally += node.ledger.weight (voter);
	}
	return inactive_votes_bootstrap_check_impl (lock_a, tally, voters_a.size (), hash_a, previously_a);
}
// Evaluates bootstrap / election-start / confirmation thresholds for an inactive-vote tally
// and triggers the corresponding actions (hinted election insert or gap-cache bootstrap).
// Precondition: lock_a is NOT held on entry; the mutex is re-acquired before returning.
nano::inactive_cache_status nano::active_transactions::inactive_votes_bootstrap_check_impl (nano::unique_lock<nano::mutex> & lock_a, nano::uint128_t const & tally_a, std::size_t voters_size_a, nano::block_hash const & hash_a, nano::inactive_cache_status const & previously_a)
{
	debug_assert (!lock_a.owns_lock ());
	nano::inactive_cache_status status (previously_a);
	// Minimum number of distinct voters before a hinted election may start; relaxed on dev/beta networks.
	const unsigned election_start_voters_min = node.network_params.network.is_dev_network () ? 2 : node.network_params.network.is_beta_network () ? 5
	                                                                                                                                             : 15;
	status.tally = tally_a;
	if (!previously_a.confirmed && tally_a >= node.online_reps.delta ())
	{
		// Quorum delta reached: mark confirmed (bootstrap is implied started as well).
		status.bootstrap_started = true;
		status.confirmed = true;
	}
	else if (!previously_a.bootstrap_started && !node.flags.disable_legacy_bootstrap && node.flags.disable_lazy_bootstrap && tally_a > node.gap_cache.bootstrap_threshold ())
	{
		// NOTE(review): this branch requires lazy bootstrap to be DISABLED (no `!` before
		// node.flags.disable_lazy_bootstrap) — verify against upstream; the negation may be missing.
		status.bootstrap_started = true;
	}
	if (!previously_a.election_started && voters_size_a >= election_start_voters_min && tally_a >= (node.online_reps.trended () / 100) * node.config.election_hint_weight_percent)
	{
		status.election_started = true;
	}
	// Act only on fresh transitions so a status can trigger its side effect at most once.
	if ((status.election_started && !previously_a.election_started) || (status.bootstrap_started && !previously_a.bootstrap_started))
	{
		auto transaction (node.store.tx_begin_read ());
		auto block = node.store.block.get (transaction, hash_a);
		if (block && status.election_started && !previously_a.election_started && !node.block_confirmed_or_being_confirmed (transaction, hash_a))
		{
			// Block is present locally and unconfirmed: start a hinted election for it.
			lock_a.lock ();
			insert_impl (lock_a, block);
		}
		else if (!block && status.bootstrap_started && !previously_a.bootstrap_started && (!node.ledger.pruning || !node.store.pruned.exists (transaction, hash_a)))
		{
			// Block is missing (and not pruned): request it via gap-cache bootstrap.
			node.gap_cache.bootstrap_start (hash_a);
		}
	}
	// Restore the caller's invariant: the mutex is held on return.
	if (!lock_a.owns_lock ())
	{
		lock_a.lock ();
	}
	return status;
}
bool nano::purge_singleton_inactive_votes_cache_pool_memory ()
{
return boost::singleton_pool<boost::fast_pool_allocator_tag, sizeof (nano::active_transactions::ordered_cache::node_type)>::purge_memory ();
}
// Thread-safe count of tracked election winners.
std::size_t nano::active_transactions::election_winner_details_size ()
{
	nano::lock_guard<nano::mutex> lk{ election_winner_details_mutex };
	auto const result = election_winner_details.size ();
	return result;
}
// Pairs an account with its count of uncemented blocks, used when prioritising frontiers for cementing.
nano::cementable_account::cementable_account (nano::account const & account_a, std::size_t blocks_uncemented_a) :
	account (account_a), blocks_uncemented (blocks_uncemented_a)
{
}
// Records the time at which an optimistic election for the given account expired.
nano::expired_optimistic_election_info::expired_optimistic_election_info (std::chrono::steady_clock::time_point expired_time_a, nano::account account_a) :
	expired_time (expired_time_a),
	account (account_a)
{
}
// True when the frontier-confirmation pass is permitted to start at least one election this round.
bool nano::frontiers_confirmation_info::can_start_elections () const
{
	return max_elections > 0;
}
// Builds a diagnostics tree reporting the sizes of this active_transactions instance's containers.
std::unique_ptr<nano::container_info_component> nano::collect_container_info (active_transactions & active_transactions, std::string const & name)
{
	std::size_t roots_count;
	std::size_t blocks_count;
	std::size_t recently_confirmed_count;
	std::size_t recently_cemented_count;
	{
		// Snapshot the mutex-protected containers under a single lock acquisition.
		nano::lock_guard<nano::mutex> guard (active_transactions.mutex);
		roots_count = active_transactions.roots.size ();
		blocks_count = active_transactions.blocks.size ();
		recently_confirmed_count = active_transactions.recently_confirmed.size ();
		recently_cemented_count = active_transactions.recently_cemented.size ();
	}
	auto composite = std::make_unique<container_info_composite> (name);
	composite->add_component (std::make_unique<container_info_leaf> (container_info{ "roots", roots_count, sizeof (decltype (active_transactions.roots)::value_type) }));
	composite->add_component (std::make_unique<container_info_leaf> (container_info{ "blocks", blocks_count, sizeof (decltype (active_transactions.blocks)::value_type) }));
	// The remaining sizes are fetched via their own locking accessors rather than the snapshot above.
	composite->add_component (std::make_unique<container_info_leaf> (container_info{ "election_winner_details", active_transactions.election_winner_details_size (), sizeof (decltype (active_transactions.election_winner_details)::value_type) }));
	composite->add_component (std::make_unique<container_info_leaf> (container_info{ "recently_confirmed", recently_confirmed_count, sizeof (decltype (active_transactions.recently_confirmed)::value_type) }));
	composite->add_component (std::make_unique<container_info_leaf> (container_info{ "recently_cemented", recently_cemented_count, sizeof (decltype (active_transactions.recently_cemented)::value_type) }));
	composite->add_component (std::make_unique<container_info_leaf> (container_info{ "priority_wallet_cementable_frontiers", active_transactions.priority_wallet_cementable_frontiers_size (), sizeof (nano::cementable_account) }));
	composite->add_component (std::make_unique<container_info_leaf> (container_info{ "priority_cementable_frontiers", active_transactions.priority_cementable_frontiers_size (), sizeof (nano::cementable_account) }));
	composite->add_component (std::make_unique<container_info_leaf> (container_info{ "expired_optimistic_election_infos", active_transactions.expired_optimistic_election_infos_size, sizeof (decltype (active_transactions.expired_optimistic_election_infos)::value_type) }));
	composite->add_component (std::make_unique<container_info_leaf> (container_info{ "inactive_votes_cache", active_transactions.inactive_votes_cache_size (), sizeof (nano::gap_information) }));
	composite->add_component (std::make_unique<container_info_leaf> (container_info{ "optimistic_elections_count", active_transactions.optimistic_elections_count, 0 })); // This isn't an extra container, is just to expose the count easily
	composite->add_component (collect_container_info (active_transactions.generator, "generator"));
	return composite;
}
| 1 | 17,001 | There is a strange $b that looks wrong. | nanocurrency-nano-node | cpp |
@@ -1166,12 +1166,16 @@ def getControlFieldSpeech(attrs,ancestorAttrs,fieldType,formatConfig=None,extraD
# speakStatesFirst: Speak the states before the role.
speakStatesFirst=role==controlTypes.ROLE_LINK
+ containerContainsText="" #: used for item counts for lists
+
# Determine what text to speak.
# Special cases
if speakEntry and childControlCount and fieldType=="start_addedToControlFieldStack" and role==controlTypes.ROLE_LIST and controlTypes.STATE_READONLY in states:
# List.
- # Translators: Speaks number of items in a list (example output: list with 5 items).
- return roleText+" "+_("with %s items")%childControlCount
+ # #7652: containerContainsText variable is set here, but the actual generation of all other output is handled further down in the general cases section.
+ # This ensures that properties such as name, states and level etc still get reported appropriately.
+ # Translators: Number of items in a list (example output: list with 5 items).
+ containerContainsText=_("with %s items")%childControlCount
elif fieldType=="start_addedToControlFieldStack" and role==controlTypes.ROLE_TABLE and tableID:
# Table.
return " ".join((nameText,roleText,stateText, getSpeechTextForProperties(_tableID=tableID, rowCount=attrs.get("table-rowcount"), columnCount=attrs.get("table-columncount")),levelText)) | 1 | # -*- coding: UTF-8 -*-
#speech.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2006-2017 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Babbage B.V.
"""High-level functions to speak information.
"""
import itertools
import weakref
import unicodedata
import time
import colors
import globalVars
from logHandler import log
import api
import controlTypes
import config
import tones
import synthDriverHandler
from synthDriverHandler import *
import re
import textInfos
import queueHandler
import speechDictHandler
import characterProcessing
import languageHandler
speechMode_off=0
speechMode_beeps=1
speechMode_talk=2
#: How speech should be handled; one of speechMode_off, speechMode_beeps or speechMode_talk.
speechMode=speechMode_talk
#: Length in ms of the beep played in place of each utterance when speechMode is speechMode_beeps.
speechMode_beeps_ms=15
#: Whether the current speech has already been cancelled; avoids redundant synth cancel calls.
beenCanceled=True
#: Whether speech is currently paused (see pauseSpeech).
isPaused=False
#: Characters accumulated for the word currently being typed; cleared every time speech is sent.
curWordChars=[]
#Set containing locale codes for languages supporting conjunct characters
LANGS_WITH_CONJUNCT_CHARS = {'hi', 'as', 'bn', 'gu', 'kn', 'kok', 'ml', 'mni', 'mr', 'pa', 'te', 'ur', 'ta'}
#: The string used to separate distinct chunks of text when multiple chunks should be spoken without pauses.
# #555: Use two spaces so that numbers from adjacent chunks aren't treated as a single number
# for languages such as French and German which use space as a thousands separator.
# NOTE(review): the literal below appears to contain a single space although the comment above says two — confirm against upstream.
CHUNK_SEPARATOR = " "
# Last-reported tree level and table cell position; presumably used by speakTextInfo (not visible in
# this portion of the module) to avoid repeating unchanged information — confirm before relying on these.
oldTreeLevel=None
oldTableID=None
oldRowNumber=None
oldRowSpan=None
oldColumnNumber=None
oldColumnSpan=None
def initialize():
	"""Initialise the speech subsystem: start the synth driver framework and
	activate the synthesizer named in the user's configuration (nvda.ini).
	"""
	synthDriverHandler.initialize()
	configuredSynthName=config.conf["speech"]["synth"]
	setSynth(configuredSynthName)
def terminate():
	"""Shut down speech output by clearing the active synthesizer (counterpart to initialize)."""
	setSynth(None)
speechViewerObj=None # NOTE(review): not referenced in the visible portion of this module; presumably kept for external users — confirm before removing
#: If a chunk of text contains only these characters, it will be considered blank.
BLANK_CHUNK_CHARS = frozenset((" ", "\n", "\r", "\0", u"\xa0"))

def isBlank(text):
	"""Determine whether text should be reported as blank.
	@param text: The text in question.
	@type text: str
	@return: C{True} if the text is blank, C{False} if not.
	@rtype: bool
	"""
	if not text:
		return True
	# Non-empty: blank only when every character is a filler character.
	return all(char in BLANK_CHUNK_CHARS for char in text)
RE_CONVERT_WHITESPACE = re.compile("[\0\r\n]")

def processText(locale,text,symbolLevel):
	"""Prepare text for the synthesizer: apply the user's speech dictionaries,
	expand punctuation/symbols for the locale at the given verbosity level,
	collapse NUL/CR/LF characters to spaces, and strip surrounding whitespace.
	"""
	processed = speechDictHandler.processText(text)
	processed = characterProcessing.processSpeechSymbols(locale, processed, symbolLevel)
	processed = RE_CONVERT_WHITESPACE.sub(u" ", processed)
	return processed.strip()
def getLastSpeechIndex():
	"""Fetch the last index reported by the synthesizer.
	Indexing makes it possible to determine whether a given piece of text has been
	spoken yet; usually the character position of the text is used as the index.
	@returns: the last index encountered
	@rtype: int
	"""
	synth=getSynth()
	return synth.lastIndex
def cancelSpeech():
	"""Interrupts the synthesizer if it is currently speaking.

	Also stops say-all, clears any speech pending in speakWithoutPauses, and closes
	the active spelling generator. The synth-level cancel is skipped when speech was
	already cancelled or speech mode is off/beeps (nothing is being synthesized).
	"""
	global beenCanceled, isPaused, _speakSpellingGenerator
	# Import only for this function to avoid circular import.
	import sayAllHandler
	sayAllHandler.stop()
	# speakWithoutPauses is defined elsewhere in this module; reset its pending buffer.
	speakWithoutPauses._pendingSpeechSequence=[]
	speakWithoutPauses.lastSentIndex=None
	if _speakSpellingGenerator:
		_speakSpellingGenerator.close()
	if beenCanceled:
		return
	elif speechMode==speechMode_off:
		return
	elif speechMode==speechMode_beeps:
		return
	getSynth().cancel()
	beenCanceled=True
	isPaused=False
def pauseSpeech(switch):
	"""Pause or resume speech output.
	@param switch: C{True} to pause, C{False} to resume.
	@type switch: bool
	"""
	global isPaused, beenCanceled
	synth=getSynth()
	synth.pause(switch)
	isPaused=switch
	beenCanceled=False
def speakMessage(text,index=None):
	"""Speak a message to the user.
	@param text: the message to speak.
	@type text: string
	@param index: index to mark this text with; ideally the character position of the text if known.
	@type index: int
	"""
	# A message is just text spoken with the REASON_MESSAGE reason.
	speakText(text, index=index, reason=controlTypes.REASON_MESSAGE)
def getCurrentLanguage():
	"""Return the language speech should currently use.

	Prefers the active synthesizer's reported voice language when the user has
	chosen to trust it; the result is normalised, falling back to NVDA's own
	configured language when nothing usable is reported.
	"""
	language=None
	activeSynth=getSynth()
	if activeSynth and config.conf['speech']['trustVoiceLanguage']:
		try:
			language=activeSynth.language
		except NotImplementedError:
			# Some synths cannot report a voice language.
			language=None
	if language:
		language=languageHandler.normalizeLanguage(language)
	return language or languageHandler.getLanguage()
def spellTextInfo(info,useCharacterDescriptions=False):
	"""Spell the text of a TextInfo, honouring any language-change fields it
	contains when automatic language switching is enabled."""
	autoSwitch=config.conf['speech']['autoLanguageSwitching']
	if not autoSwitch:
		speakSpelling(info.text,useCharacterDescriptions=useCharacterDescriptions)
		return
	language=None
	for field in info.getTextWithFields({}):
		if isinstance(field,basestring):
			# Plain text chunk: spell it in the most recently seen language.
			speakSpelling(field,language,useCharacterDescriptions=useCharacterDescriptions)
		elif isinstance(field,textInfos.FieldCommand) and field.command=="formatChange":
			# Track language changes so subsequent text is spelled correctly.
			language=field.field.get('language')
# The single in-flight spelling generator, or None; reused so queued spelling requests are chained into it.
_speakSpellingGenerator=None
def speakSpelling(text,locale=None,useCharacterDescriptions=False):
	"""Speak the given text character by character (spelling).
	@param text: the text to spell.
	@type text: str
	@param locale: locale used for symbols/descriptions; defaults to the current speech language.
	@type locale: str
	@param useCharacterDescriptions: whether to announce character descriptions instead of the bare characters.
	@type useCharacterDescriptions: bool
	"""
	global beenCanceled, _speakSpellingGenerator
	import speechViewer
	if speechViewer.isActive:
		speechViewer.appendText(text)
	if speechMode==speechMode_off:
		return
	elif speechMode==speechMode_beeps:
		tones.beep(config.conf["speech"]["beepSpeechModePitch"],speechMode_beeps_ms)
		return
	if isPaused:
		cancelSpeech()
	beenCanceled=False
	defaultLanguage=getCurrentLanguage()
	# Fall back to the default language when no locale is given, or when only the
	# dialect differs and dialect switching is disabled.
	if not locale or (not config.conf['speech']['autoDialectSwitching'] and locale.split('_')[0]==defaultLanguage.split('_')[0]):
		locale=defaultLanguage
	if not text:
		# Translators: This is spoken when NVDA moves to an empty line.
		return getSynth().speak((_("blank"),))
	if not text.isspace():
		text=text.rstrip()
	if _speakSpellingGenerator and _speakSpellingGenerator.gi_frame:
		# A spelling generator is still running: queue this request into it.
		_speakSpellingGenerator.send((text,locale,useCharacterDescriptions))
	else:
		_speakSpellingGenerator=_speakSpellingGen(text,locale,useCharacterDescriptions)
		try:
			# Speak the first character before this function returns.
			next(_speakSpellingGenerator)
		except StopIteration:
			return
		queueHandler.registerGeneratorObject(_speakSpellingGenerator)
def getCharDescListFromText(text,locale):
	"""Build a list of (characters, description) pairs covering the whole of *text*.

	Longest-prefix matching against the locale's characterDescriptions.dic is used,
	which handles conjunct characters present in languages such as Hindi and Urdu
	where a description may exist for a multi-character cluster.
	"""
	result = []
	remaining = text
	prefixLen = len(remaining)
	while prefixLen:
		candidate = remaining[:prefixLen]
		description = characterProcessing.getCharacterDescription(locale, candidate)
		if not description and prefixLen > 1:
			# No description for this prefix; try a shorter one.
			prefixLen -= 1
			continue
		if not description:
			# #5375: Down to a single character with no description; try lower case.
			# This provides for upper case English characters (which only have lower case descriptions).
			description = characterProcessing.getCharacterDescription(locale, candidate.lower())
		result.append((candidate, description))
		remaining = remaining[prefixLen:]
		prefixLen = len(remaining)
	return result
def _speakSpellingGen(text,locale,useCharacterDescriptions):
	"""Generator that speaks text one character (or conjunct cluster) at a time,
	yielding between characters so other work can run. New
	(text, locale, useCharacterDescriptions) tuples may be sent in to queue
	further spelling onto the same generator.
	"""
	synth=getSynth()
	synthConfig=config.conf["speech"][synth.name]
	# buf is extended via send() while iterating, chaining queued requests.
	buf=[(text,locale,useCharacterDescriptions)]
	for text,locale,useCharacterDescriptions in buf:
		textLength=len(text)
		count = 0
		localeHasConjuncts = True if locale.split('_',1)[0] in LANGS_WITH_CONJUNCT_CHARS else False
		charDescList = getCharDescListFromText(text,locale) if localeHasConjuncts else text
		for item in charDescList:
			if localeHasConjuncts:
				# item is a tuple containing character and its description
				char = item[0]
				charDesc = item[1]
			else:
				# item is just a character.
				char = item
				if useCharacterDescriptions:
					charDesc=characterProcessing.getCharacterDescription(locale,char.lower())
			uppercase=char.isupper()
			if useCharacterDescriptions and charDesc:
				#Consider changing to multiple synth speech calls
				# For a single character, join all descriptions with an ideographic comma.
				char=charDesc[0] if textLength>1 else u"\u3001".join(charDesc)
			else:
				char=characterProcessing.processSpeechSymbol(locale,char)
				if uppercase and synthConfig["sayCapForCapitals"]:
					# Translators: cap will be spoken before the given letter when it is capitalized.
					char=_("cap %s")%char
			# Temporarily raise pitch for capitals when the synth supports it.
			if uppercase and synth.isSupported("pitch") and synthConfig["capPitchChange"]:
				oldPitch=synthConfig["pitch"]
				synth.pitch=max(0,min(oldPitch+synthConfig["capPitchChange"],100))
			count = len(char)
			index=count+1
			log.io("Speaking character %r"%char)
			speechSequence=[LangChangeCommand(locale)] if config.conf['speech']['autoLanguageSwitching'] else []
			if len(char) == 1 and synthConfig["useSpellingFunctionality"]:
				speechSequence.append(CharacterModeCommand(True))
			if index is not None:
				speechSequence.append(IndexCommand(index))
			speechSequence.append(char)
			synth.speak(speechSequence)
			if uppercase and synth.isSupported("pitch") and synthConfig["capPitchChange"]:
				synth.pitch=oldPitch
			# Wait (yielding) until the synth reports our index, unless spelling a single character.
			while textLength>1 and (isPaused or getLastSpeechIndex()!=index):
				for x in xrange(2):
					args=yield
					if args: buf.append(args)
			if uppercase and synthConfig["beepForCapitals"]:
				tones.beep(2000,50)
		# Yield once per completed request, collecting any newly sent request.
		args=yield
		if args: buf.append(args)
def speakObjectProperties(obj,reason=controlTypes.REASON_QUERY,index=None,**allowedProperties):
	"""Speak the requested properties of an NVDA object, caching the values on the
	object so that later calls with reason REASON_CHANGE speak only what changed.
	@param obj: the object whose properties should be spoken.
	@param reason: one of the controlTypes.REASON_* constants; REASON_ONLYCACHE updates the cache without speaking.
	@param index: optional speech index to mark this utterance with.
	@param allowedProperties: property names mapped to True/False controlling which properties are fetched and spoken.
	"""
	#Fetch the values for all wanted properties
	newPropertyValues={}
	positionInfo=None
	for name,value in allowedProperties.iteritems():
		if name=="includeTableCellCoords":
			# This is verbosity info.
			newPropertyValues[name]=value
		elif name.startswith('positionInfo_') and value:
			# Fetch positionInfo once; individual keys are extracted below.
			if positionInfo is None:
				positionInfo=obj.positionInfo
		elif value:
			try:
				newPropertyValues[name]=getattr(obj,name)
			except NotImplementedError:
				pass
	if positionInfo:
		if allowedProperties.get('positionInfo_level',False) and 'level' in positionInfo:
			newPropertyValues['positionInfo_level']=positionInfo['level']
		if allowedProperties.get('positionInfo_indexInGroup',False) and 'indexInGroup' in positionInfo:
			newPropertyValues['positionInfo_indexInGroup']=positionInfo['indexInGroup']
		if allowedProperties.get('positionInfo_similarItemsInGroup',False) and 'similarItemsInGroup' in positionInfo:
			newPropertyValues['positionInfo_similarItemsInGroup']=positionInfo['similarItemsInGroup']
	#Fetched the cached properties and update them with the new ones
	oldCachedPropertyValues=getattr(obj,'_speakObjectPropertiesCache',{}).copy()
	cachedPropertyValues=oldCachedPropertyValues.copy()
	cachedPropertyValues.update(newPropertyValues)
	obj._speakObjectPropertiesCache=cachedPropertyValues
	#If we should only cache we can stop here
	if reason==controlTypes.REASON_ONLYCACHE:
		return
	#If only speaking change, then filter out all values that havn't changed
	if reason==controlTypes.REASON_CHANGE:
		for name in set(newPropertyValues)&set(oldCachedPropertyValues):
			if newPropertyValues[name]==oldCachedPropertyValues[name]:
				del newPropertyValues[name]
			elif name=="states": #states need specific handling
				oldStates=oldCachedPropertyValues[name]
				newStates=newPropertyValues[name]
				# Gained states are spoken as states; lost states as negative states.
				newPropertyValues['states']=newStates-oldStates
				newPropertyValues['negativeStates']=oldStates-newStates
	#properties such as states need to know the role to speak properly, give it as a _ name
	newPropertyValues['_role']=newPropertyValues.get('role',obj.role)
	# The real states are needed also, as the states entry might be filtered.
	newPropertyValues['_states']=obj.states
	if "rowNumber" in newPropertyValues or "columnNumber" in newPropertyValues:
		# We're reporting table cell info, so pass the table ID.
		try:
			newPropertyValues["_tableID"]=obj.tableID
		except NotImplementedError:
			pass
	newPropertyValues['current']=obj.isCurrent
	if allowedProperties.get('placeholder', False):
		newPropertyValues['placeholder']=obj.placeholder
	#Get the speech text for the properties we want to speak, and then speak it
	text=getSpeechTextForProperties(reason,**newPropertyValues)
	if text:
		speakText(text,index=index)
def _speakPlaceholderIfEmpty(info, obj, reason):
	""" attempt to speak placeholder attribute if the textInfo 'info' is empty
	@return: True if info was considered empty, and we attempted to speak the placeholder value.
	False if info was not considered empty.
	"""
	if not obj._isTextEmpty:
		return False
	# The control has no text: announce its placeholder property instead.
	speakObjectProperties(obj,reason=reason,placeholder=True)
	return True
def speakObject(obj,reason=controlTypes.REASON_QUERY,index=None):
	"""Speak an NVDA object: its properties and, where appropriate, its text content.
	@param obj: the object to speak.
	@param reason: one of the controlTypes.REASON_* constants describing why the object is being spoken.
	@param index: optional speech index to mark this utterance with.
	"""
	from NVDAObjects import NVDAObjectTextInfo
	role=obj.role
	# Choose when we should report the content of this object's textInfo, rather than just the object's value
	import browseMode
	shouldReportTextContent=not (
		# focusEntered should never present text content
		(reason==controlTypes.REASON_FOCUSENTERED) or
		# The rootNVDAObject of a browseMode document in browse mode (not passThrough) should never present text content
		(isinstance(obj.treeInterceptor,browseMode.BrowseModeDocumentTreeInterceptor) and not obj.treeInterceptor.passThrough and obj==obj.treeInterceptor.rootNVDAObject) or
		# objects that do not report as having navigableText should not report their text content either
		not obj._hasNavigableText
	)
	# Start with every property allowed, then prune per reason and user configuration below.
	allowProperties={'name':True,'role':True,'roleText':True,'states':True,'value':True,'description':True,'keyboardShortcut':True,'positionInfo_level':True,'positionInfo_indexInGroup':True,'positionInfo_similarItemsInGroup':True,"cellCoordsText":True,"rowNumber":True,"columnNumber":True,"includeTableCellCoords":True,"columnCount":True,"rowCount":True,"rowHeaderText":True,"columnHeaderText":True,"rowSpan":True,"columnSpan":True}
	if reason==controlTypes.REASON_FOCUSENTERED:
		allowProperties["value"]=False
		allowProperties["keyboardShortcut"]=False
		allowProperties["positionInfo_level"]=False
		# Aside from excluding some properties, focus entered should be spoken like focus.
		reason=controlTypes.REASON_FOCUS
	if not config.conf["presentation"]["reportObjectDescriptions"]:
		allowProperties["description"]=False
	if not config.conf["presentation"]["reportKeyboardShortcuts"]:
		allowProperties["keyboardShortcut"]=False
	if not config.conf["presentation"]["reportObjectPositionInformation"]:
		allowProperties["positionInfo_level"]=False
		allowProperties["positionInfo_indexInGroup"]=False
		allowProperties["positionInfo_similarItemsInGroup"]=False
	if reason!=controlTypes.REASON_QUERY:
		allowProperties["rowCount"]=False
		allowProperties["columnCount"]=False
	formatConf=config.conf["documentFormatting"]
	if not formatConf["reportTableCellCoords"]:
		allowProperties["cellCoordsText"]=False
		# rowNumber and columnNumber might be needed even if we're not reporting coordinates.
		allowProperties["includeTableCellCoords"]=False
	if not formatConf["reportTableHeaders"]:
		allowProperties["rowHeaderText"]=False
		allowProperties["columnHeaderText"]=False
	if (not formatConf["reportTables"]
			or (not formatConf["reportTableCellCoords"] and not formatConf["reportTableHeaders"])):
		# We definitely aren't reporting any table info at all.
		allowProperties["rowNumber"]=False
		allowProperties["columnNumber"]=False
		allowProperties["rowSpan"]=False
		allowProperties["columnSpan"]=False
	if shouldReportTextContent:
		# The text content will be spoken below, so don't also speak the value property.
		allowProperties['value']=False
	speakObjectProperties(obj,reason=reason,index=index,**allowProperties)
	if reason==controlTypes.REASON_ONLYCACHE:
		return
	if shouldReportTextContent:
		try:
			info=obj.makeTextInfo(textInfos.POSITION_SELECTION)
			if not info.isCollapsed:
				# if there is selected text, then there is a value and we do not report placeholder
				# Translators: This is spoken to indicate what has been selected. for example 'selected hello world'
				speakSelectionMessage(_("selected %s"),info.text)
			else:
				info.expand(textInfos.UNIT_LINE)
				_speakPlaceholderIfEmpty(info, obj, reason)
				speakTextInfo(info,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET)
		except:
			# NOTE(review): bare except — objects without a usable selection fall back to speaking all of their text.
			newInfo=obj.makeTextInfo(textInfos.POSITION_ALL)
			if not _speakPlaceholderIfEmpty(newInfo, obj, reason):
				speakTextInfo(newInfo,unit=textInfos.UNIT_PARAGRAPH,reason=controlTypes.REASON_CARET)
	elif role==controlTypes.ROLE_MATH:
		# Math content is presented via the math presentation provider when available.
		import mathPres
		mathPres.ensureInit()
		if mathPres.speechProvider:
			try:
				speak(mathPres.speechProvider.getSpeechForMathMl(obj.mathMl))
			except (NotImplementedError, LookupError):
				pass
def speakText(text,index=None,reason=controlTypes.REASON_MESSAGE,symbolLevel=None):
	"""Speaks some text.
	@param text: The text to speak.
	@type text: str
	@param index: The index to mark this text with, which can be used later to determine whether this piece of text has been spoken.
	@type index: int
	@param reason: The reason for this speech; one of the controlTypes.REASON_* constants.
	@param symbolLevel: The symbol verbosity level; C{None} (default) to use the user's configuration.
	"""
	sequence=[]
	if index is not None:
		sequence.append(IndexCommand(index))
	if text is not None:
		if isBlank(text):
			# Translators: This is spoken when the line is considered blank.
			sequence.append(_("blank"))
		else:
			sequence.append(text)
	speak(sequence,symbolLevel=symbolLevel)
#: Splits a string into leading line-internal whitespace (spaces/tabs, but not line breaks) and the remainder.
RE_INDENTATION_SPLIT = re.compile(r"^([^\S\r\n\f\v]*)(.*)$", re.UNICODE | re.DOTALL)

def splitTextIndentation(text):
	"""Splits indentation from the rest of the text.
	@param text: The text to split.
	@type text: basestring
	@return: Tuple of indentation and content.
	@rtype: (basestring, basestring)
	"""
	indentation, content = RE_INDENTATION_SPLIT.match(text).groups()
	return (indentation, content)
#: Matches a maximal run of one repeated whitespace character (e.g. "   " or "\t\t").
RE_INDENTATION_CONVERT = re.compile(r"(?P<char>\s)(?P=char)*", re.UNICODE)
IDT_BASE_FREQUENCY = 220 #One octave below middle A.
IDT_TONE_DURATION = 80 #Milliseconds
#: Indentation amounts (in quarter tones) above this are spoken rather than beeped (see getIndentationSpeech).
IDT_MAX_SPACES = 72
def getIndentationSpeech(indentation, formatConfig):
	"""Retrieves the phrase to be spoken for a given string of indentation.
	May also play an indentation tone when enabled in formatConfig.
	@param indentation: The string of indentation.
	@type indentation: unicode
	@param formatConfig: The configuration to use.
	@type formatConfig: dict
	@return: The phrase to be spoken.
	@rtype: unicode
	"""
	speechIndentConfig = formatConfig["reportLineIndentation"]
	# Tones only make sense when speech is actually being spoken (not beeps/off modes).
	toneIndentConfig = formatConfig["reportLineIndentationWithTones"] and speechMode == speechMode_talk
	if not indentation:
		if toneIndentConfig:
			tones.beep(IDT_BASE_FREQUENCY, IDT_TONE_DURATION)
		# Translators: This is spoken when the given line has no indentation.
		return (_("no indent") if speechIndentConfig else "")
	#The non-breaking space is semantically a space, so we replace it here.
	indentation = indentation.replace(u"\xa0", u" ")
	res = []
	locale=languageHandler.getLanguage()
	# Pitch rises one quarter tone per space; a tab counts as four spaces.
	quarterTones = 0
	for m in RE_INDENTATION_CONVERT.finditer(indentation):
		raw = m.group()
		symbol = characterProcessing.processSpeechSymbol(locale, raw[0])
		count = len(raw)
		if symbol == raw[0]:
			# There is no replacement for this character, so do nothing.
			res.append(raw)
		elif count == 1:
			res.append(symbol)
		else:
			res.append(u"{count} {symbol}".format(count=count, symbol=symbol))
		quarterTones += (count*4 if raw[0]== "\t" else count)
	speak = speechIndentConfig
	if toneIndentConfig:
		if quarterTones <= IDT_MAX_SPACES:
			#Remove me during speech refactor.
			# The /24.0 keeps the exponent a float under Python 2 integer division (24 quarter tones per octave).
			pitch = IDT_BASE_FREQUENCY*2**(quarterTones/24.0) #24 quarter tones per octave.
			tones.beep(pitch, IDT_TONE_DURATION)
		else:
			#we have more than 72 spaces (18 tabs), and must speak it since we don't want to hurt the users ears.
			speak = True
	return (" ".join(res) if speak else "")
def speak(speechSequence,symbolLevel=None):
	"""Speaks a sequence of text and speech commands
	@param speechSequence: the sequence of text and L{SpeechCommand} objects to speak
	@param symbolLevel: The symbol verbosity level; C{None} (default) to use the user's configuration.
	"""
	if not speechSequence: #Pointless - nothing to speak
		return
	import speechViewer
	if speechViewer.isActive:
		for item in speechSequence:
			if isinstance(item, basestring):
				speechViewer.appendText(item)
	global beenCanceled, curWordChars
	curWordChars=[]
	if speechMode==speechMode_off:
		return
	elif speechMode==speechMode_beeps:
		tones.beep(config.conf["speech"]["beepSpeechModePitch"],speechMode_beeps_ms)
		return
	if isPaused:
		cancelSpeech()
	beenCanceled=False
	#Filter out redundant LangChangeCommand objects
	#And also fill in default values
	autoLanguageSwitching=config.conf['speech']['autoLanguageSwitching']
	autoDialectSwitching=config.conf['speech']['autoDialectSwitching']
	curLanguage=defaultLanguage=getCurrentLanguage()
	prevLanguage=None
	defaultLanguageRoot=defaultLanguage.split('_')[0]
	oldSpeechSequence=speechSequence
	speechSequence=[]
	for item in oldSpeechSequence:
		if isinstance(item,LangChangeCommand):
			# Language commands are coalesced: only emitted immediately before text that needs them.
			if not autoLanguageSwitching: continue
			curLanguage=item.lang
			if not curLanguage or (not autoDialectSwitching and curLanguage.split('_')[0]==defaultLanguageRoot):
				curLanguage=defaultLanguage
		elif isinstance(item,basestring):
			if not item: continue
			if autoLanguageSwitching and curLanguage!=prevLanguage:
				speechSequence.append(LangChangeCommand(curLanguage))
				prevLanguage=curLanguage
			speechSequence.append(item)
		else:
			# Other speech commands pass through unchanged.
			speechSequence.append(item)
	if not speechSequence:
		# After normalisation, the sequence is empty.
		# There's nothing to speak.
		return
	log.io("Speaking %r" % speechSequence)
	if symbolLevel is None:
		symbolLevel=config.conf["speech"]["symbolLevel"]
	# Second pass: process each text item (dictionaries/symbols) in its language,
	# appending the chunk separator except while in character mode.
	curLanguage=defaultLanguage
	inCharacterMode=False
	for index in xrange(len(speechSequence)):
		item=speechSequence[index]
		if isinstance(item,CharacterModeCommand):
			inCharacterMode=item.state
		if autoLanguageSwitching and isinstance(item,LangChangeCommand):
			curLanguage=item.lang
		if isinstance(item,basestring):
			speechSequence[index]=processText(curLanguage,item,symbolLevel)
			if not inCharacterMode:
				speechSequence[index]+=CHUNK_SEPARATOR
	getSynth().speak(speechSequence)
def speakSelectionMessage(message,text):
	"""Speak a selection-related message, substituting the given text into it.

	To avoid speaking very long selections verbatim, text of 512 characters
	or more is replaced by a character count.
	"""
	if len(text) >= 512:
		# Translators: This is spoken when the user has selected a large portion of text. Example output "1000 characters"
		speakMessage(message % _("%d characters") % len(text))
	else:
		speakMessage(message % text)
def speakSelectionChange(oldInfo,newInfo,speakSelected=True,speakUnselected=True,generalize=False):
	"""Speaks a change in selection, either selected or unselected text.
	@param oldInfo: a TextInfo instance representing what the selection was before
	@type oldInfo: L{textInfos.TextInfo}
	@param newInfo: a TextInfo instance representing what the selection is now
	@type newInfo: L{textInfos.TextInfo}
	@param speakSelected: whether newly selected text should be spoken
	@type speakSelected: boolean
	@param speakUnselected: whether newly unselected text should be spoken
	@type speakUnselected: boolean
	@param generalize: if True, then this function knows that the text may have changed between the creation of the oldInfo and newInfo objects, meaning that changes need to be spoken more generally, rather than speaking the specific text, as the bounds may be all wrong.
	@type generalize: boolean
	"""
	selectedTextList=[]
	unselectedTextList=[]
	if newInfo.isCollapsed and oldInfo.isCollapsed:
		# Neither the old nor the new selection contains any text, so nothing changed.
		return
	startToStart=newInfo.compareEndPoints(oldInfo,"startToStart")
	startToEnd=newInfo.compareEndPoints(oldInfo,"startToEnd")
	endToStart=newInfo.compareEndPoints(oldInfo,"endToStart")
	endToEnd=newInfo.compareEndPoints(oldInfo,"endToEnd")
	if speakSelected and oldInfo.isCollapsed:
		# There was no selection before, so the entire new selection is newly selected.
		selectedTextList.append(newInfo.text)
	elif speakUnselected and newInfo.isCollapsed:
		# The selection was removed entirely, so the entire old selection was unselected.
		unselectedTextList.append(oldInfo.text)
	else:
		if startToEnd>0 or endToStart<0:
			# The old and new selections do not overlap at all.
			if speakSelected and not newInfo.isCollapsed:
				selectedTextList.append(newInfo.text)
			if speakUnselected and not oldInfo.isCollapsed:
				unselectedTextList.append(oldInfo.text)
		else:
			# The selections overlap; report only the differing portions.
			if speakSelected and startToStart<0 and not newInfo.isCollapsed:
				# Text newly selected before the start of the old selection.
				tempInfo=newInfo.copy()
				tempInfo.setEndPoint(oldInfo,"endToStart")
				selectedTextList.append(tempInfo.text)
			if speakSelected and endToEnd>0 and not newInfo.isCollapsed:
				# Text newly selected after the end of the old selection.
				tempInfo=newInfo.copy()
				tempInfo.setEndPoint(oldInfo,"startToEnd")
				selectedTextList.append(tempInfo.text)
			if startToStart>0 and not oldInfo.isCollapsed:
				# Text before the start of the new selection was unselected.
				tempInfo=oldInfo.copy()
				tempInfo.setEndPoint(newInfo,"endToStart")
				unselectedTextList.append(tempInfo.text)
			if endToEnd<0 and not oldInfo.isCollapsed:
				# Text after the end of the new selection was unselected.
				tempInfo=oldInfo.copy()
				tempInfo.setEndPoint(newInfo,"startToEnd")
				unselectedTextList.append(tempInfo.text)
	locale=getCurrentLanguage()
	if speakSelected:
		if not generalize:
			for text in selectedTextList:
				if len(text)==1:
					# Pass single characters through speech symbol processing.
					text=characterProcessing.processSpeechSymbol(locale,text)
				# Translators: This is spoken while the user is in the process of selecting something, For example: "hello selected"
				speakSelectionMessage(_("%s selected"),text)
		elif len(selectedTextList)>0:
			text=newInfo.text
			if len(text)==1:
				text=characterProcessing.processSpeechSymbol(locale,text)
			# Translators: This is spoken to indicate what has been selected. for example 'selected hello world'
			speakSelectionMessage(_("selected %s"),text)
	if speakUnselected:
		if not generalize:
			for text in unselectedTextList:
				if len(text)==1:
					text=characterProcessing.processSpeechSymbol(locale,text)
				# Translators: This is spoken to indicate what has been unselected. for example 'hello unselected'
				speakSelectionMessage(_("%s unselected"),text)
		elif len(unselectedTextList)>0:
			if not newInfo.isCollapsed:
				text=newInfo.text
				if len(text)==1:
					text=characterProcessing.processSpeechSymbol(locale,text)
				# Translators: This is spoken to indicate when the previous selection was removed and a new selection was made. for example 'hello world selected instead'
				speakSelectionMessage(_("%s selected instead"),text)
			else:
				# Translators: Reported when selection is removed.
				speakMessage(_("selection removed"))
#: The number of typed characters for which to suppress speech.
#: Incremented by L{_suppressSpeakTypedCharacters} and decremented by
#: L{speakTypedCharacters} as each suppressed character arrives.
_suppressSpeakTypedCharactersNumber = 0
#: The time at which suppressed typed characters were sent.
#: C{None} when no suppression is in progress; used to time out stale suppression.
_suppressSpeakTypedCharactersTime = None
def _suppressSpeakTypedCharacters(number):
	"""Suppress speaking of typed characters.
	This should be used when sending a string of characters to the system
	and those characters should not be spoken individually as if the user were typing them.
	The suppression count accumulates across calls; it is consumed
	(or timed out) by L{speakTypedCharacters}.
	@param number: The number of characters to suppress.
	@type number: int
	"""
	global _suppressSpeakTypedCharactersNumber, _suppressSpeakTypedCharactersTime
	# Record when suppression was requested so stale suppression can time out.
	_suppressSpeakTypedCharactersTime = time.time()
	_suppressSpeakTypedCharactersNumber += number
#: The character to use when masking characters in protected fields.
PROTECTED_CHAR = "*"
#: The first character which is not a Unicode control character.
#: This is used to test whether a character should be spoken as a typed character;
#: i.e. it should have a visual or spatial representation.
#: Characters that compare lower than this (the control characters) are not spoken when typed.
FIRST_NONCONTROL_CHAR = u" "
def speakTypedCharacters(ch):
	"""Process a single typed character, speaking it and/or a completed word as configured.

	Word characters (letters, marks, numbers) are buffered; when a non-word
	character arrives, the buffered word may be spoken. Characters typed into
	protected (password) fields are masked with L{PROTECTED_CHAR}.
	@param ch: the character that was typed.
	"""
	global curWordChars
	protected=api.isTypingProtected()
	# Mask the character when typing into a protected (e.g. password) field.
	realChar=PROTECTED_CHAR if protected else ch
	if unicodedata.category(ch)[0] in "LMN":
		# Letters, marks and numbers accumulate into the current word buffer.
		curWordChars.append(realChar)
	elif ch=="\b":
		# Backspace, so remove the last character from our buffer.
		del curWordChars[-1:]
	elif ch==u'\u007f':
		# delete character produced in some apps with control+backspace
		return
	elif curWordChars:
		# A word boundary arrived while a word was buffered: flush the word.
		word="".join(curWordChars)
		curWordChars=[]
		if log.isEnabledFor(log.IO):
			log.io("typed word: %s"%word)
		if config.conf["keyboard"]["speakTypedWords"] and not protected:
			speakText(word)
	global _suppressSpeakTypedCharactersNumber, _suppressSpeakTypedCharactersTime
	if _suppressSpeakTypedCharactersNumber > 0:
		# We primarily suppress based on character count and still have characters to suppress.
		# However, we time out after a short while just in case.
		suppress = time.time() - _suppressSpeakTypedCharactersTime <= 0.1
		if suppress:
			_suppressSpeakTypedCharactersNumber -= 1
		else:
			_suppressSpeakTypedCharactersNumber = 0
			_suppressSpeakTypedCharactersTime = None
	else:
		suppress = False
	if not suppress and config.conf["keyboard"]["speakTypedCharacters"] and ch >= FIRST_NONCONTROL_CHAR:
		speakSpelling(realChar)
class SpeakTextInfoState(object):
	"""Caches the state of speakTextInfo such as the current controlField stack, current formatfield and indentation."""
	__slots__=[
		'objRef',
		'controlFieldStackCache',
		'formatFieldAttributesCache',
		'indentationCache',
	]
	def __init__(self,obj):
		"""
		@param obj: either the object whose speech state should be tracked,
			or an existing L{SpeakTextInfoState} to duplicate.
		"""
		if isinstance(obj,SpeakTextInfoState):
			# Copy construction: share the weak reference, duplicate the caches below.
			prevState=obj
			self.objRef=prevState.objRef
		else:
			# Hold the object weakly so caching state does not keep it alive;
			# pick up any state previously stored on it.
			self.objRef=weakref.ref(obj)
			prevState=getattr(obj,'_speakTextInfoState',None)
		if prevState:
			self.controlFieldStackCache=list(prevState.controlFieldStackCache)
			self.formatFieldAttributesCache=prevState.formatFieldAttributesCache
			self.indentationCache=prevState.indentationCache
		else:
			self.controlFieldStackCache=[]
			self.formatFieldAttributesCache={}
			self.indentationCache=""
	def updateObj(self):
		"""Store a copy of this state on the tracked object, if it still exists."""
		obj=self.objRef()
		if obj:
			obj._speakTextInfoState=self.copy()
	def copy(self):
		"""Return a new state duplicating this one."""
		return self.__class__(self)
def _speakTextInfo_addMath(speechSequence, info, field):
	"""Append speech for a math control field to the given speech sequence.

	Silently does nothing if there is no math speech provider, or the MathML
	cannot be retrieved or rendered.
	"""
	import mathPres
	mathPres.ensureInit()
	provider=mathPres.speechProvider
	if not provider:
		# No math presentation provider is available; nothing to add.
		return
	try:
		speechSequence.extend(provider.getSpeechForMathMl(info.getMathMl(field)))
	except (NotImplementedError, LookupError):
		# The object can't supply MathML for this field; skip math speech.
		return
def speakTextInfo(info,useCache=True,formatConfig=None,unit=None,reason=controlTypes.REASON_QUERY,index=None,onlyInitialFields=False,suppressBlanks=False):
	"""Speaks the text and fields of the given textInfo.
	Control field entry/exit, format field changes, automatic language switching
	and line indentation are reported according to formatConfig, using cached
	state so that only changes relative to the previous call are spoken.
	@param info: the textInfo whose text and fields should be spoken.
	@type info: L{textInfos.TextInfo}
	@param useCache: if True, state (control field stack, format field attributes and indentation) is cached on info.obj between calls; may instead be a L{SpeakTextInfoState} instance to use and update directly; if False, no state is cached.
	@type useCache: bool or L{SpeakTextInfoState}
	@param formatConfig: the document formatting configuration to use; defaults to config.conf["documentFormatting"].
	@type formatConfig: dict
	@param unit: the unit being moved by (a textInfos.UNIT_* constant), if any; character and word units enable extra detail, while the line unit enables indentation reporting.
	@param reason: the reason for speaking (a controlTypes.REASON_* constant); controlTypes.REASON_ONLYCACHE updates the caches without speaking.
	@param index: if not None, an L{IndexCommand} with this index is included in the sequence sent to the synthesizer.
	@type index: int
	@param onlyInitialFields: if True, only the initial control/format fields are spoken, not the text content.
	@type onlyInitialFields: bool
	@param suppressBlanks: if True, "blank" is not reported when there is nothing to speak.
	@type suppressBlanks: bool
	"""
	onlyCache=reason==controlTypes.REASON_ONLYCACHE
	if isinstance(useCache,SpeakTextInfoState):
		speakTextInfoState=useCache
	elif useCache:
		speakTextInfoState=SpeakTextInfoState(info.obj)
	else:
		speakTextInfoState=None
	autoLanguageSwitching=config.conf['speech']['autoLanguageSwitching']
	extraDetail=unit in (textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD)
	if not formatConfig:
		formatConfig=config.conf["documentFormatting"]
	if extraDetail:
		formatConfig=formatConfig.copy()
		formatConfig['extraDetail']=True
	reportIndentation=unit==textInfos.UNIT_LINE and ( formatConfig["reportLineIndentation"] or formatConfig["reportLineIndentationWithTones"])
	speechSequence=[]
	#Fetch the last controlFieldStack, or make a blank one
	controlFieldStackCache=speakTextInfoState.controlFieldStackCache if speakTextInfoState else []
	formatFieldAttributesCache=speakTextInfoState.formatFieldAttributesCache if speakTextInfoState else {}
	textWithFields=info.getTextWithFields(formatConfig)
	# We don't care about node bounds, especially when comparing fields.
	# Remove them.
	for command in textWithFields:
		if not isinstance(command,textInfos.FieldCommand):
			continue
		field=command.field
		if not field:
			continue
		try:
			del field["_startOfNode"]
		except KeyError:
			pass
		try:
			del field["_endOfNode"]
		except KeyError:
			pass
	#Make a new controlFieldStack and formatField from the textInfo's initialFields
	newControlFieldStack=[]
	newFormatField=textInfos.FormatField()
	initialFields=[]
	for field in textWithFields:
		if isinstance(field,textInfos.FieldCommand) and field.command in ("controlStart","formatChange"):
			initialFields.append(field.field)
		else:
			break
	if len(initialFields)>0:
		del textWithFields[0:len(initialFields)]
	# Count and strip the trailing controlEnd commands; field exits are
	# generated from the control field stacks instead.
	endFieldCount=0
	for field in reversed(textWithFields):
		if isinstance(field,textInfos.FieldCommand) and field.command=="controlEnd":
			endFieldCount+=1
		else:
			break
	if endFieldCount>0:
		del textWithFields[0-endFieldCount:]
	for field in initialFields:
		if isinstance(field,textInfos.ControlField):
			newControlFieldStack.append(field)
		elif isinstance(field,textInfos.FormatField):
			newFormatField.update(field)
		else:
			raise ValueError("unknown field: %s"%field)
	#Calculate how many fields in the old and new controlFieldStacks are the same
	commonFieldCount=0
	for count in xrange(min(len(newControlFieldStack),len(controlFieldStackCache))):
		# #2199: When comparing controlFields try using uniqueID if it exists before resorting to comparing the entire dictionary
		oldUniqueID=controlFieldStackCache[count].get('uniqueID')
		newUniqueID=newControlFieldStack[count].get('uniqueID')
		if ((oldUniqueID is not None or newUniqueID is not None) and newUniqueID==oldUniqueID) or (newControlFieldStack[count]==controlFieldStackCache[count]):
			commonFieldCount+=1
		else:
			break
	# #2591: Only if the reason is not focus, Speak the exit of any controlFields not in the new stack.
	# We don't do this for focus because hearing "out of list", etc. isn't useful when tabbing or using quick navigation and makes navigation less efficient.
	if reason!=controlTypes.REASON_FOCUS:
		endingBlock=False
		for count in reversed(xrange(commonFieldCount,len(controlFieldStackCache))):
			text=info.getControlFieldSpeech(controlFieldStackCache[count],controlFieldStackCache[0:count],"end_removedFromControlFieldStack",formatConfig,extraDetail,reason=reason)
			if text:
				speechSequence.append(text)
			if not endingBlock and reason==controlTypes.REASON_SAYALL:
				endingBlock=bool(int(controlFieldStackCache[count].get('isBlock',0)))
		if endingBlock:
			speechSequence.append(SpeakWithoutPausesBreakCommand())
	# The TextInfo should be considered blank if we are only exiting fields (i.e. we aren't entering any new fields and there is no text).
	isTextBlank=True
	# Even when there's no speakable text, we still need to notify the synth of the index.
	if index is not None:
		speechSequence.append(IndexCommand(index))
	#Get speech text for any fields that are in both controlFieldStacks, if extra detail is not requested
	if not extraDetail:
		for count in xrange(commonFieldCount):
			field=newControlFieldStack[count]
			text=info.getControlFieldSpeech(field,newControlFieldStack[0:count],"start_inControlFieldStack",formatConfig,extraDetail,reason=reason)
			if text:
				speechSequence.append(text)
				isTextBlank=False
			if field.get("role")==controlTypes.ROLE_MATH:
				isTextBlank=False
				_speakTextInfo_addMath(speechSequence,info,field)
	#Get speech text for any fields in the new controlFieldStack that are not in the old controlFieldStack
	for count in xrange(commonFieldCount,len(newControlFieldStack)):
		field=newControlFieldStack[count]
		text=info.getControlFieldSpeech(field,newControlFieldStack[0:count],"start_addedToControlFieldStack",formatConfig,extraDetail,reason=reason)
		if text:
			speechSequence.append(text)
			isTextBlank=False
		if field.get("role")==controlTypes.ROLE_MATH:
			isTextBlank=False
			_speakTextInfo_addMath(speechSequence,info,field)
		commonFieldCount+=1
	#Fetch the text for format field attributes that have changed between what was previously cached, and this textInfo's initialFormatField.
	text=info.getFormatFieldSpeech(newFormatField,formatFieldAttributesCache,formatConfig,reason=reason,unit=unit,extraDetail=extraDetail,initialFormat=True)
	if text:
		speechSequence.append(text)
	if autoLanguageSwitching:
		language=newFormatField.get('language')
		speechSequence.append(LangChangeCommand(language))
		lastLanguage=language
	# If only the initial fields were requested, or we are moving by character/word
	# over a single character, speak the fields now and spell the character itself.
	if onlyInitialFields or (unit in (textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD) and len(textWithFields)>0 and len(textWithFields[0])==1 and all((isinstance(x,textInfos.FieldCommand) and x.command=="controlEnd") for x in itertools.islice(textWithFields,1,None) )):
		if not onlyCache:
			if onlyInitialFields or any(isinstance(x,basestring) for x in speechSequence):
				speak(speechSequence)
			if not onlyInitialFields:
				speakSpelling(textWithFields[0],locale=language if autoLanguageSwitching else None)
		if useCache:
			speakTextInfoState.controlFieldStackCache=newControlFieldStack
			speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache
			if not isinstance(useCache,SpeakTextInfoState):
				speakTextInfoState.updateObj()
		return
	#Move through the field commands, getting speech text for all controlStarts, controlEnds and formatChange commands
	#But also keep newControlFieldStack up to date as we will need it for the ends
	# Add any text to a separate list, as it must be handled differently.
	#Also make sure that LangChangeCommand objects are added before any controlField or formatField speech
	relativeSpeechSequence=[]
	inTextChunk=False
	allIndentation=""
	indentationDone=False
	for command in textWithFields:
		if isinstance(command,basestring):
			if reportIndentation and not indentationDone:
				indentation,command=splitTextIndentation(command)
				# Combine all indentation into one string for later processing.
				allIndentation+=indentation
				if command:
					# There was content after the indentation, so there is no more indentation.
					indentationDone=True
			if command:
				if inTextChunk:
					relativeSpeechSequence[-1]+=command
				else:
					relativeSpeechSequence.append(command)
					inTextChunk=True
		elif isinstance(command,textInfos.FieldCommand):
			newLanguage=None
			if command.command=="controlStart":
				# Control fields always start a new chunk, even if they have no field text.
				inTextChunk=False
				fieldText=info.getControlFieldSpeech(command.field,newControlFieldStack,"start_relative",formatConfig,extraDetail,reason=reason)
				newControlFieldStack.append(command.field)
			elif command.command=="controlEnd":
				# Control fields always start a new chunk, even if they have no field text.
				inTextChunk=False
				fieldText=info.getControlFieldSpeech(newControlFieldStack[-1],newControlFieldStack[0:-1],"end_relative",formatConfig,extraDetail,reason=reason)
				del newControlFieldStack[-1]
				if commonFieldCount>len(newControlFieldStack):
					commonFieldCount=len(newControlFieldStack)
			elif command.command=="formatChange":
				fieldText=info.getFormatFieldSpeech(command.field,formatFieldAttributesCache,formatConfig,reason=reason,unit=unit,extraDetail=extraDetail)
				if fieldText:
					inTextChunk=False
				if autoLanguageSwitching:
					newLanguage=command.field.get('language')
					if lastLanguage!=newLanguage:
						# The language has changed, so this starts a new text chunk.
						inTextChunk=False
			if not inTextChunk:
				if fieldText:
					if autoLanguageSwitching and lastLanguage is not None:
						# Fields must be spoken in the default language.
						relativeSpeechSequence.append(LangChangeCommand(None))
						lastLanguage=None
					relativeSpeechSequence.append(fieldText)
				if command.command=="controlStart" and command.field.get("role")==controlTypes.ROLE_MATH:
					_speakTextInfo_addMath(relativeSpeechSequence,info,command.field)
				if autoLanguageSwitching and newLanguage!=lastLanguage:
					relativeSpeechSequence.append(LangChangeCommand(newLanguage))
					lastLanguage=newLanguage
	# Only speak indentation if it changed since the last call.
	if reportIndentation and speakTextInfoState and allIndentation!=speakTextInfoState.indentationCache:
		indentationSpeech=getIndentationSpeech(allIndentation, formatConfig)
		# NOTE(review): this assumes the last item in speechSequence is the
		# LangChangeCommand appended above when autoLanguageSwitching is on.
		if autoLanguageSwitching and speechSequence[-1].lang is not None:
			# Indentation must be spoken in the default language,
			# but the initial format field specified a different language.
			# Insert the indentation before the LangChangeCommand.
			speechSequence.insert(-1, indentationSpeech)
		else:
			speechSequence.append(indentationSpeech)
		if speakTextInfoState: speakTextInfoState.indentationCache=allIndentation
	# Don't add this text if it is blank.
	relativeBlank=True
	for x in relativeSpeechSequence:
		if isinstance(x,basestring) and not isBlank(x):
			relativeBlank=False
			break
	if not relativeBlank:
		speechSequence.extend(relativeSpeechSequence)
		isTextBlank=False
	#Finally get speech text for any fields left in new controlFieldStack that are common with the old controlFieldStack (for closing), if extra detail is not requested
	if autoLanguageSwitching and lastLanguage is not None:
		speechSequence.append(LangChangeCommand(None))
		lastLanguage=None
	if not extraDetail:
		for count in reversed(xrange(min(len(newControlFieldStack),commonFieldCount))):
			text=info.getControlFieldSpeech(newControlFieldStack[count],newControlFieldStack[0:count],"end_inControlFieldStack",formatConfig,extraDetail,reason=reason)
			if text:
				speechSequence.append(text)
				isTextBlank=False
	# If there is nothing that should cause the TextInfo to be considered non-blank, blank should be reported, unless we are doing a say all.
	if not suppressBlanks and reason != controlTypes.REASON_SAYALL and isTextBlank:
		# Translators: This is spoken when the line is considered blank.
		speechSequence.append(_("blank"))
	#Cache a copy of the new controlFieldStack for future use
	if useCache:
		speakTextInfoState.controlFieldStackCache=list(newControlFieldStack)
		speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache
		if not isinstance(useCache,SpeakTextInfoState):
			speakTextInfoState.updateObj()
	if not onlyCache and speechSequence:
		if reason==controlTypes.REASON_SAYALL:
			speakWithoutPauses(speechSequence)
		else:
			speak(speechSequence)
def getSpeechTextForProperties(reason=controlTypes.REASON_QUERY,**propertyValues):
	"""Generates speech text for the given object properties (name, role, states, value, table coordinates, position info, etc.).
	Table cell and tree level reporting use the module-level caches
	(oldTableID, oldRowNumber, oldRowSpan, oldColumnNumber, oldColumnSpan, oldTreeLevel)
	so unchanged coordinates and levels are not repeated.
	@param reason: the reason the properties are being spoken (a controlTypes.REASON_* constant).
	@param propertyValues: the properties to report, passed as keyword arguments.
		Keys prefixed with an underscore (e.g. _role, _states, _tableID) are used
		for processing but are not themselves spoken.
	@return: the text to speak, with the individual pieces joined by CHUNK_SEPARATOR.
	"""
	global oldTreeLevel, oldTableID, oldRowNumber, oldRowSpan, oldColumnNumber, oldColumnSpan
	textList=[]
	name=propertyValues.get('name')
	if name:
		textList.append(name)
	if 'role' in propertyValues:
		role=propertyValues['role']
		speakRole=True
	elif '_role' in propertyValues:
		speakRole=False
		role=propertyValues['_role']
	else:
		speakRole=False
		role=controlTypes.ROLE_UNKNOWN
	# Values are suppressed for roles in silentValuesForRoles.
	value=propertyValues.get('value') if role not in controlTypes.silentValuesForRoles else None
	cellCoordsText=propertyValues.get('cellCoordsText')
	rowNumber=propertyValues.get('rowNumber')
	columnNumber=propertyValues.get('columnNumber')
	includeTableCellCoords=propertyValues.get('includeTableCellCoords',True)
	# Don't speak the role for chart elements.
	if role==controlTypes.ROLE_CHARTELEMENT:
		speakRole=False
	roleText=propertyValues.get('roleText')
	# Speak the role when explicit roleText was supplied, when the reason calls for it,
	# or when there is nothing else to identify the control; but never speak the role
	# of math content for caret movement or say all.
	if speakRole and (roleText or reason not in (controlTypes.REASON_SAYALL,controlTypes.REASON_CARET,controlTypes.REASON_FOCUS) or not (name or value or cellCoordsText or rowNumber or columnNumber) or role not in controlTypes.silentRolesOnFocus) and (role!=controlTypes.ROLE_MATH or reason not in (controlTypes.REASON_CARET,controlTypes.REASON_SAYALL)):
		textList.append(roleText if roleText else controlTypes.roleLabels[role])
	if value:
		textList.append(value)
	states=propertyValues.get('states',set())
	# _states (if given) are the control's real states; states are the ones to report.
	realStates=propertyValues.get('_states',states)
	negativeStates=propertyValues.get('negativeStates',set())
	if states or negativeStates:
		textList.extend(controlTypes.processAndLabelStates(role, realStates, reason, states, negativeStates))
	if 'description' in propertyValues:
		textList.append(propertyValues['description'])
	if 'keyboardShortcut' in propertyValues:
		textList.append(propertyValues['keyboardShortcut'])
	if includeTableCellCoords and cellCoordsText:
		textList.append(cellCoordsText)
	if cellCoordsText or rowNumber or columnNumber:
		tableID = propertyValues.get("_tableID")
		# Always treat the table as different if there is no tableID.
		sameTable = (tableID and tableID == oldTableID)
		# Don't update the oldTableID if no tableID was given.
		if tableID and not sameTable:
			oldTableID = tableID
		rowSpan = propertyValues.get("rowSpan")
		columnSpan = propertyValues.get("columnSpan")
		# Only report the row if the table changed or the row/span differs from last time.
		if rowNumber and (not sameTable or rowNumber != oldRowNumber or rowSpan != oldRowSpan):
			rowHeaderText = propertyValues.get("rowHeaderText")
			if rowHeaderText:
				textList.append(rowHeaderText)
			if includeTableCellCoords and not cellCoordsText:
				# Translators: Speaks current row number (example output: row 3).
				textList.append(_("row %s")%rowNumber)
				if rowSpan>1 and columnSpan<=1:
					# Translators: Speaks the row span added to the current row number (example output: through 5).
					textList.append(_("through %s")%(rowNumber+rowSpan-1))
			oldRowNumber = rowNumber
			oldRowSpan = rowSpan
		# Only report the column if the table changed or the column/span differs from last time.
		if columnNumber and (not sameTable or columnNumber != oldColumnNumber or columnSpan != oldColumnSpan):
			columnHeaderText = propertyValues.get("columnHeaderText")
			if columnHeaderText:
				textList.append(columnHeaderText)
			if includeTableCellCoords and not cellCoordsText:
				# Translators: Speaks current column number (example output: column 3).
				textList.append(_("column %s")%columnNumber)
				if columnSpan>1 and rowSpan<=1:
					# Translators: Speaks the column span added to the current column number (example output: through 5).
					textList.append(_("through %s")%(columnNumber+columnSpan-1))
			oldColumnNumber = columnNumber
			oldColumnSpan = columnSpan
		if includeTableCellCoords and not cellCoordsText and rowSpan>1 and columnSpan>1:
			# Translators: Speaks the row and column span added to the current row and column numbers
			# (example output: through row 5 column 3).
			textList.append(_("through row {row} column {column}").format(
				row=rowNumber+rowSpan-1,
				column=columnNumber+columnSpan-1
			))
	rowCount=propertyValues.get('rowCount',0)
	columnCount=propertyValues.get('columnCount',0)
	if rowCount and columnCount:
		# Translators: Speaks number of columns and rows in a table (example output: with 3 rows and 2 columns).
		textList.append(_("with {rowCount} rows and {columnCount} columns").format(rowCount=rowCount,columnCount=columnCount))
	elif columnCount and not rowCount:
		# Translators: Speaks number of columns (example output: with 4 columns).
		textList.append(_("with %s columns")%columnCount)
	elif rowCount and not columnCount:
		# Translators: Speaks number of rows (example output: with 2 rows).
		textList.append(_("with %s rows")%rowCount)
	if rowCount or columnCount:
		# The caller is entering a table, so ensure that it is treated as a new table, even if the previous table was the same.
		oldTableID = None
	ariaCurrent = propertyValues.get('current', False)
	if ariaCurrent:
		try:
			textList.append(controlTypes.isCurrentLabels[ariaCurrent])
		except KeyError:
			# Unknown aria-current value; fall back to the generic "current" label.
			log.debugWarning("Aria-current value not handled: %s"%ariaCurrent)
			textList.append(controlTypes.isCurrentLabels[True])
	placeholder = propertyValues.get('placeholder', None)
	if placeholder:
		textList.append(placeholder)
	indexInGroup=propertyValues.get('positionInfo_indexInGroup',0)
	similarItemsInGroup=propertyValues.get('positionInfo_similarItemsInGroup',0)
	if 0<indexInGroup<=similarItemsInGroup:
		# Translators: Spoken to indicate the position of an item in a group of items (such as a list).
		# {number} is replaced with the number of the item in the group.
		# {total} is replaced with the total number of items in the group.
		textList.append(_("{number} of {total}").format(number=indexInGroup, total=similarItemsInGroup))
	if 'positionInfo_level' in propertyValues:
		level=propertyValues.get('positionInfo_level',None)
		role=propertyValues.get('role',None)
		if level is not None:
			# For tree view and list items, only announce the level when it changed,
			# and put it first so it is heard before the item itself.
			if role in (controlTypes.ROLE_TREEVIEWITEM,controlTypes.ROLE_LISTITEM) and level!=oldTreeLevel:
				textList.insert(0,_("level %s")%level)
				oldTreeLevel=level
			else:
				# Translators: Speaks the item level in treeviews (example output: level 2).
				textList.append(_('level %s')%propertyValues['positionInfo_level'])
	return CHUNK_SEPARATOR.join([x for x in textList if x])
def getControlFieldSpeech(attrs,ancestorAttrs,fieldType,formatConfig=None,extraDetail=False,reason=None):
	"""Generates the speech text for entering, exiting or remaining inside a control field.
	@param attrs: the attributes of the control field being spoken.
	@type attrs: L{textInfos.ControlField}
	@param ancestorAttrs: the attributes of the field's ancestor control fields.
	@type ancestorAttrs: list
	@param fieldType: how the field relates to the movement; one of
		"start_addedToControlFieldStack", "start_relative", "start_inControlFieldStack",
		"end_removedFromControlFieldStack", "end_relative" or "end_inControlFieldStack".
	@param formatConfig: the document formatting configuration to use;
		defaults to config.conf["documentFormatting"].
	@type formatConfig: dict
	@param extraDetail: whether extra detail should be reported (i.e. the user
		is moving by character or word).
	@type extraDetail: bool
	@param reason: the reason for speaking (a controlTypes.REASON_* constant).
	@return: the text to speak; an empty string if nothing should be spoken.
	"""
	# Hidden fields produce no speech.
	if attrs.get('isHidden'):
		return u""
	if not formatConfig:
		formatConfig=config.conf["documentFormatting"]
	presCat=attrs.getPresentationCategory(ancestorAttrs,formatConfig, reason=reason)
	childControlCount=int(attrs.get('_childcontrolcount',"0"))
	# Name and description are only spoken for focus (or when explicitly forced).
	if reason==controlTypes.REASON_FOCUS or attrs.get('alwaysReportName',False):
		name=attrs.get('name',"")
	else:
		name=""
	role=attrs.get('role',controlTypes.ROLE_UNKNOWN)
	states=attrs.get('states',set())
	keyboardShortcut=attrs.get('keyboardShortcut', "")
	ariaCurrent=attrs.get('current', None)
	placeholderValue=attrs.get('placeholder', None)
	value=attrs.get('value',"")
	if reason==controlTypes.REASON_FOCUS or attrs.get('alwaysReportDescription',False):
		description=attrs.get('description',"")
	else:
		description=""
	level=attrs.get('level',None)
	# Layout tables are not treated as tables for reporting purposes.
	if presCat != attrs.PRESCAT_LAYOUT:
		tableID = attrs.get("table-id")
	else:
		tableID = None
	roleText=attrs.get('roleText')
	if not roleText:
		roleText=getSpeechTextForProperties(reason=reason,role=role)
	stateText=getSpeechTextForProperties(reason=reason,states=states,_role=role)
	keyboardShortcutText=getSpeechTextForProperties(reason=reason,keyboardShortcut=keyboardShortcut) if config.conf["presentation"]["reportKeyboardShortcuts"] else ""
	ariaCurrentText=getSpeechTextForProperties(reason=reason,current=ariaCurrent)
	placeholderText=getSpeechTextForProperties(reason=reason,placeholder=placeholderValue)
	nameText=getSpeechTextForProperties(reason=reason,name=name)
	valueText=getSpeechTextForProperties(reason=reason,value=value)
	descriptionText=(getSpeechTextForProperties(reason=reason,description=description)
		if config.conf["presentation"]["reportObjectDescriptions"] else "")
	levelText=getSpeechTextForProperties(reason=reason,positionInfo_level=level)
	# Determine under what circumstances this node should be spoken.
	# speakEntry: Speak when the user enters the control.
	# speakWithinForLine: When moving by line, speak when the user is already within the control.
	# speakExitForLine: When moving by line, speak when the user exits the control.
	# speakExitForOther: When moving by word or character, speak when the user exits the control.
	speakEntry=speakWithinForLine=speakExitForLine=speakExitForOther=False
	if presCat == attrs.PRESCAT_SINGLELINE:
		speakEntry=True
		speakWithinForLine=True
		speakExitForOther=True
	elif presCat in (attrs.PRESCAT_MARKER, attrs.PRESCAT_CELL):
		speakEntry=True
	elif presCat == attrs.PRESCAT_CONTAINER:
		speakEntry=True
		speakExitForLine=True
		speakExitForOther=True
	# Determine the order of speech.
	# speakContentFirst: Speak the content before the control field info.
	speakContentFirst = reason == controlTypes.REASON_FOCUS and presCat != attrs.PRESCAT_CONTAINER and role not in (controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_COMBOBOX) and not tableID and controlTypes.STATE_EDITABLE not in states
	# speakStatesFirst: Speak the states before the role.
	speakStatesFirst=role==controlTypes.ROLE_LINK
	# Determine what text to speak.
	# Special cases
	if speakEntry and childControlCount and fieldType=="start_addedToControlFieldStack" and role==controlTypes.ROLE_LIST and controlTypes.STATE_READONLY in states:
		# List.
		# Translators: Speaks number of items in a list (example output: list with 5 items).
		return roleText+" "+_("with %s items")%childControlCount
	elif fieldType=="start_addedToControlFieldStack" and role==controlTypes.ROLE_TABLE and tableID:
		# Table.
		return " ".join((nameText,roleText,stateText, getSpeechTextForProperties(_tableID=tableID, rowCount=attrs.get("table-rowcount"), columnCount=attrs.get("table-columncount")),levelText))
	elif nameText and reason==controlTypes.REASON_FOCUS and fieldType == "start_addedToControlFieldStack" and role==controlTypes.ROLE_GROUPING:
		# #3321: Report the name of groupings (such as fieldsets) for quicknav and focus jumps
		return " ".join((nameText,roleText))
	elif fieldType in ("start_addedToControlFieldStack","start_relative") and role in (controlTypes.ROLE_TABLECELL,controlTypes.ROLE_TABLECOLUMNHEADER,controlTypes.ROLE_TABLEROWHEADER) and tableID:
		# Table cell.
		reportTableHeaders = formatConfig["reportTableHeaders"]
		reportTableCellCoords = formatConfig["reportTableCellCoords"]
		getProps = {
			'rowNumber': attrs.get("table-rownumber"),
			'columnNumber': attrs.get("table-columnnumber"),
			'rowSpan': attrs.get("table-rowsspanned"),
			'columnSpan': attrs.get("table-columnsspanned"),
			'includeTableCellCoords': reportTableCellCoords
		}
		if reportTableHeaders:
			getProps['rowHeaderText'] = attrs.get("table-rowheadertext")
			getProps['columnHeaderText'] = attrs.get("table-columnheadertext")
		return (getSpeechTextForProperties(_tableID=tableID, **getProps)
			+ (" %s" % stateText if stateText else "")
			+ (" %s" % ariaCurrentText if ariaCurrent else ""))
	# General cases
	elif (
		(speakEntry and ((speakContentFirst and fieldType in ("end_relative","end_inControlFieldStack")) or (not speakContentFirst and fieldType in ("start_addedToControlFieldStack","start_relative"))))
		or (speakWithinForLine and not speakContentFirst and not extraDetail and fieldType=="start_inControlFieldStack")
	):
		out = []
		content = attrs.get("content")
		if content and speakContentFirst:
			out.append(content)
		if placeholderValue:
			# A placeholder should only be present when the control has no value.
			if valueText:
				log.error("valueText exists when expected none: valueText:'%s' placeholderText:'%s'"%(valueText,placeholderText))
			valueText = placeholderText
		out.extend(x for x in (nameText,(stateText if speakStatesFirst else roleText),(roleText if speakStatesFirst else stateText),ariaCurrentText,valueText,descriptionText,levelText,keyboardShortcutText) if x)
		if content and not speakContentFirst:
			out.append(content)
		return CHUNK_SEPARATOR.join(out)
	elif fieldType in ("end_removedFromControlFieldStack","end_relative") and roleText and ((not extraDetail and speakExitForLine) or (extraDetail and speakExitForOther)):
		# Translators: Indicates end of something (example output: at the end of a list, speaks out of list).
		return _("out of %s")%roleText
	# Special cases
	elif not speakEntry and fieldType in ("start_addedToControlFieldStack","start_relative"):
		out = []
		if not extraDetail and controlTypes.STATE_CLICKABLE in states:
			# Clickable.
			out.append(getSpeechTextForProperties(states=set([controlTypes.STATE_CLICKABLE])))
		if ariaCurrent:
			out.append(ariaCurrentText)
		return CHUNK_SEPARATOR.join(out)
	else:
		return ""
def getFormatFieldSpeech(attrs,attrsCache=None,formatConfig=None,reason=None,unit=None,extraDetail=False , initialFormat=False, separator=CHUNK_SEPARATOR):
if not formatConfig:
formatConfig=config.conf["documentFormatting"]
textList=[]
if formatConfig["reportTables"]:
tableInfo=attrs.get("table-info")
oldTableInfo=attrsCache.get("table-info") if attrsCache is not None else None
text=getTableInfoSpeech(tableInfo,oldTableInfo,extraDetail=extraDetail)
if text:
textList.append(text)
if formatConfig["reportPage"]:
pageNumber=attrs.get("page-number")
oldPageNumber=attrsCache.get("page-number") if attrsCache is not None else None
if pageNumber and pageNumber!=oldPageNumber:
# Translators: Indicates the page number in a document.
# %s will be replaced with the page number.
text=_("page %s")%pageNumber
textList.append(text)
sectionNumber=attrs.get("section-number")
oldSectionNumber=attrsCache.get("section-number") if attrsCache is not None else None
if sectionNumber and sectionNumber!=oldSectionNumber:
# Translators: Indicates the section number in a document.
# %s will be replaced with the section number.
text=_("section %s")%sectionNumber
textList.append(text)
textColumnCount=attrs.get("text-column-count")
oldTextColumnCount=attrsCache.get("text-column-count") if attrsCache is not None else None
textColumnNumber=attrs.get("text-column-number")
oldTextColumnNumber=attrsCache.get("text-column-number") if attrsCache is not None else None
# Because we do not want to report the number of columns when a document is just opened and there is only
# one column. This would be verbose, in the standard case.
# column number has changed, or the columnCount has changed
# but not if the columnCount is 1 or less and there is no old columnCount.
if (((textColumnNumber and textColumnNumber!=oldTextColumnNumber) or
(textColumnCount and textColumnCount!=oldTextColumnCount)) and not
(textColumnCount and int(textColumnCount) <=1 and oldTextColumnCount == None)) :
if textColumnNumber and textColumnCount:
# Translators: Indicates the text column number in a document.
# {0} will be replaced with the text column number.
# {1} will be replaced with the number of text columns.
text=_("column {0} of {1}").format(textColumnNumber,textColumnCount)
textList.append(text)
elif textColumnCount:
# Translators: Indicates the text column number in a document.
# %s will be replaced with the number of text columns.
text=_("%s columns")%(textColumnCount)
textList.append(text)
sectionBreakType=attrs.get("section-break")
if sectionBreakType:
if sectionBreakType == "0" : # Continuous section break.
text=_("continuous section break")
elif sectionBreakType == "1" : # New column section break.
text=_("new column section break")
elif sectionBreakType == "2" : # New page section break.
text=_("new page section break")
elif sectionBreakType == "3" : # Even pages section break.
text=_("even pages section break")
elif sectionBreakType == "4" : # Odd pages section break.
text=_("odd pages section break")
else:
text=""
textList.append(text)
columnBreakType=attrs.get("column-break")
if columnBreakType:
textList.append(_("column break"))
if formatConfig["reportHeadings"]:
headingLevel=attrs.get("heading-level")
oldHeadingLevel=attrsCache.get("heading-level") if attrsCache is not None else None
# headings should be spoken not only if they change, but also when beginning to speak lines or paragraphs
# Ensuring a similar experience to if a heading was a controlField
if headingLevel and (initialFormat and (reason==controlTypes.REASON_FOCUS or unit in (textInfos.UNIT_LINE,textInfos.UNIT_PARAGRAPH)) or headingLevel!=oldHeadingLevel):
# Translators: Speaks the heading level (example output: heading level 2).
text=_("heading level %d")%headingLevel
textList.append(text)
if formatConfig["reportStyle"]:
style=attrs.get("style")
oldStyle=attrsCache.get("style") if attrsCache is not None else None
if style!=oldStyle:
if style:
# Translators: Indicates the style of text.
# A style is a collection of formatting settings and depends on the application.
# %s will be replaced with the name of the style.
text=_("style %s")%style
else:
# Translators: Indicates that text has reverted to the default style.
# A style is a collection of formatting settings and depends on the application.
text=_("default style")
textList.append(text)
if formatConfig["reportBorderStyle"]:
borderStyle=attrs.get("border-style")
oldBorderStyle=attrsCache.get("border-style") if attrsCache is not None else None
if borderStyle!=oldBorderStyle:
if borderStyle:
text=borderStyle
else:
# Translators: Indicates that cell does not have border lines.
text=_("no border lines")
textList.append(text)
if formatConfig["reportFontName"]:
fontFamily=attrs.get("font-family")
oldFontFamily=attrsCache.get("font-family") if attrsCache is not None else None
if fontFamily and fontFamily!=oldFontFamily:
textList.append(fontFamily)
fontName=attrs.get("font-name")
oldFontName=attrsCache.get("font-name") if attrsCache is not None else None
if fontName and fontName!=oldFontName:
textList.append(fontName)
if formatConfig["reportFontSize"]:
fontSize=attrs.get("font-size")
oldFontSize=attrsCache.get("font-size") if attrsCache is not None else None
if fontSize and fontSize!=oldFontSize:
textList.append(fontSize)
if formatConfig["reportColor"]:
color=attrs.get("color")
oldColor=attrsCache.get("color") if attrsCache is not None else None
backgroundColor=attrs.get("background-color")
oldBackgroundColor=attrsCache.get("background-color") if attrsCache is not None else None
backgroundColor2=attrs.get("background-color2")
oldBackgroundColor2=attrsCache.get("background-color2") if attrsCache is not None else None
bgColorChanged=backgroundColor!=oldBackgroundColor or backgroundColor2!=oldBackgroundColor2
bgColorText=backgroundColor.name if isinstance(backgroundColor,colors.RGB) else unicode(backgroundColor)
if backgroundColor2:
bg2Name=backgroundColor2.name if isinstance(backgroundColor2,colors.RGB) else unicode(backgroundColor2)
# Translators: Reported when there are two background colors.
# This occurs when, for example, a gradient pattern is applied to a spreadsheet cell.
# {color1} will be replaced with the first background color.
# {color2} will be replaced with the second background color.
bgColorText=_("{color1} to {color2}").format(color1=bgColorText,color2=bg2Name)
if color and backgroundColor and color!=oldColor and bgColorChanged:
# Translators: Reported when both the text and background colors change.
# {color} will be replaced with the text color.
# {backgroundColor} will be replaced with the background color.
textList.append(_("{color} on {backgroundColor}").format(
color=color.name if isinstance(color,colors.RGB) else unicode(color),
backgroundColor=bgColorText))
elif color and color!=oldColor:
# Translators: Reported when the text color changes (but not the background color).
# {color} will be replaced with the text color.
textList.append(_("{color}").format(color=color.name if isinstance(color,colors.RGB) else unicode(color)))
elif backgroundColor and bgColorChanged:
# Translators: Reported when the background color changes (but not the text color).
# {backgroundColor} will be replaced with the background color.
textList.append(_("{backgroundColor} background").format(backgroundColor=bgColorText))
backgroundPattern=attrs.get("background-pattern")
oldBackgroundPattern=attrsCache.get("background-pattern") if attrsCache is not None else None
if backgroundPattern and backgroundPattern!=oldBackgroundPattern:
textList.append(_("background pattern {pattern}").format(pattern=backgroundPattern))
if formatConfig["reportLineNumber"]:
lineNumber=attrs.get("line-number")
oldLineNumber=attrsCache.get("line-number") if attrsCache is not None else None
if lineNumber is not None and lineNumber!=oldLineNumber:
# Translators: Indicates the line number of the text.
# %s will be replaced with the line number.
text=_("line %s")%lineNumber
textList.append(text)
if formatConfig["reportRevisions"]:
# Insertion
revision=attrs.get("revision-insertion")
oldRevision=attrsCache.get("revision-insertion") if attrsCache is not None else None
if (revision or oldRevision is not None) and revision!=oldRevision:
# Translators: Reported when text is marked as having been inserted
text=(_("inserted") if revision
# Translators: Reported when text is no longer marked as having been inserted.
else _("not inserted"))
textList.append(text)
revision=attrs.get("revision-deletion")
oldRevision=attrsCache.get("revision-deletion") if attrsCache is not None else None
if (revision or oldRevision is not None) and revision!=oldRevision:
# Translators: Reported when text is marked as having been deleted
text=(_("deleted") if revision
# Translators: Reported when text is no longer marked as having been deleted.
else _("not deleted"))
textList.append(text)
revision=attrs.get("revision")
oldRevision=attrsCache.get("revision") if attrsCache is not None else None
if (revision or oldRevision is not None) and revision!=oldRevision:
# Translators: Reported when text is revised.
text=(_("revised %s"%revision) if revision
# Translators: Reported when text is not revised.
else _("no revised %s")%oldRevision)
textList.append(text)
if formatConfig["reportEmphasis"]:
# marked text
marked=attrs.get("marked")
oldMarked=attrsCache.get("marked") if attrsCache is not None else None
if (marked or oldMarked is not None) and marked!=oldMarked:
# Translators: Reported when text is marked
text=(_("marked") if marked
# Translators: Reported when text is no longer marked
else _("not marked"))
textList.append(text)
# strong text
strong=attrs.get("strong")
oldStrong=attrsCache.get("strong") if attrsCache is not None else None
if (strong or oldStrong is not None) and strong!=oldStrong:
# Translators: Reported when text is marked as strong (e.g. bold)
text=(_("strong") if strong
# Translators: Reported when text is no longer marked as strong (e.g. bold)
else _("not strong"))
textList.append(text)
# emphasised text
emphasised=attrs.get("emphasised")
oldEmphasised=attrsCache.get("emphasised") if attrsCache is not None else None
if (emphasised or oldEmphasised is not None) and emphasised!=oldEmphasised:
# Translators: Reported when text is marked as emphasised
text=(_("emphasised") if emphasised
# Translators: Reported when text is no longer marked as emphasised
else _("not emphasised"))
textList.append(text)
if formatConfig["reportFontAttributes"]:
bold=attrs.get("bold")
oldBold=attrsCache.get("bold") if attrsCache is not None else None
if (bold or oldBold is not None) and bold!=oldBold:
# Translators: Reported when text is bolded.
text=(_("bold") if bold
# Translators: Reported when text is not bolded.
else _("no bold"))
textList.append(text)
italic=attrs.get("italic")
oldItalic=attrsCache.get("italic") if attrsCache is not None else None
if (italic or oldItalic is not None) and italic!=oldItalic:
# Translators: Reported when text is italicized.
text=(_("italic") if italic
# Translators: Reported when text is not italicized.
else _("no italic"))
textList.append(text)
strikethrough=attrs.get("strikethrough")
oldStrikethrough=attrsCache.get("strikethrough") if attrsCache is not None else None
if (strikethrough or oldStrikethrough is not None) and strikethrough!=oldStrikethrough:
if strikethrough:
# Translators: Reported when text is formatted with double strikethrough.
# See http://en.wikipedia.org/wiki/Strikethrough
text=(_("double strikethrough") if strikethrough=="double"
# Translators: Reported when text is formatted with strikethrough.
# See http://en.wikipedia.org/wiki/Strikethrough
else _("strikethrough"))
else:
# Translators: Reported when text is formatted without strikethrough.
# See http://en.wikipedia.org/wiki/Strikethrough
text=_("no strikethrough")
textList.append(text)
underline=attrs.get("underline")
oldUnderline=attrsCache.get("underline") if attrsCache is not None else None
if (underline or oldUnderline is not None) and underline!=oldUnderline:
# Translators: Reported when text is underlined.
text=(_("underlined") if underline
# Translators: Reported when text is not underlined.
else _("not underlined"))
textList.append(text)
textPosition=attrs.get("text-position")
oldTextPosition=attrsCache.get("text-position") if attrsCache is not None else None
if (textPosition or oldTextPosition is not None) and textPosition!=oldTextPosition:
textPosition=textPosition.lower() if textPosition else textPosition
if textPosition=="super":
# Translators: Reported for superscript text.
text=_("superscript")
elif textPosition=="sub":
# Translators: Reported for subscript text.
text=_("subscript")
else:
# Translators: Reported for text which is at the baseline position;
# i.e. not superscript or subscript.
text=_("baseline")
textList.append(text)
if formatConfig["reportAlignment"]:
textAlign=attrs.get("text-align")
oldTextAlign=attrsCache.get("text-align") if attrsCache is not None else None
if (textAlign or oldTextAlign is not None) and textAlign!=oldTextAlign:
textAlign=textAlign.lower() if textAlign else textAlign
if textAlign=="left":
# Translators: Reported when text is left-aligned.
text=_("align left")
elif textAlign=="center":
# Translators: Reported when text is centered.
text=_("align center")
elif textAlign=="right":
# Translators: Reported when text is right-aligned.
text=_("align right")
elif textAlign=="justify":
# Translators: Reported when text is justified.
# See http://en.wikipedia.org/wiki/Typographic_alignment#Justified
text=_("align justify")
elif textAlign=="distribute":
# Translators: Reported when text is justified with character spacing (Japanese etc)
# See http://kohei.us/2010/01/21/distributed-text-justification/
text=_("align distributed")
else:
# Translators: Reported when text has reverted to default alignment.
text=_("align default")
textList.append(text)
verticalAlign=attrs.get("vertical-align")
oldverticalAlign=attrsCache.get("vertical-align") if attrsCache is not None else None
if (verticalAlign or oldverticalAlign is not None) and verticalAlign!=oldverticalAlign:
verticalAlign=verticalAlign.lower() if verticalAlign else verticalAlign
if verticalAlign=="top":
# Translators: Reported when text is vertically top-aligned.
text=_("vertical align top")
elif verticalAlign in("center","middle"):
# Translators: Reported when text is vertically middle aligned.
text=_("vertical align middle")
elif verticalAlign=="bottom":
# Translators: Reported when text is vertically bottom-aligned.
text=_("vertical align bottom")
elif verticalAlign=="baseline":
# Translators: Reported when text is vertically aligned on the baseline.
text=_("vertical align baseline")
elif verticalAlign=="justify":
# Translators: Reported when text is vertically justified.
text=_("vertical align justified")
elif verticalAlign=="distributed":
# Translators: Reported when text is vertically justified but with character spacing (For some Asian content).
text=_("vertical align distributed")
else:
# Translators: Reported when text has reverted to default vertical alignment.
text=_("vertical align default")
textList.append(text)
if formatConfig["reportParagraphIndentation"]:
indentLabels={
'left-indent':(
# Translators: the label for paragraph format left indent
_("left indent"),
# Translators: the message when there is no paragraph format left indent
_("no left indent"),
),
'right-indent':(
# Translators: the label for paragraph format right indent
_("right indent"),
# Translators: the message when there is no paragraph format right indent
_("no right indent"),
),
'hanging-indent':(
# Translators: the label for paragraph format hanging indent
_("hanging indent"),
# Translators: the message when there is no paragraph format hanging indent
_("no hanging indent"),
),
'first-line-indent':(
# Translators: the label for paragraph format first line indent
_("first line indent"),
# Translators: the message when there is no paragraph format first line indent
_("no first line indent"),
),
}
for attr,(label,noVal) in indentLabels.iteritems():
newVal=attrs.get(attr)
oldVal=attrsCache.get(attr) if attrsCache else None
if (newVal or oldVal is not None) and newVal!=oldVal:
if newVal:
textList.append(u"%s %s"%(label,newVal))
else:
textList.append(noVal)
if formatConfig["reportLineSpacing"]:
lineSpacing=attrs.get("line-spacing")
oldLineSpacing=attrsCache.get("line-spacing") if attrsCache is not None else None
if (lineSpacing or oldLineSpacing is not None) and lineSpacing!=oldLineSpacing:
# Translators: a type of line spacing (E.g. single line spacing)
textList.append(_("line spacing %s")%lineSpacing)
if formatConfig["reportLinks"]:
link=attrs.get("link")
oldLink=attrsCache.get("link") if attrsCache is not None else None
if (link or oldLink is not None) and link!=oldLink:
text=_("link") if link else _("out of %s")%_("link")
textList.append(text)
if formatConfig["reportComments"]:
comment=attrs.get("comment")
oldComment=attrsCache.get("comment") if attrsCache is not None else None
if (comment or oldComment is not None) and comment!=oldComment:
if comment:
# Translators: Reported when text contains a comment.
text=_("has comment")
textList.append(text)
elif extraDetail:
# Translators: Reported when text no longer contains a comment.
text=_("out of comment")
textList.append(text)
if formatConfig["reportSpellingErrors"]:
invalidSpelling=attrs.get("invalid-spelling")
oldInvalidSpelling=attrsCache.get("invalid-spelling") if attrsCache is not None else None
if (invalidSpelling or oldInvalidSpelling is not None) and invalidSpelling!=oldInvalidSpelling:
if invalidSpelling:
# Translators: Reported when text contains a spelling error.
text=_("spelling error")
elif extraDetail:
# Translators: Reported when moving out of text containing a spelling error.
text=_("out of spelling error")
else:
text=""
if text:
textList.append(text)
invalidGrammar=attrs.get("invalid-grammar")
oldInvalidGrammar=attrsCache.get("invalid-grammar") if attrsCache is not None else None
if (invalidGrammar or oldInvalidGrammar is not None) and invalidGrammar!=oldInvalidGrammar:
if invalidGrammar:
# Translators: Reported when text contains a grammar error.
text=_("grammar error")
elif extraDetail:
# Translators: Reported when moving out of text containing a grammar error.
text=_("out of grammar error")
else:
text=""
if text:
textList.append(text)
if unit in (textInfos.UNIT_LINE,textInfos.UNIT_SENTENCE,textInfos.UNIT_PARAGRAPH,textInfos.UNIT_READINGCHUNK):
linePrefix=attrs.get("line-prefix")
if linePrefix:
textList.append(linePrefix)
if attrsCache is not None:
attrsCache.clear()
attrsCache.update(attrs)
return separator.join(textList)
def getTableInfoSpeech(tableInfo,oldTableInfo,extraDetail=False):
if tableInfo is None and oldTableInfo is None:
return ""
if tableInfo is None and oldTableInfo is not None:
# Translators: Indicates end of a table.
return _("out of table")
if not oldTableInfo or tableInfo.get("table-id")!=oldTableInfo.get("table-id"):
newTable=True
else:
newTable=False
textList=[]
if newTable:
columnCount=tableInfo.get("column-count",0)
rowCount=tableInfo.get("row-count",0)
# Translators: reports number of columns and rows in a table (example output: table with 3 columns and 5 rows).
text=_("table with {columnCount} columns and {rowCount} rows").format(columnCount=columnCount,rowCount=rowCount)
textList.append(text)
oldColumnNumber=oldTableInfo.get("column-number",0) if oldTableInfo else 0
columnNumber=tableInfo.get("column-number",0)
if columnNumber!=oldColumnNumber:
textList.append(_("column %s")%columnNumber)
oldRowNumber=oldTableInfo.get("row-number",0) if oldTableInfo else 0
rowNumber=tableInfo.get("row-number",0)
if rowNumber!=oldRowNumber:
textList.append(_("row %s")%rowNumber)
return " ".join(textList)
re_last_pause=re.compile(ur"^(.*(?<=[^\s.!?])[.!?][\"'”’)]?(?:\s+|$))(.*$)",re.DOTALL|re.UNICODE)
def speakWithoutPauses(speechSequence,detectBreaks=True):
"""
Speaks the speech sequences given over multiple calls, only sending to the synth at acceptable phrase or sentence boundaries, or when given None for the speech sequence.
"""
lastStartIndex=0
#Break on all explicit break commands
if detectBreaks and speechSequence:
sequenceLen=len(speechSequence)
for index in xrange(sequenceLen):
if isinstance(speechSequence[index],SpeakWithoutPausesBreakCommand):
if index>0 and lastStartIndex<index:
speakWithoutPauses(speechSequence[lastStartIndex:index],detectBreaks=False)
speakWithoutPauses(None)
lastStartIndex=index+1
if lastStartIndex<sequenceLen:
speakWithoutPauses(speechSequence[lastStartIndex:],detectBreaks=False)
return
finalSpeechSequence=[] #To be spoken now
pendingSpeechSequence=[] #To be saved off for speaking later
if speechSequence is None: #Requesting flush
if speakWithoutPauses._pendingSpeechSequence:
#Place the last incomplete phrase in to finalSpeechSequence to be spoken now
finalSpeechSequence=speakWithoutPauses._pendingSpeechSequence
speakWithoutPauses._pendingSpeechSequence=[]
else: #Handling normal speech
#Scan the given speech and place all completed phrases in finalSpeechSequence to be spoken,
#And place the final incomplete phrase in pendingSpeechSequence
for index in xrange(len(speechSequence)-1,-1,-1):
item=speechSequence[index]
if isinstance(item,basestring):
m=re_last_pause.match(item)
if m:
before,after=m.groups()
if after:
pendingSpeechSequence.append(after)
if before:
finalSpeechSequence.extend(speakWithoutPauses._pendingSpeechSequence)
speakWithoutPauses._pendingSpeechSequence=[]
finalSpeechSequence.extend(speechSequence[0:index])
finalSpeechSequence.append(before)
# Apply the last language change to the pending sequence.
# This will need to be done for any other speech change commands introduced in future.
for changeIndex in xrange(index-1,-1,-1):
change=speechSequence[changeIndex]
if not isinstance(change,LangChangeCommand):
continue
pendingSpeechSequence.append(change)
break
break
else:
pendingSpeechSequence.append(item)
else:
pendingSpeechSequence.append(item)
if pendingSpeechSequence:
pendingSpeechSequence.reverse()
speakWithoutPauses._pendingSpeechSequence.extend(pendingSpeechSequence)
#Scan the final speech sequence backwards
for item in reversed(finalSpeechSequence):
if isinstance(item,IndexCommand):
speakWithoutPauses.lastSentIndex=item.index
break
if finalSpeechSequence:
speak(finalSpeechSequence)
speakWithoutPauses.lastSentIndex=None
speakWithoutPauses._pendingSpeechSequence=[]
class SpeechCommand(object):
"""
The base class for objects that can be inserted between string of text for parituclar speech functions that convey things such as indexing or voice parameter changes.
"""
class IndexCommand(SpeechCommand):
"""Represents an index within some speech."""
def __init__(self,index):
"""
@param index: the value of this index
@type index: integer
"""
if not isinstance(index,int): raise ValueError("index must be int, not %s"%type(index))
self.index=index
def __repr__(self):
return "IndexCommand(%r)" % self.index
class CharacterModeCommand(SpeechCommand):
"""Turns character mode on and off for speech synths."""
def __init__(self,state):
"""
@param state: if true character mode is on, if false its turned off.
@type state: boolean
"""
if not isinstance(state,bool): raise ValueError("state must be boolean, not %s"%type(state))
self.state=state
def __repr__(self):
return "CharacterModeCommand(%r)" % self.state
class LangChangeCommand(SpeechCommand):
"""A command to switch the language within speech."""
def __init__(self,lang):
"""
@param lang: the language to switch to: If None then the NVDA locale will be used.
@type lang: string
"""
self.lang=lang # if lang else languageHandler.getLanguage()
def __repr__(self):
return "LangChangeCommand (%r)"%self.lang
class SpeakWithoutPausesBreakCommand(SpeechCommand):
"""Forces speakWithoutPauses to flush its buffer and therefore break the sentence at this point.
This should only be used with the L{speakWithoutPauses} function.
This will be removed during processing.
"""
class BreakCommand(SpeechCommand):
"""Insert a break between words.
"""
def __init__(self, time=0):
"""
@param time: The duration of the pause to be inserted in milliseconds.
@param time: int
"""
self.time = time
def __repr__(self):
return "BreakCommand(time=%d)" % self.time
class PitchCommand(SpeechCommand):
"""Change the pitch of the voice.
"""
def __init__(self, multiplier=1):
"""
@param multiplier: The number by which to multiply the current pitch setting;
e.g. 0.5 is half, 1 returns to the current pitch setting.
@param multiplier: int/float
"""
self.multiplier = multiplier
def __repr__(self):
return "PitchCommand(multiplier=%g)" % self.multiplier
class VolumeCommand(SpeechCommand):
"""Change the volume of the voice.
"""
def __init__(self, multiplier=1):
"""
@param multiplier: The number by which to multiply the current volume setting;
e.g. 0.5 is half, 1 returns to the current volume setting.
@param multiplier: int/float
"""
self.multiplier = multiplier
def __repr__(self):
return "VolumeCommand(multiplier=%g)" % self.multiplier
class RateCommand(SpeechCommand):
"""Change the rate of the voice.
"""
def __init__(self, multiplier=1):
"""
@param multiplier: The number by which to multiply the current rate setting;
e.g. 0.5 is half, 1 returns to the current rate setting.
@param multiplier: int/float
"""
self.multiplier = multiplier
def __repr__(self):
return "RateCommand(multiplier=%g)" % self.multiplier
class PhonemeCommand(SpeechCommand):
"""Insert a specific pronunciation.
This command accepts Unicode International Phonetic Alphabet (IPA) characters.
Note that this is not well supported by synthesizers.
"""
def __init__(self, ipa, text=None):
"""
@param ipa: Unicode IPA characters.
@type ipa: unicode
@param text: Text to speak if the synthesizer does not support
some or all of the specified IPA characters,
C{None} to ignore this command instead.
@type text: unicode
"""
self.ipa = ipa
self.text = text
def __repr__(self):
out = "PhonemeCommand(%r" % self.ipa
if self.text:
out += ", text=%r" % self.text
return out + ")"
| 1 | 22,841 | Is it still important to have `speakEntry` here? It is checked before this value `containerContainsText` is used in the "General" section. | nvaccess-nvda | py |
@@ -18,6 +18,7 @@ module Travis
CLEANUPS.each do |find_arg|
sh.raw "find #{find_arg[:directory]} -name #{find_arg[:glob]} -delete 2>/dev/null"
end
+ sh.export 'PATH', '$JAVA_HOME:$PATH'
end
def install | 1 | module Travis
module Build
class Script
class Jvm < Script
include Jdk
DEFAULTS = {
jdk: 'default'
}
CLEANUPS = [
{ directory: '$HOME/.ivy2', glob: "ivydata-*.properties"},
{ directory: '$HOME/.sbt', glob: "*.lock"}
]
def setup
super
CLEANUPS.each do |find_arg|
sh.raw "find #{find_arg[:directory]} -name #{find_arg[:glob]} -delete 2>/dev/null"
end
end
def install
sh.if '-f gradlew' do
sh.cmd './gradlew assemble', retry: true, fold: 'install'
end
sh.elif '-f build.gradle' do
sh.cmd 'gradle assemble', retry: true, fold: 'install'
end
sh.elif '-f mvnw' do
sh.cmd './mvnw install -DskipTests=true -Dmaven.javadoc.skip=true -B -V', retry: true, fold: 'install'
end
sh.elif '-f pom.xml' do
sh.cmd 'mvn install -DskipTests=true -Dmaven.javadoc.skip=true -B -V', retry: true, fold: 'install'
end
end
def script
sh.if '-f gradlew' do
sh.cmd './gradlew check'
end
sh.elif '-f build.gradle' do
sh.cmd 'gradle check'
end
sh.elif '-f mvnw' do
sh.cmd './mvnw test -B'
end
sh.elif '-f pom.xml' do
sh.cmd 'mvn test -B'
end
sh.else do
sh.cmd 'ant test'
end
end
end
end
end
end
| 1 | 15,562 | It is `$JAVA_HOME/bin`, not `$JAVA_HOME`, which should be added. | travis-ci-travis-build | rb |
@@ -9,6 +9,7 @@ bool generate_uuid(char out[static 37]) {
return true;
}
#else
+#include <assert.h>
#include <string.h>
#include <stdlib.h>
| 1 | #include <uuid.h>
#include "util/uuid.h"
#if HAS_LIBUUID
bool generate_uuid(char out[static 37]) {
uuid_t uuid;
uuid_generate_random(uuid);
uuid_unparse(uuid, out);
return true;
}
#else
#include <string.h>
#include <stdlib.h>
bool generate_uuid(char out[static 37]) {
uuid_t uuid;
uint32_t status;
uuid_create(&uuid, &status);
if (status != uuid_s_ok) {
return false;
}
char *str;
uuid_to_string(&uuid, &str, &status);
if (status != uuid_s_ok) {
return false;
}
assert(strlen(str) + 1 == 37);
memcpy(out, str, sizeof(out));
free(str);
return true;
}
#endif
| 1 | 16,556 | Should move this to within the `#else` block as it's only used there. | swaywm-wlroots | c |
@@ -41,6 +41,9 @@ class ProductListPage extends AbstractPage
$context = $this->getProductListCompomentContext();
$this->productListComponent->addProductToCartByName($productName, $quantity, $context);
+
+ $this->tester->waitForAjax();
+ $this->tester->wait(1);
}
/** | 1 | <?php
declare(strict_types=1);
namespace Tests\App\Acceptance\acceptance\PageObject\Front;
use Facebook\WebDriver\WebDriverBy;
use PHPUnit\Framework\Assert;
use Tests\App\Acceptance\acceptance\PageObject\AbstractPage;
use Tests\App\Test\Codeception\AcceptanceTester;
use Tests\App\Test\Codeception\Module\StrictWebDriver;
class ProductListPage extends AbstractPage
{
/**
* @var \Tests\App\Acceptance\acceptance\PageObject\Front\ProductListComponent
*/
private $productListComponent;
/**
* @param \Tests\App\Test\Codeception\Module\StrictWebDriver $strictWebDriver
* @param \Tests\App\Test\Codeception\AcceptanceTester $tester
* @param \Tests\App\Acceptance\acceptance\PageObject\Front\ProductListComponent $productListComponent
*/
public function __construct(
StrictWebDriver $strictWebDriver,
AcceptanceTester $tester,
ProductListComponent $productListComponent
) {
$this->productListComponent = $productListComponent;
parent::__construct($strictWebDriver, $tester);
}
/**
* @param string $productName
* @param int $quantity
*/
public function addProductToCartByName($productName, $quantity = 1)
{
$context = $this->getProductListCompomentContext();
$this->productListComponent->addProductToCartByName($productName, $quantity, $context);
}
/**
* @param int $expectedCount
*/
public function assertProductsTotalCount($expectedCount)
{
$totalCountElement = $this->getProductListCompomentContext()
->findElement(WebDriverBy::cssSelector('.js-paging-total-count'));
$actualCount = (int)trim($totalCountElement->getText());
$message = 'Product list expects ' . $expectedCount . ' products but contains ' . $actualCount . '.';
Assert::assertSame($expectedCount, $actualCount, $message);
}
/**
* @return \Facebook\WebDriver\WebDriverElement
*/
private function getProductListCompomentContext()
{
return $this->webDriver->findElement(WebDriverBy::cssSelector('.web__main__content'));
}
}
| 1 | 23,539 | is this a common rule to wait one extra second after ajax? I'm thinking about moving this extra wait into waitForAjax method. | shopsys-shopsys | php |
@@ -94,8 +94,8 @@ abstract class AbstractSolrTask extends AbstractTask {
public function __sleep()
{
$properties = get_object_vars($this);
- // avoid serialization if the site object
- unset($properties['site']);
+ // avoid serialization if the site and logger object
+ unset($properties['site'], $properties['logger']);
return array_keys($properties);
}
-}
+} | 1 | <?php
namespace ApacheSolrForTypo3\Solr\Task;
/***************************************************************
* Copyright notice
*
* (c) 2017 Timo Hund <[email protected]>
* All rights reserved
*
* This script is part of the TYPO3 project. The TYPO3 project is
* free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* The GNU General Public License can be found at
* http://www.gnu.org/copyleft/gpl.html.
*
* This script is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This copyright notice MUST APPEAR in all copies of the script!
***************************************************************/
use ApacheSolrForTypo3\Solr\Domain\Site\SiteRepository;
use ApacheSolrForTypo3\Solr\Domain\Site\Site;
use ApacheSolrForTypo3\Solr\System\Logging\SolrLogManager;
use TYPO3\CMS\Core\Utility\GeneralUtility;
use TYPO3\CMS\Scheduler\Task\AbstractTask;
/**
* Abstract scheduler task for solr scheduler tasks, contains the logic to
* retrieve the site, avoids serialization of site, when scheduler task is saved.
*
* @package ApacheSolrForTypo3\Solr\Task
*/
abstract class AbstractSolrTask extends AbstractTask {

    /**
     * The site this task is supposed to initialize the index queue for.
     *
     * Lazily resolved from $rootPageId by getSite() and deliberately not
     * serialized (see __sleep()).
     *
     * @var Site
     */
    protected $site;

    /**
     * The rootPageId of the site that should be reIndexed.
     *
     * @var integer
     */
    protected $rootPageId;

    /**
     * Returns the root page uid of the site this task belongs to.
     *
     * @return int
     */
    public function getRootPageId()
    {
        return $this->rootPageId;
    }

    /**
     * Sets the root page uid of the site this task belongs to.
     *
     * @param int $rootPageId
     */
    public function setRootPageId($rootPageId)
    {
        $this->rootPageId = $rootPageId;
    }

    /**
     * Returns the Site object for $rootPageId, resolving and caching it on
     * first access.
     *
     * NOTE(review): when the SiteRepository throws an InvalidArgumentException
     * the error is only logged and null is returned — callers must be
     * prepared to handle a null site.
     *
     * @return Site
     */
    public function getSite()
    {
        if (!is_null($this->site)) {
            return $this->site;
        }

        try {
            /** @var $siteRepository SiteRepository */
            $siteRepository = GeneralUtility::makeInstance(SiteRepository::class);
            $this->site = $siteRepository->getSiteByRootPageId($this->rootPageId);
        } catch (\InvalidArgumentException $e) {
            $logger = GeneralUtility::makeInstance(SolrLogManager::class, /** @scrutinizer ignore-type */ __CLASS__);
            $logger->log(SolrLogManager::ERROR, 'Scheduler task tried to get invalid site');
        }

        return $this->site;
    }

    /**
     * Controls which properties are serialized when the scheduler persists
     * this task.
     *
     * @return array
     */
    public function __sleep()
    {
        $properties = get_object_vars($this);
        // avoid serialization of the site object; it is re-resolved lazily
        // from $rootPageId in getSite()
        unset($properties['site']);
        return array_keys($properties);
    }
}
@@ -298,6 +298,9 @@ type KeybaseService interface {
// popups spawned.
Identify(ctx context.Context, assertion, reason string) (UserInfo, error)
+ IdentifyForChat(ctx context.Context, assertion, reason string) (
+ UserInfo, *keybase1.IdentifyTrackBreaks, error)
+
// LoadUserPlusKeys returns a UserInfo struct for a
// user with the specified UID.
// If you have the UID for a user and don't require Identify to | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/kbfscodec"
"github.com/keybase/kbfs/kbfscrypto"
metrics "github.com/rcrowley/go-metrics"
"golang.org/x/net/context"
)
// Block just needs to be (de)serialized using msgpack
type Block interface {
// GetEncodedSize returns the encoded size of this block, but only
// if it has been previously set; otherwise it returns 0.
GetEncodedSize() uint32
// SetEncodedSize sets the encoded size of this block, locally
// caching it. The encoded size is not serialized.
SetEncodedSize(size uint32)
// DataVersion returns the data version for this block
DataVersion() DataVer
}
// NodeID is a unique but transient ID for a Node. That is, two Node
// objects in memory at the same time represent the same file or
// directory if and only if their NodeIDs are equal (by pointer).
type NodeID interface {
// ParentID returns the NodeID of the directory containing the
// pointed-to file or directory, or nil if none exists.
ParentID() NodeID
}
// Node represents a direct pointer to a file or directory in KBFS.
// It is somewhat like an inode in a regular file system. Users of
// KBFS can use Node as a handle when accessing files or directories
// they have previously looked up.
type Node interface {
// GetID returns the ID of this Node. This should be used as a
// map key instead of the Node itself.
GetID() NodeID
// GetFolderBranch returns the folder ID and branch for this Node.
GetFolderBranch() FolderBranch
// GetBasename returns the current basename of the node, or ""
// if the node has been unlinked.
GetBasename() string
}
// KBFSOps handles all file system operations. Expands all indirect
// pointers. Operations that modify the server data change all the
// block IDs along the path, and so must return a path with the new
// BlockIds so the caller can update their references.
//
// KBFSOps implementations must guarantee goroutine-safety of calls on
// a per-top-level-folder basis.
//
// There are two types of operations that could block:
// * remote-sync operations, that need to synchronously update the
// MD for the corresponding top-level folder. When these
// operations return successfully, they will have guaranteed to
// have successfully written the modification to the KBFS servers.
// * remote-access operations, that don't sync any modifications to KBFS
// servers, but may block on reading data from the servers.
//
// KBFSOps implementations are supposed to give git-like consistency
// semantics for modification operations; they will be visible to
// other clients immediately after the remote-sync operations succeed,
// if and only if there was no other intervening modification to the
// same folder. If not, the change will be sync'd to the server in a
// special per-device "unmerged" area before the operation succeeds.
// In this case, the modification will not be visible to other clients
// until the KBFS code on this device performs automatic conflict
// resolution in the background.
//
// All methods take a Context (see https://blog.golang.org/context),
// and if that context is cancelled during the operation, KBFSOps will
// abort any blocking calls and return ctx.Err(). Any notifications
// resulting from an operation will also include this ctx (or a
// Context derived from it), allowing the caller to determine whether
// the notification is a result of their own action or an external
// action.
type KBFSOps interface {
// GetFavorites returns the logged-in user's list of favorite
// top-level folders. This is a remote-access operation.
GetFavorites(ctx context.Context) ([]Favorite, error)
// RefreshCachedFavorites tells the instances to forget any cached
// favorites list and fetch a new list from the server. The
// effects are asychronous; if there's an error refreshing the
// favorites, the cached favorites will become empty.
RefreshCachedFavorites(ctx context.Context)
// AddFavorite adds the favorite to both the server and
// the local cache.
AddFavorite(ctx context.Context, fav Favorite) error
// DeleteFavorite deletes the favorite from both the server and
// the local cache. Idempotent, so it succeeds even if the folder
// isn't favorited.
DeleteFavorite(ctx context.Context, fav Favorite) error
// GetTLFCryptKeys gets crypt key of all generations as well as
// TLF ID for tlfHandle. The returned keys (the keys slice) are ordered by
// generation, starting with the key for FirstValidKeyGen.
GetTLFCryptKeys(ctx context.Context, tlfHandle *TlfHandle) (
keys []kbfscrypto.TLFCryptKey, id TlfID, err error)
// GetTLFID gets the TlfID for tlfHandle.
GetTLFID(ctx context.Context, tlfHandle *TlfHandle) (TlfID, error)
// GetOrCreateRootNode returns the root node and root entry
// info associated with the given TLF handle and branch, if
// the logged-in user has read permissions to the top-level
// folder. It creates the folder if one doesn't exist yet (and
// branch == MasterBranch), and the logged-in user has write
// permissions to the top-level folder. This is a
// remote-access operation.
GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error)
// GetRootNode is like GetOrCreateRootNode but if the root node
// does not exist it will return a nil Node and not create it.
GetRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error)
// GetDirChildren returns a map of children in the directory,
// mapped to their EntryInfo, if the logged-in user has read
// permission for the top-level folder. This is a remote-access
// operation.
GetDirChildren(ctx context.Context, dir Node) (map[string]EntryInfo, error)
// Lookup returns the Node and entry info associated with a
// given name in a directory, if the logged-in user has read
// permissions to the top-level folder. The returned Node is nil
// if the name is a symlink. This is a remote-access operation.
Lookup(ctx context.Context, dir Node, name string) (Node, EntryInfo, error)
// Stat returns the entry info associated with a
// given Node, if the logged-in user has read permissions to the
// top-level folder. This is a remote-access operation.
Stat(ctx context.Context, node Node) (EntryInfo, error)
// CreateDir creates a new subdirectory under the given node, if
// the logged-in user has write permission to the top-level
// folder. Returns the new Node for the created subdirectory, and
// its new entry info. This is a remote-sync operation.
CreateDir(ctx context.Context, dir Node, name string) (
Node, EntryInfo, error)
// CreateFile creates a new file under the given node, if the
// logged-in user has write permission to the top-level folder.
// Returns the new Node for the created file, and its new
// entry info. excl (when implemented) specifies whether this is an exclusive
// create. Semantically setting excl to WithExcl is like O_CREAT|O_EXCL in a
// Unix open() call.
//
// This is a remote-sync operation.
CreateFile(ctx context.Context, dir Node, name string, isExec bool, excl Excl) (
Node, EntryInfo, error)
// CreateLink creates a new symlink under the given node, if the
// logged-in user has write permission to the top-level folder.
// Returns the new entry info for the created symlink. This
// is a remote-sync operation.
CreateLink(ctx context.Context, dir Node, fromName string, toPath string) (
EntryInfo, error)
// RemoveDir removes the subdirectory represented by the given
// node, if the logged-in user has write permission to the
// top-level folder. Will return an error if the subdirectory is
// not empty. This is a remote-sync operation.
RemoveDir(ctx context.Context, dir Node, dirName string) error
// RemoveEntry removes the directory entry represented by the
// given node, if the logged-in user has write permission to the
// top-level folder. This is a remote-sync operation.
RemoveEntry(ctx context.Context, dir Node, name string) error
// Rename performs an atomic rename operation with a given
// top-level folder if the logged-in user has write permission to
// that folder, and will return an error if nodes from different
// folders are passed in. Also returns an error if the new name
// already has an entry corresponding to an existing directory
// (only non-dir types may be renamed over). This is a
// remote-sync operation.
Rename(ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) error
// Read fills in the given buffer with data from the file at the
// given node starting at the given offset, if the logged-in user
// has read permission to the top-level folder. The read data
// reflects any outstanding writes and truncates to that file that
// have been written through this KBFSOps object, even if those
// writes have not yet been sync'd. There is no guarantee that
// Read returns all of the requested data; it will return the
// number of bytes that it wrote to the dest buffer. Reads on an
// unlinked file may or may not succeed, depending on whether or
// not the data has been cached locally. If (0, nil) is returned,
// that means EOF has been reached. This is a remote-access
// operation.
Read(ctx context.Context, file Node, dest []byte, off int64) (int64, error)
// Write modifies the file at the given node, by writing the given
// buffer at the given offset within the file, if the logged-in
// user has write permission to the top-level folder. It
// overwrites any data already there, and extends the file size as
// necessary to accomodate the new data. It guarantees to write
// the entire buffer in one operation. Writes on an unlinked file
// may or may not succeed as no-ops, depending on whether or not
// the necessary blocks have been locally cached. This is a
// remote-access operation.
Write(ctx context.Context, file Node, data []byte, off int64) error
// Truncate modifies the file at the given node, by either
// shrinking or extending its size to match the given size, if the
// logged-in user has write permission to the top-level folder.
// If extending the file, it pads the new data with 0s. Truncates
// on an unlinked file may or may not succeed as no-ops, depending
// on whether or not the necessary blocks have been locally
// cached. This is a remote-access operation.
Truncate(ctx context.Context, file Node, size uint64) error
// SetEx turns on or off the executable bit on the file
// represented by a given node, if the logged-in user has write
// permissions to the top-level folder. This is a remote-sync
// operation.
SetEx(ctx context.Context, file Node, ex bool) error
// SetMtime sets the modification time on the file represented by
// a given node, if the logged-in user has write permissions to
// the top-level folder. If mtime is nil, it is a noop. This is
// a remote-sync operation.
SetMtime(ctx context.Context, file Node, mtime *time.Time) error
// Sync flushes all outstanding writes and truncates for the given
// file to the KBFS servers, if the logged-in user has write
// permissions to the top-level folder. If done through a file
// system interface, this may include modifications done via
// multiple file handles. This is a remote-sync operation.
Sync(ctx context.Context, file Node) error
// FolderStatus returns the status of a particular folder/branch, along
// with a channel that will be closed when the status has been
// updated (to eliminate the need for polling this method).
FolderStatus(ctx context.Context, folderBranch FolderBranch) (
FolderBranchStatus, <-chan StatusUpdate, error)
// Status returns the status of KBFS, along with a channel that will be
// closed when the status has been updated (to eliminate the need for
// polling this method). KBFSStatus can be non-empty even if there is an
// error.
Status(ctx context.Context) (
KBFSStatus, <-chan StatusUpdate, error)
// UnstageForTesting clears out this device's staged state, if
// any, and fast-forwards to the current head of this
// folder-branch.
UnstageForTesting(ctx context.Context, folderBranch FolderBranch) error
// Rekey rekeys this folder.
Rekey(ctx context.Context, id TlfID) error
// SyncFromServerForTesting blocks until the local client has
// contacted the server and guaranteed that all known updates
// for the given top-level folder have been applied locally
// (and notifications sent out to any observers). It returns
// an error if this folder-branch is currently unmerged or
// dirty locally.
SyncFromServerForTesting(ctx context.Context, folderBranch FolderBranch) error
// GetUpdateHistory returns a complete history of all the merged
// updates of the given folder, in a data structure that's
// suitable for encoding directly into JSON. This is an expensive
// operation, and should only be used for ocassional debugging.
// Note that the history does not include any unmerged changes or
// outstanding writes from the local device.
GetUpdateHistory(ctx context.Context, folderBranch FolderBranch) (
history TLFUpdateHistory, err error)
// GetEditHistory returns a clustered list of the most recent file
// edits by each of the valid writers of the given folder. users
// looking to get updates to this list can register as an observer
// for the folder.
GetEditHistory(ctx context.Context, folderBranch FolderBranch) (
edits TlfWriterEdits, err error)
// GetNodeMetadata gets metadata associated with a Node.
GetNodeMetadata(ctx context.Context, node Node) (NodeMetadata, error)
// Shutdown is called to clean up any resources associated with
// this KBFSOps instance.
Shutdown() error
// PushConnectionStatusChange updates the status of a service for
// human readable connection status tracking.
PushConnectionStatusChange(service string, newStatus error)
}
// KeybaseService is an interface for communicating with the keybase
// service.
type KeybaseService interface {
	// Resolve, given an assertion, resolves it to a username/UID
	// pair.  The username <-> UID mapping is trusted and
	// immutable, so it can be cached.  If the assertion is just
	// the username or a UID assertion, then the resolution can
	// also be trusted.  If the returned pair is equal to that of
	// the current session, then it can also be
	// trusted.  Otherwise, Identify() needs to be called on the
	// assertion before the assertion -> (username, UID) mapping
	// can be trusted.
	Resolve(ctx context.Context, assertion string) (
		libkb.NormalizedUsername, keybase1.UID, error)
	// Identify, given an assertion, returns a UserInfo struct
	// with the user that matches that assertion, or an error
	// otherwise.  The reason string is displayed on any tracker
	// popups spawned.
	Identify(ctx context.Context, assertion, reason string) (UserInfo, error)
	// LoadUserPlusKeys returns a UserInfo struct for a
	// user with the specified UID.
	// If you have the UID for a user and don't require Identify to
	// validate an assertion or the identity of a user, use this to
	// get UserInfo structs as it is much cheaper than Identify.
	LoadUserPlusKeys(ctx context.Context, uid keybase1.UID) (UserInfo, error)
	// LoadUnverifiedKeys returns a list of unverified public keys.  They are the union
	// of all known public keys associated with the account and the currently verified
	// keys currently part of the user's sigchain.
	LoadUnverifiedKeys(ctx context.Context, uid keybase1.UID) (
		[]keybase1.PublicKey, error)
	// CurrentSession returns a SessionInfo struct with all the
	// information for the current session, or an error otherwise.
	CurrentSession(ctx context.Context, sessionID int) (SessionInfo, error)
	// FavoriteAdd adds the given folder to the list of favorites.
	FavoriteAdd(ctx context.Context, folder keybase1.Folder) error
	// FavoriteDelete removes the given folder from the list of
	// favorites.
	FavoriteDelete(ctx context.Context, folder keybase1.Folder) error
	// FavoriteList returns the current list of favorites.
	FavoriteList(ctx context.Context, sessionID int) ([]keybase1.Folder, error)
	// Notify sends a filesystem notification.
	Notify(ctx context.Context, notification *keybase1.FSNotification) error
	// NotifySyncStatus sends a sync status notification.
	NotifySyncStatus(ctx context.Context,
		status *keybase1.FSPathSyncStatus) error
	// FlushUserFromLocalCache instructs this layer to clear any
	// KBFS-side, locally-cached information about the given user.
	// This does NOT involve communication with the daemon, this is
	// just to force future calls loading this user to fall through to
	// the daemon itself, rather than being served from the cache.
	FlushUserFromLocalCache(ctx context.Context, uid keybase1.UID)
	// FlushUserUnverifiedKeysFromLocalCache instructs this layer to clear any
	// KBFS-side, locally-cached unverified keys for the given user.
	FlushUserUnverifiedKeysFromLocalCache(ctx context.Context, uid keybase1.UID)

	// TODO: Add CryptoClient methods, too.

	// Shutdown frees any resources associated with this
	// instance.  No other methods may be called after this is
	// called.
	Shutdown()
}
// KeybaseServiceCn defines methods needed to construct KeybaseService
// and Crypto implementations.  The "Cn" suffix stands for
// "constructor"; implementations act as factories that the init code
// uses to wire up the service layer.
type KeybaseServiceCn interface {
	// NewKeybaseService constructs the KeybaseService implementation.
	NewKeybaseService(config Config, params InitParams, ctx Context, log logger.Logger) (KeybaseService, error)
	// NewCrypto constructs the Crypto implementation.
	NewCrypto(config Config, params InitParams, ctx Context, log logger.Logger) (Crypto, error)
}
// resolver is the assertion-resolution subset of KeybaseService,
// factored out so it can be embedded in KBPKI.
type resolver interface {
	// Resolve, given an assertion, resolves it to a username/UID
	// pair.  The username <-> UID mapping is trusted and
	// immutable, so it can be cached.  If the assertion is just
	// the username or a UID assertion, then the resolution can
	// also be trusted.  If the returned pair is equal to that of
	// the current session, then it can also be
	// trusted.  Otherwise, Identify() needs to be called on the
	// assertion before the assertion -> (username, UID) mapping
	// can be trusted.
	Resolve(ctx context.Context, assertion string) (
		libkb.NormalizedUsername, keybase1.UID, error)
}
// identifier is the identification subset of KeybaseService,
// factored out so it can be embedded in KBPKI.
type identifier interface {
	// Identify resolves an assertion (which could also be a
	// username) to a UserInfo struct, spawning tracker popups if
	// necessary.  The reason string is displayed on any tracker
	// popups spawned.
	Identify(ctx context.Context, assertion, reason string) (UserInfo, error)
}
// normalizedUsernameGetter can map a UID back to a normalized
// username.
type normalizedUsernameGetter interface {
	// GetNormalizedUsername returns the normalized username
	// corresponding to the given UID.
	GetNormalizedUsername(ctx context.Context, uid keybase1.UID) (libkb.NormalizedUsername, error)
}
// currentInfoGetter exposes information about the currently
// logged-in user and its active device.
type currentInfoGetter interface {
	// GetCurrentToken gets the current keybase session token.
	GetCurrentToken(ctx context.Context) (string, error)
	// GetCurrentUserInfo gets the name and UID of the current
	// logged-in user.
	GetCurrentUserInfo(ctx context.Context) (
		libkb.NormalizedUsername, keybase1.UID, error)
	// GetCurrentCryptPublicKey gets the crypt public key for the
	// currently-active device.
	GetCurrentCryptPublicKey(ctx context.Context) (
		kbfscrypto.CryptPublicKey, error)
	// GetCurrentVerifyingKey gets the public key used for signing for the
	// currently-active device.
	GetCurrentVerifyingKey(ctx context.Context) (
		kbfscrypto.VerifyingKey, error)
}
// KBPKI interacts with the Keybase daemon to fetch user info.
type KBPKI interface {
	currentInfoGetter
	resolver
	identifier
	normalizedUsernameGetter

	// HasVerifyingKey returns nil if the given user has the given
	// VerifyingKey, and an error otherwise.
	HasVerifyingKey(ctx context.Context, uid keybase1.UID,
		verifyingKey kbfscrypto.VerifyingKey,
		atServerTime time.Time) error

	// HasUnverifiedVerifyingKey returns nil if the given user has the given
	// unverified VerifyingKey, and an error otherwise.  Note that any match
	// is with a key not verified to be currently connected to the user via
	// their sigchain.  This is currently only used to verify finalized or
	// reset TLFs.  Further note that unverified keys is a super set of
	// verified keys.
	HasUnverifiedVerifyingKey(ctx context.Context, uid keybase1.UID,
		verifyingKey kbfscrypto.VerifyingKey) error

	// GetCryptPublicKeys gets all of a user's crypt public keys (including
	// paper keys).
	GetCryptPublicKeys(ctx context.Context, uid keybase1.UID) (
		[]kbfscrypto.CryptPublicKey, error)

	// TODO: Split the methods below off into a separate
	// FavoriteOps interface.

	// FavoriteAdd adds folder to the list of the logged in user's
	// favorite folders.  It is idempotent.
	FavoriteAdd(ctx context.Context, folder keybase1.Folder) error

	// FavoriteDelete deletes folder from the list of the logged in user's
	// favorite folders.  It is idempotent.
	FavoriteDelete(ctx context.Context, folder keybase1.Folder) error

	// FavoriteList returns the list of all favorite folders for
	// the logged in user.
	FavoriteList(ctx context.Context) ([]keybase1.Folder, error)

	// Notify sends a filesystem notification.
	Notify(ctx context.Context, notification *keybase1.FSNotification) error
}
// KeyMetadata is an interface for something that holds key
// information.  This is usually implemented by RootMetadata.
type KeyMetadata interface {
	// TlfID returns the ID of the TLF for which this object holds
	// key info.
	TlfID() TlfID

	// LatestKeyGeneration returns the most recent key generation
	// with key data in this object, or PublicKeyGen if this TLF
	// is public.
	LatestKeyGeneration() KeyGen

	// GetTlfHandle returns the handle for the TLF.  It must not
	// return nil.
	//
	// TODO: Remove the need for this function in this interface,
	// so that BareRootMetadata can implement this interface
	// fully.
	GetTlfHandle() *TlfHandle

	// HasKeyForUser returns whether or not the given user has
	// keys for at least one device at the given key
	// generation.  Returns false if the TLF is public, or if the
	// given key generation is invalid.
	HasKeyForUser(keyGen KeyGen, user keybase1.UID) bool

	// GetTLFCryptKeyParams returns all the necessary info to
	// construct the TLF crypt key for the given key generation,
	// user, and device (identified by its crypt public key), or
	// false if not found.  This returns an error if the TLF is
	// public.
	GetTLFCryptKeyParams(
		keyGen KeyGen, user keybase1.UID,
		key kbfscrypto.CryptPublicKey) (
		kbfscrypto.TLFEphemeralPublicKey,
		EncryptedTLFCryptKeyClientHalf,
		TLFCryptKeyServerHalfID, bool, error)

	// StoresHistoricTLFCryptKeys returns whether or not history keys are
	// symmetrically encrypted; if not, they're encrypted per-device.
	StoresHistoricTLFCryptKeys() bool

	// GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given
	// generation using the current generation's TLFCryptKey.
	GetHistoricTLFCryptKey(c cryptoPure, keyGen KeyGen,
		currentKey kbfscrypto.TLFCryptKey) (
		kbfscrypto.TLFCryptKey, error)
}
// encryptionKeyGetter is the encryption-key subset of KeyManager,
// factored out so it can be embedded separately.
type encryptionKeyGetter interface {
	// GetTLFCryptKeyForEncryption gets the crypt key to use for
	// encryption (i.e., with the latest key generation) for the
	// TLF with the given metadata.
	GetTLFCryptKeyForEncryption(ctx context.Context, kmd KeyMetadata) (
		kbfscrypto.TLFCryptKey, error)
}
// KeyManager fetches and constructs the keys needed for KBFS file
// operations.
type KeyManager interface {
	encryptionKeyGetter

	// GetTLFCryptKeyForMDDecryption gets the crypt key to use for the
	// TLF with the given metadata to decrypt the private portion of
	// the metadata.  It finds the appropriate key from mdWithKeys
	// (which in most cases is the same as mdToDecrypt) if it's not
	// already cached.
	GetTLFCryptKeyForMDDecryption(ctx context.Context,
		kmdToDecrypt, kmdWithKeys KeyMetadata) (
		kbfscrypto.TLFCryptKey, error)

	// GetTLFCryptKeyForBlockDecryption gets the crypt key to use
	// for the TLF with the given metadata to decrypt the block
	// pointed to by the given pointer.
	GetTLFCryptKeyForBlockDecryption(ctx context.Context, kmd KeyMetadata,
		blockPtr BlockPointer) (kbfscrypto.TLFCryptKey, error)

	// GetTLFCryptKeyOfAllGenerations gets the crypt keys of all generations
	// for current devices.  keys contains crypt keys from all generations, in
	// order, starting from FirstValidKeyGen.
	GetTLFCryptKeyOfAllGenerations(ctx context.Context, kmd KeyMetadata) (
		keys []kbfscrypto.TLFCryptKey, err error)

	// Rekey checks the given MD object, if it is a private TLF,
	// against the current set of device keys for all valid
	// readers and writers.  If there are any new devices, it
	// updates all existing key generations to include the new
	// devices.  If there are devices that have been removed, it
	// creates a new epoch of keys for the TLF.  If no devices
	// have changed, or if there was an error, it returns false.
	// Otherwise, it returns true.  If a new key generation is
	// added the second return value points to this new key.  This
	// is to allow for caching of the TLF crypt key only after a
	// successful merged write of the metadata.  Otherwise we could
	// prematurely pollute the key cache.
	//
	// If the given MD object is a public TLF, it simply updates
	// the TLF's handle with any newly-resolved writers.
	//
	// If promptPaper is set, prompts for any unlocked paper keys.
	// promptPaper shouldn't be set if md is for a public TLF.
	Rekey(ctx context.Context, md *RootMetadata, promptPaper bool) (
		bool, *kbfscrypto.TLFCryptKey, error)
}
// Reporter exports events (asynchronously) to any number of sinks.
type Reporter interface {
	// ReportErr records that a given error happened.
	ReportErr(ctx context.Context, tlfName CanonicalTlfName, public bool,
		mode ErrorModeType, err error)
	// AllKnownErrors returns all errors known to this Reporter.
	AllKnownErrors() []ReportedError
	// Notify sends the given notification to any sink.
	Notify(ctx context.Context, notification *keybase1.FSNotification)
	// NotifySyncStatus sends the given path sync status to any sink.
	NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus)
	// Shutdown frees any resources allocated by a Reporter.
	Shutdown()
}
// MDCache gets and puts plaintext top-level metadata into the cache.
type MDCache interface {
	// Get gets the metadata object associated with the given TlfID,
	// revision number, and branch ID (NullBranchID for merged MD).
	Get(tlf TlfID, rev MetadataRevision, bid BranchID) (ImmutableRootMetadata, error)
	// Put stores the metadata object, keyed on its TlfID, revision
	// number, and branch ID.
	Put(md ImmutableRootMetadata) error
	// Delete removes the given metadata object from the cache if it exists.
	Delete(tlf TlfID, rev MetadataRevision, bid BranchID)
}
// KeyCache handles caching for both TLFCryptKeys and BlockCryptKeys.
type KeyCache interface {
	// GetTLFCryptKey gets the crypt key for the given TLF and key
	// generation.
	GetTLFCryptKey(TlfID, KeyGen) (kbfscrypto.TLFCryptKey, error)
	// PutTLFCryptKey stores the crypt key for the given TLF and key
	// generation.
	PutTLFCryptKey(TlfID, KeyGen, kbfscrypto.TLFCryptKey) error
}
// BlockCacheLifetime denotes the lifetime of an entry in BlockCache.
type BlockCacheLifetime int

const (
	// TransientEntry means that the cache entry may be evicted at
	// any time.
	TransientEntry BlockCacheLifetime = iota
	// PermanentEntry means that the cache entry must remain until
	// explicitly removed from the cache.
	PermanentEntry
)
// BlockCache gets and puts plaintext dir blocks and file blocks into
// a cache.  These blocks are immutable and identified by their
// content hash.
type BlockCache interface {
	// Get gets the block associated with the given block ID.
	Get(ptr BlockPointer) (Block, error)
	// CheckForKnownPtr sees whether this cache has a transient
	// entry for the given file block (which must be a direct file
	// block containing data).  Returns the full BlockPointer
	// associated with that ID, including key and data versions.
	// If no ID is known, return an uninitialized BlockPointer and
	// a nil error.
	CheckForKnownPtr(tlf TlfID, block *FileBlock) (BlockPointer, error)
	// Put stores the final (content-addressable) block associated
	// with the given block ID.  If lifetime is TransientEntry,
	// then it is assumed that the block exists on the server and
	// the entry may be evicted from the cache at any time.  If
	// lifetime is PermanentEntry, then it is assumed that the
	// block doesn't exist on the server and must remain in the
	// cache until explicitly removed.  As an intermediary state,
	// as when a block is being sent to the server, the block may
	// be put into the cache both with TransientEntry and
	// PermanentEntry -- these are two separate entries.  This is
	// fine, since the block should be the same.
	Put(ptr BlockPointer, tlf TlfID, block Block,
		lifetime BlockCacheLifetime) error
	// DeleteTransient removes the transient entry for the given
	// pointer from the cache, as well as any cached IDs so the block
	// won't be reused.
	DeleteTransient(ptr BlockPointer, tlf TlfID) error
	// DeletePermanent removes the permanent entry for the non-dirty
	// block associated with the given block ID from the cache.  No
	// error is returned if no block exists for the given ID.
	DeletePermanent(id BlockID) error
	// DeleteKnownPtr removes the cached ID for the given file
	// block.  It does not remove the block itself.
	DeleteKnownPtr(tlf TlfID, block *FileBlock) error
}
// DirtyPermChan is a channel that gets closed when the holder has
// permission to write.  We are forced to define it as a named type
// due to a bug in mockgen that can't handle return values with a
// chan struct{}.
type DirtyPermChan <-chan struct{}
// DirtyBlockCache gets and puts plaintext dir blocks and file blocks
// into a cache, which have been modified by the application and not
// yet committed on the KBFS servers. They are identified by a
// (potentially random) ID that may not have any relationship with
// their context, along with a Branch in case the same TLF is being
// modified via multiple branches. Dirty blocks are never evicted;
// they must be deleted explicitly.
type DirtyBlockCache interface {
	// Get gets the block associated with the given block pointer and
	// branch name. Returns the dirty block for the given pointer, if
	// one exists.
	Get(tlfID TlfID, ptr BlockPointer, branch BranchName) (Block, error)
	// Put stores a dirty block currently identified by the
	// given block pointer and branch name.
	Put(tlfID TlfID, ptr BlockPointer, branch BranchName, block Block) error
	// Delete removes the dirty block associated with the given block
	// pointer and branch from the cache. No error is returned if no
	// block exists for the given ID.
	Delete(tlfID TlfID, ptr BlockPointer, branch BranchName) error
	// IsDirty states whether or not the block associated with the
	// given block pointer and branch name is dirty in this cache.
	IsDirty(tlfID TlfID, ptr BlockPointer, branch BranchName) bool
	// IsAnyDirty returns whether there are any dirty blocks in the
	// cache. tlfID may be ignored by implementations.
	IsAnyDirty(tlfID TlfID) bool
	// RequestPermissionToDirty is called whenever a user wants to
	// write data to a file. The caller provides an estimated number
	// of bytes that will become dirty -- this is difficult to know
	// exactly without pre-fetching all the blocks involved, but in
	// practice we can just use the number of bytes sent in via the
	// Write. It returns a channel that blocks until the cache is
	// ready to receive more dirty data, at which point the channel is
	// closed. The user must call
	// `UpdateUnsyncedBytes(-estimatedDirtyBytes)` once it has
	// completed its write and called `UpdateUnsyncedBytes` for all
	// the exact dirty block sizes.
	RequestPermissionToDirty(ctx context.Context, tlfID TlfID,
		estimatedDirtyBytes int64) (DirtyPermChan, error)
	// UpdateUnsyncedBytes is called by a user, who has already been
	// granted permission to write, with the delta in block sizes that
	// were dirtied as part of the write. So for example, if a
	// newly-dirtied block of 20 bytes was extended by 5 bytes, they
	// should send 25. If on the next write (before any syncs), bytes
	// 10-15 of that same block were overwritten, they should send 0
	// over the channel because there were no new bytes. If an
	// already-dirtied block is truncated, or if previously requested
	// bytes have now been updated more accurately in previous
	// requests, newUnsyncedBytes may be negative. wasSyncing should
	// be true if `BlockSyncStarted` has already been called for this
	// block.
	UpdateUnsyncedBytes(tlfID TlfID, newUnsyncedBytes int64, wasSyncing bool)
	// UpdateSyncingBytes is called when a particular block has
	// started syncing, or with a negative number when a block is no
	// longer syncing due to an error (and BlockSyncFinished will
	// never be called).
	UpdateSyncingBytes(tlfID TlfID, size int64)
	// BlockSyncFinished is called when a particular block has
	// finished syncing, though the overall sync might not yet be
	// complete. This lets the cache know it might be able to grant
	// more permission to writers.
	BlockSyncFinished(tlfID TlfID, size int64)
	// SyncFinished is called when a complete sync has completed and
	// its dirty blocks have been removed from the cache. This lets
	// the cache know it might be able to grant more permission to
	// writers.
	SyncFinished(tlfID TlfID, size int64)
	// ShouldForceSync returns true if the sync buffer is full enough
	// to force all callers to sync their data immediately.
	ShouldForceSync(tlfID TlfID) bool
	// Shutdown frees any resources associated with this instance. It
	// returns an error if there are any unsynced blocks.
	Shutdown() error
}
// cryptoPure contains all methods of Crypto that don't depend on
// implicit state, i.e. they're pure functions of the input.
type cryptoPure interface {
	// MakeRandomTlfID generates a dir ID using a CSPRNG.
	MakeRandomTlfID(isPublic bool) (TlfID, error)
	// MakeRandomBranchID generates a per-device branch ID using a CSPRNG.
	MakeRandomBranchID() (BranchID, error)
	// MakeMdID computes the MD ID of a RootMetadata object.
	// TODO: This should move to BareRootMetadata. Note though, that some mock tests
	// rely on it being part of the config and crypto_measured.go uses it to keep
	// statistics on time spent hashing.
	MakeMdID(md BareRootMetadata) (MdID, error)
	// MakeMerkleHash computes the hash of a RootMetadataSigned object
	// for inclusion into the KBFS Merkle tree.
	MakeMerkleHash(md *RootMetadataSigned) (MerkleHash, error)
	// MakeTemporaryBlockID generates a temporary block ID using a
	// CSPRNG. This is used for indirect blocks before they're
	// committed to the server.
	MakeTemporaryBlockID() (BlockID, error)
	// MakePermanentBlockID computes the permanent ID of a block
	// given its encoded and encrypted contents.
	MakePermanentBlockID(encodedEncryptedData []byte) (BlockID, error)
	// VerifyBlockID verifies that the given block ID is the
	// permanent block ID for the given encoded and encrypted
	// data.
	VerifyBlockID(encodedEncryptedData []byte, id BlockID) error
	// MakeBlockRefNonce generates a block reference nonce using a
	// CSPRNG. This is used for distinguishing different references to
	// the same BlockID.
	MakeBlockRefNonce() (BlockRefNonce, error)
	// MakeRandomTLFKeys generates top-level folder keys using a CSPRNG.
	MakeRandomTLFKeys() (kbfscrypto.TLFPublicKey,
		kbfscrypto.TLFPrivateKey, kbfscrypto.TLFEphemeralPublicKey,
		kbfscrypto.TLFEphemeralPrivateKey, kbfscrypto.TLFCryptKey,
		error)
	// MakeRandomTLFCryptKeyServerHalf generates the server-side of a
	// top-level folder crypt key.
	MakeRandomTLFCryptKeyServerHalf() (
		kbfscrypto.TLFCryptKeyServerHalf, error)
	// MakeRandomBlockCryptKeyServerHalf generates the server-side of
	// a block crypt key.
	MakeRandomBlockCryptKeyServerHalf() (
		kbfscrypto.BlockCryptKeyServerHalf, error)
	// MaskTLFCryptKey returns the client-side of a top-level folder crypt key.
	MaskTLFCryptKey(serverHalf kbfscrypto.TLFCryptKeyServerHalf,
		key kbfscrypto.TLFCryptKey) (
		kbfscrypto.TLFCryptKeyClientHalf, error)
	// UnmaskTLFCryptKey returns the top-level folder crypt key.
	UnmaskTLFCryptKey(serverHalf kbfscrypto.TLFCryptKeyServerHalf,
		clientHalf kbfscrypto.TLFCryptKeyClientHalf) (
		kbfscrypto.TLFCryptKey, error)
	// UnmaskBlockCryptKey returns the block crypt key.
	UnmaskBlockCryptKey(serverHalf kbfscrypto.BlockCryptKeyServerHalf,
		tlfCryptKey kbfscrypto.TLFCryptKey) (
		kbfscrypto.BlockCryptKey, error)
	// Verify verifies that sig matches msg being signed with the
	// private key that corresponds to verifyingKey.
	Verify(msg []byte, sigInfo kbfscrypto.SignatureInfo) error
	// EncryptTLFCryptKeyClientHalf encrypts a TLFCryptKeyClientHalf
	// using both a TLF's ephemeral private key and a device pubkey.
	EncryptTLFCryptKeyClientHalf(
		privateKey kbfscrypto.TLFEphemeralPrivateKey,
		publicKey kbfscrypto.CryptPublicKey,
		clientHalf kbfscrypto.TLFCryptKeyClientHalf) (
		EncryptedTLFCryptKeyClientHalf, error)
	// EncryptPrivateMetadata encrypts a PrivateMetadata object.
	EncryptPrivateMetadata(
		pmd *PrivateMetadata, key kbfscrypto.TLFCryptKey) (
		EncryptedPrivateMetadata, error)
	// DecryptPrivateMetadata decrypts a PrivateMetadata object.
	DecryptPrivateMetadata(
		encryptedPMD EncryptedPrivateMetadata,
		key kbfscrypto.TLFCryptKey) (*PrivateMetadata, error)
	// EncryptBlock encrypts a block. plainSize is the size of the encoded
	// block; EncryptBlock() must guarantee that plainSize <=
	// len(encryptedBlock).
	EncryptBlock(block Block, key kbfscrypto.BlockCryptKey) (
		plainSize int, encryptedBlock EncryptedBlock, err error)
	// DecryptBlock decrypts a block. Similar to EncryptBlock(),
	// DecryptBlock() must guarantee that (size of the decrypted
	// block) <= len(encryptedBlock).
	DecryptBlock(encryptedBlock EncryptedBlock,
		key kbfscrypto.BlockCryptKey, block Block) error
	// GetTLFCryptKeyServerHalfID creates a unique ID for this particular
	// kbfscrypto.TLFCryptKeyServerHalf.
	GetTLFCryptKeyServerHalfID(
		user keybase1.UID, deviceKID keybase1.KID,
		serverHalf kbfscrypto.TLFCryptKeyServerHalf) (
		TLFCryptKeyServerHalfID, error)
	// VerifyTLFCryptKeyServerHalfID verifies the ID is the proper HMAC result.
	VerifyTLFCryptKeyServerHalfID(serverHalfID TLFCryptKeyServerHalfID,
		user keybase1.UID, deviceKID keybase1.KID,
		serverHalf kbfscrypto.TLFCryptKeyServerHalf) error
	// EncryptMerkleLeaf encrypts a Merkle leaf node with the TLFPublicKey.
	EncryptMerkleLeaf(leaf MerkleLeaf, pubKey kbfscrypto.TLFPublicKey,
		nonce *[24]byte, ePrivKey kbfscrypto.TLFEphemeralPrivateKey) (
		EncryptedMerkleLeaf, error)
	// DecryptMerkleLeaf decrypts a Merkle leaf node with the TLFPrivateKey.
	DecryptMerkleLeaf(encryptedLeaf EncryptedMerkleLeaf,
		privKey kbfscrypto.TLFPrivateKey, nonce *[24]byte,
		ePubKey kbfscrypto.TLFEphemeralPublicKey) (*MerkleLeaf, error)
	// MakeTLFWriterKeyBundleID hashes a TLFWriterKeyBundleV3 to create an ID.
	MakeTLFWriterKeyBundleID(wkb *TLFWriterKeyBundleV3) (TLFWriterKeyBundleID, error)
	// MakeTLFReaderKeyBundleID hashes a TLFReaderKeyBundleV3 to create an ID.
	MakeTLFReaderKeyBundleID(rkb *TLFReaderKeyBundleV3) (TLFReaderKeyBundleID, error)
	// EncryptTLFCryptKeys encrypts an array of historic TLFCryptKeys.
	EncryptTLFCryptKeys(oldKeys []kbfscrypto.TLFCryptKey,
		key kbfscrypto.TLFCryptKey) (
		EncryptedTLFCryptKeys, error)
	// DecryptTLFCryptKeys decrypts an array of historic TLFCryptKeys.
	DecryptTLFCryptKeys(
		encKeys EncryptedTLFCryptKeys, key kbfscrypto.TLFCryptKey) (
		[]kbfscrypto.TLFCryptKey, error)
}
// cryptoSigner duplicates kbfscrypto.Signer here to work around
// gomock's limitations. See kbfscrypto.Signer for the documentation
// of these methods.
type cryptoSigner interface {
	Sign(context.Context, []byte) (kbfscrypto.SignatureInfo, error)
	SignToString(context.Context, []byte) (string, error)
}
// Crypto signs, verifies, encrypts, and decrypts stuff. It combines
// the pure functions of cryptoPure with the stateful signing and
// device-key-dependent decryption operations below.
type Crypto interface {
	cryptoPure
	cryptoSigner
	// DecryptTLFCryptKeyClientHalf decrypts a
	// kbfscrypto.TLFCryptKeyClientHalf using the current device's
	// private key and the TLF's ephemeral public key.
	DecryptTLFCryptKeyClientHalf(ctx context.Context,
		publicKey kbfscrypto.TLFEphemeralPublicKey,
		encryptedClientHalf EncryptedTLFCryptKeyClientHalf) (
		kbfscrypto.TLFCryptKeyClientHalf, error)
	// DecryptTLFCryptKeyClientHalfAny decrypts one of the
	// kbfscrypto.TLFCryptKeyClientHalf using the available
	// private keys and the ephemeral public key. If promptPaper
	// is true, the service will prompt the user for any unlocked
	// paper keys.
	DecryptTLFCryptKeyClientHalfAny(ctx context.Context,
		keys []EncryptedTLFCryptKeyClientAndEphemeral,
		promptPaper bool) (
		kbfscrypto.TLFCryptKeyClientHalf, int, error)
	// Shutdown frees any resources associated with this instance.
	Shutdown()
}
// MDOps gets and puts root metadata to an MDServer. On a get, it
// verifies the metadata is signed by the metadata's signing key.
type MDOps interface {
	// GetForHandle returns the current metadata object
	// corresponding to the given top-level folder's handle and
	// merge status, if the logged-in user has read permission on
	// the folder. It creates the folder if one doesn't exist
	// yet, and the logged-in user has permission to do so.
	GetForHandle(
		ctx context.Context, handle *TlfHandle, mStatus MergeStatus) (
		TlfID, ImmutableRootMetadata, error)
	// GetForTLF returns the current metadata object
	// corresponding to the given top-level folder, if the logged-in
	// user has read permission on the folder.
	GetForTLF(ctx context.Context, id TlfID) (ImmutableRootMetadata, error)
	// GetUnmergedForTLF is the same as GetForTLF, but for unmerged
	// metadata.
	GetUnmergedForTLF(ctx context.Context, id TlfID, bid BranchID) (
		ImmutableRootMetadata, error)
	// GetRange returns a range of metadata objects corresponding to
	// the passed revision numbers (inclusive).
	GetRange(ctx context.Context, id TlfID, start, stop MetadataRevision) (
		[]ImmutableRootMetadata, error)
	// GetUnmergedRange is the same as GetRange, but for unmerged
	// metadata history (inclusive).
	GetUnmergedRange(ctx context.Context, id TlfID, bid BranchID,
		start, stop MetadataRevision) ([]ImmutableRootMetadata, error)
	// Put stores the metadata object for the given
	// top-level folder.
	Put(ctx context.Context, rmd *RootMetadata) (MdID, error)
	// PutUnmerged is the same as Put, but for unmerged
	// metadata history.
	PutUnmerged(ctx context.Context, rmd *RootMetadata) (MdID, error)
	// PruneBranch prunes all unmerged history for the given TLF
	// branch.
	PruneBranch(ctx context.Context, id TlfID, bid BranchID) error
	// GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF,
	// which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it
	// entered into a conflicting state.
	GetLatestHandleForTLF(ctx context.Context, id TlfID) (
		BareTlfHandle, error)
}
// KeyOps fetches server-side key halves from the key server.
type KeyOps interface {
	// GetTLFCryptKeyServerHalf gets a server-side key half for a
	// device given the key half ID.
	GetTLFCryptKeyServerHalf(ctx context.Context,
		serverHalfID TLFCryptKeyServerHalfID,
		cryptPublicKey kbfscrypto.CryptPublicKey) (
		kbfscrypto.TLFCryptKeyServerHalf, error)
	// PutTLFCryptKeyServerHalves stores server-side key halves for a
	// set of users and devices.
	PutTLFCryptKeyServerHalves(ctx context.Context,
		serverKeyHalves map[keybase1.UID]map[keybase1.KID]kbfscrypto.TLFCryptKeyServerHalf) error
	// DeleteTLFCryptKeyServerHalf deletes a server-side key half for a
	// device given the key half ID.
	DeleteTLFCryptKeyServerHalf(ctx context.Context,
		uid keybase1.UID, kid keybase1.KID,
		serverHalfID TLFCryptKeyServerHalfID) error
}
// BlockOps gets and puts data blocks to a BlockServer. It performs
// the necessary crypto operations on each block.
type BlockOps interface {
	// Get gets the block associated with the given block pointer
	// (which belongs to the TLF with the given key metadata),
	// decrypts it if necessary, and fills in the provided block
	// object with its contents, if the logged-in user has read
	// permission for that block.
	Get(ctx context.Context, kmd KeyMetadata, blockPtr BlockPointer,
		block Block) error
	// Ready turns the given block (which belongs to the TLF with
	// the given key metadata) into encoded (and encrypted) data,
	// and calculates its ID and size, so that we can do a bunch
	// of block puts in parallel for every write. Ready() must
	// guarantee that plainSize <= readyBlockData.QuotaSize().
	Ready(ctx context.Context, kmd KeyMetadata, block Block) (
		id BlockID, plainSize int, readyBlockData ReadyBlockData, err error)
	// Delete instructs the server to delete the given block references.
	// It returns the number of not-yet-deleted references to
	// each block reference.
	Delete(ctx context.Context, tlfID TlfID, ptrs []BlockPointer) (
		liveCounts map[BlockID]int, err error)
	// Archive instructs the server to mark the given block references
	// as "archived"; that is, they are not being used in the current
	// view of the folder, and shouldn't be served to anyone other
	// than folder writers.
	Archive(ctx context.Context, tlfID TlfID, ptrs []BlockPointer) error
}
// authTokenRefreshHandler duplicates
// kbfscrypto.AuthTokenRefreshHandler here to work around gomock's
// limitations. See that interface for documentation.
type authTokenRefreshHandler interface {
	RefreshAuthToken(context.Context)
}
// MDServer gets and puts metadata for each top-level directory. The
// instantiation should be able to fetch session/user details via KBPKI. On a
// put, the server is responsible for 1) ensuring the user has appropriate
// permissions for whatever modifications were made; 2) ensuring that
// LastModifyingWriter and LastModifyingUser are updated appropriately; and 3)
// detecting conflicting writes based on the previous root block ID (i.e., when
// it supports strict consistency). On a get, it verifies the logged-in user
// has read permissions.
//
// TODO: Add interface for searching by time
type MDServer interface {
	authTokenRefreshHandler
	// GetForHandle returns the current (signed/encrypted) metadata
	// object corresponding to the given top-level folder's handle, if
	// the logged-in user has read permission on the folder. It
	// creates the folder if one doesn't exist yet, and the logged-in
	// user has permission to do so.
	GetForHandle(ctx context.Context, handle BareTlfHandle,
		mStatus MergeStatus) (TlfID, *RootMetadataSigned, error)
	// GetForTLF returns the current (signed/encrypted) metadata object
	// corresponding to the given top-level folder, if the logged-in
	// user has read permission on the folder.
	GetForTLF(ctx context.Context, id TlfID, bid BranchID, mStatus MergeStatus) (
		*RootMetadataSigned, error)
	// GetRange returns a range of (signed/encrypted) metadata objects
	// corresponding to the passed revision numbers (inclusive).
	GetRange(ctx context.Context, id TlfID, bid BranchID, mStatus MergeStatus,
		start, stop MetadataRevision) ([]*RootMetadataSigned, error)
	// Put stores the (signed/encrypted) metadata object for the given
	// top-level folder. Note: If the unmerged bit is set in the metadata
	// block's flags bitmask it will be appended to the unmerged per-device
	// history.
	Put(ctx context.Context, rmds *RootMetadataSigned, extra ExtraMetadata) error
	// PruneBranch prunes all unmerged history for the given TLF branch.
	PruneBranch(ctx context.Context, id TlfID, bid BranchID) error
	// RegisterForUpdate tells the MD server to inform the caller when
	// there is a merged update with a revision number greater than
	// currHead, which did NOT originate from this same MD server
	// session. This method returns a chan which can receive only a
	// single error before it's closed. If the received err is nil,
	// then there is updated MD ready to fetch which didn't originate
	// locally; if it is non-nil, then the previous registration
	// cannot send the next notification (e.g., the connection to the
	// MD server may have failed). In either case, the caller must
	// re-register to get a new chan that can receive future update
	// notifications.
	RegisterForUpdate(ctx context.Context, id TlfID,
		currHead MetadataRevision) (<-chan error, error)
	// CheckForRekeys initiates the rekey checking process on the
	// server. The server is allowed to delay this request, and so it
	// returns a channel for returning the error. Actual rekey
	// requests are expected to come in asynchronously.
	CheckForRekeys(ctx context.Context) <-chan error
	// TruncateLock attempts to take the history truncation lock for
	// this folder, for a TTL defined by the server. Returns true if
	// the lock was successfully taken.
	TruncateLock(ctx context.Context, id TlfID) (bool, error)
	// TruncateUnlock attempts to release the history truncation lock
	// for this folder. Returns true if the lock was successfully
	// released.
	TruncateUnlock(ctx context.Context, id TlfID) (bool, error)
	// DisableRekeyUpdatesForTesting disables processing rekey updates
	// received from the mdserver while testing.
	DisableRekeyUpdatesForTesting()
	// Shutdown is called to shutdown an MDServer connection.
	Shutdown()
	// IsConnected returns whether the MDServer is connected.
	IsConnected() bool
	// GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF,
	// which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it
	// entered into a conflicting state. For the highest level of confidence, the caller
	// should verify the mapping with a Merkle tree lookup.
	GetLatestHandleForTLF(ctx context.Context, id TlfID) (
		BareTlfHandle, error)
	// OffsetFromServerTime is the current estimate for how off our
	// local clock is from the mdserver clock. Add this to any
	// mdserver-provided timestamps to get the "local" time of the
	// corresponding event. If the returned bool is false, then we
	// don't have a current estimate for the offset.
	OffsetFromServerTime() (time.Duration, bool)
	// GetKeyBundles returns the key bundles for the given key bundle IDs.
	GetKeyBundles(ctx context.Context,
		wkbID TLFWriterKeyBundleID,
		rkbID TLFReaderKeyBundleID) (
		*TLFWriterKeyBundleV3, *TLFReaderKeyBundleV3, error)
}
// mdServerLocal is the interface for MDServer implementations that
// store data locally; it adds helpers that are presumably intended
// for use only in tests (note addNewAssertionForTest).
type mdServerLocal interface {
	MDServer
	// addNewAssertionForTest maps a new social assertion to the
	// given user, for testing assertion resolution.
	addNewAssertionForTest(
		uid keybase1.UID, newAssertion keybase1.SocialAssertion) error
	// getCurrentMergedHeadRevision returns the current merged head
	// revision for the given TLF.
	getCurrentMergedHeadRevision(ctx context.Context, id TlfID) (
		rev MetadataRevision, err error)
	// isShutdown returns whether this server has been shut down.
	isShutdown() bool
	// copy returns a new mdServerLocal instance using the given config.
	copy(config mdServerLocalConfig) mdServerLocal
}
// BlockServer gets and puts opaque data blocks. The instantiation
// should be able to fetch session/user details via KBPKI. On a
// put/delete, the server is responsible for: 1) checking that the ID
// matches the hash of the buffer; and 2) enforcing writer quotas.
type BlockServer interface {
	authTokenRefreshHandler
	// Get gets the (encrypted) block data associated with the given
	// block ID and context, uses the provided block key to decrypt
	// the block, and fills in the provided block object with its
	// contents, if the logged-in user has read permission for that
	// block.
	Get(ctx context.Context, tlfID TlfID, id BlockID, context BlockContext) (
		[]byte, kbfscrypto.BlockCryptKeyServerHalf, error)
	// Put stores the (encrypted) block data under the given ID and
	// context on the server, along with the server half of the block
	// key. context should contain a BlockRefNonce of zero. There
	// will be an initial reference for this block for the given
	// context.
	//
	// Put should be idempotent, although it should also return an
	// error if, for a given ID, any of the other arguments differ
	// from previous Put calls with the same ID.
	//
	// If this returns a BServerErrorOverQuota, with Throttled=false,
	// the caller can treat it as informational and otherwise ignore
	// the error.
	Put(ctx context.Context, tlfID TlfID, id BlockID, context BlockContext,
		buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error
	// AddBlockReference adds a new reference to the given block,
	// defined by the given context (which should contain a non-zero
	// BlockRefNonce). (Contexts with a BlockRefNonce of zero should
	// be used when putting the block for the first time via Put().)
	// Returns a BServerErrorBlockNonExistent if id is unknown within
	// this folder.
	//
	// AddBlockReference should be idempotent, although it should
	// also return an error if, for a given ID and refnonce, any
	// of the other fields of context differ from previous
	// AddBlockReference calls with the same ID and refnonce.
	//
	// If this returns a BServerErrorOverQuota, with Throttled=false,
	// the caller can treat it as informational and otherwise ignore
	// the error.
	AddBlockReference(ctx context.Context, tlfID TlfID, id BlockID,
		context BlockContext) error
	// RemoveBlockReferences removes the references to the given block
	// ID defined by the given contexts. If no references to the block
	// remain after this call, the server is allowed to delete the
	// corresponding block permanently. If the reference defined by
	// the count has already been removed, the call is a no-op.
	// It returns the number of remaining not-yet-deleted references
	// after this reference has been removed.
	RemoveBlockReferences(ctx context.Context, tlfID TlfID,
		contexts map[BlockID][]BlockContext) (liveCounts map[BlockID]int, err error)
	// ArchiveBlockReferences marks the given block references as
	// "archived"; that is, they are not being used in the current
	// view of the folder, and shouldn't be served to anyone other
	// than folder writers.
	//
	// For a given ID/refnonce pair, ArchiveBlockReferences should
	// be idempotent, although it should also return an error if
	// any of the other fields of the context differ from previous
	// calls with the same ID/refnonce pair.
	ArchiveBlockReferences(ctx context.Context, tlfID TlfID,
		contexts map[BlockID][]BlockContext) error
	// Shutdown is called to shutdown a BlockServer connection.
	Shutdown()
	// GetUserQuotaInfo returns the quota for the user.
	GetUserQuotaInfo(ctx context.Context) (info *UserQuotaInfo, err error)
}
// blockRefLocalStatus is the local status of a block reference:
// whether it is still live, or has been archived.
type blockRefLocalStatus int

const (
	// liveBlockRef means the reference is in active use (value 1,
	// preserved from the original declaration).
	liveBlockRef blockRefLocalStatus = iota + 1
	// archivedBlockRef means the reference has been archived (value
	// 2). Using iota here gives archivedBlockRef the type
	// blockRefLocalStatus too; previously it was an untyped `= 2`,
	// which made it default to int rather than blockRefLocalStatus.
	archivedBlockRef
)
// blockServerLocal is the interface for BlockServer implementations
// that store data locally.
type blockServerLocal interface {
	BlockServer
	// getAll returns all the known block references for the given
	// TLF, keyed by block ID and reference nonce, and should only be
	// used during testing.
	getAll(ctx context.Context, tlfID TlfID) (
		map[BlockID]map[BlockRefNonce]blockRefLocalStatus, error)
}
// BlockSplitter decides when a file or directory block needs to be
// split into multiple blocks.
type BlockSplitter interface {
	// CopyUntilSplit copies data into the block until we reach the
	// point where we should split, but only if writing to the end of
	// the last block. If this is writing into the middle of a file,
	// just copy everything that will fit into the block, and assume
	// that block boundaries will be fixed later. Return how much was
	// copied.
	CopyUntilSplit(
		block *FileBlock, lastBlock bool, data []byte, off int64) int64
	// CheckSplit, given a block, figures out whether it ends at the
	// right place. If so, return 0. If not, return either the
	// offset in the block where it should be split, or -1 if more
	// bytes from the next block should be appended.
	CheckSplit(block *FileBlock) int64
	// ShouldEmbedBlockChanges decides whether we should keep the
	// block changes embedded in the MD or not.
	ShouldEmbedBlockChanges(bc *BlockChanges) bool
}
// KeyServer fetches/writes server-side key halves from/to the key server.
type KeyServer interface {
	// GetTLFCryptKeyServerHalf gets a server-side key half for a
	// device given the key half ID.
	GetTLFCryptKeyServerHalf(ctx context.Context,
		serverHalfID TLFCryptKeyServerHalfID,
		cryptPublicKey kbfscrypto.CryptPublicKey) (
		kbfscrypto.TLFCryptKeyServerHalf, error)
	// PutTLFCryptKeyServerHalves stores server-side key halves for a
	// set of users and devices.
	PutTLFCryptKeyServerHalves(ctx context.Context,
		serverKeyHalves map[keybase1.UID]map[keybase1.KID]kbfscrypto.TLFCryptKeyServerHalf) error
	// DeleteTLFCryptKeyServerHalf deletes a server-side key half for a
	// device given the key half ID.
	DeleteTLFCryptKeyServerHalf(ctx context.Context,
		uid keybase1.UID, kid keybase1.KID,
		serverHalfID TLFCryptKeyServerHalfID) error
	// Shutdown is called to free any KeyServer resources.
	Shutdown()
}
// NodeChange represents a change made to a node as part of an atomic
// file system operation.
type NodeChange struct {
	// Node is the node that was changed.
	Node Node
	// DirUpdated holds the basenames of directory entries
	// added/removed.
	DirUpdated []string
	// FileUpdated holds the ranges of the file that were updated --
	// presumably byte ranges of writes/truncates; confirm against
	// WriteRange's definition.
	FileUpdated []WriteRange
}
// Observer can be notified that there is an available update for a
// given directory. The notification callbacks should not block, or
// make any calls to the Notifier interface. Nodes passed to the
// observer should not be held past the end of the notification
// callback.
type Observer interface {
	// LocalChange announces that the file at this Node has been
	// updated locally, but not yet saved at the server.
	LocalChange(ctx context.Context, node Node, write WriteRange)
	// BatchChanges announces that the nodes have all been updated
	// together atomically. Each NodeChange in changes affects the
	// same top-level folder and branch.
	BatchChanges(ctx context.Context, changes []NodeChange)
	// TlfHandleChange announces that the handle of the corresponding
	// folder branch has changed, likely due to previously-unresolved
	// assertions becoming resolved. This indicates that the listener
	// should switch over any cached paths for this folder-branch to
	// the new name. Nodes that were acquired under the old name will
	// still continue to work, but new lookups on the old name may
	// either encounter alias errors or entirely new TLFs (in the case
	// of conflicts).
	TlfHandleChange(ctx context.Context, newHandle *TlfHandle)
}
// Notifier notifies registrants of directory changes.
type Notifier interface {
	// RegisterForChanges declares that the given Observer wants to
	// subscribe to updates for the given top-level folders.
	RegisterForChanges(folderBranches []FolderBranch, obs Observer) error
	// UnregisterFromChanges declares that the given Observer no
	// longer wants to subscribe to updates for the given top-level
	// folders.
	UnregisterFromChanges(folderBranches []FolderBranch, obs Observer) error
}
// Clock is an interface for getting the current time, so that a fake
// clock can be substituted where needed (e.g., in tests).
type Clock interface {
	// Now returns the current time.
	Now() time.Time
}
// ConflictRenamer deals with names for conflicting directory entries.
type ConflictRenamer interface {
	// ConflictRename returns the appropriately modified filename for
	// the given operation and original name.
	ConflictRename(op op, original string) string
}
// Config collects all the singleton instance instantiations needed to
// run KBFS in one place. The methods below are self-explanatory and
// do not require comments.
type Config interface {
	KBFSOps() KBFSOps
	SetKBFSOps(KBFSOps)
	KBPKI() KBPKI
	SetKBPKI(KBPKI)
	KeyManager() KeyManager
	SetKeyManager(KeyManager)
	Reporter() Reporter
	SetReporter(Reporter)
	MDCache() MDCache
	SetMDCache(MDCache)
	KeyCache() KeyCache
	SetKeyCache(KeyCache)
	BlockCache() BlockCache
	SetBlockCache(BlockCache)
	DirtyBlockCache() DirtyBlockCache
	SetDirtyBlockCache(DirtyBlockCache)
	Crypto() Crypto
	SetCrypto(Crypto)
	Codec() kbfscodec.Codec
	SetCodec(kbfscodec.Codec)
	MDOps() MDOps
	SetMDOps(MDOps)
	KeyOps() KeyOps
	SetKeyOps(KeyOps)
	BlockOps() BlockOps
	SetBlockOps(BlockOps)
	MDServer() MDServer
	SetMDServer(MDServer)
	BlockServer() BlockServer
	SetBlockServer(BlockServer)
	KeyServer() KeyServer
	SetKeyServer(KeyServer)
	KeybaseService() KeybaseService
	SetKeybaseService(KeybaseService)
	BlockSplitter() BlockSplitter
	SetBlockSplitter(BlockSplitter)
	Notifier() Notifier
	SetNotifier(Notifier)
	Clock() Clock
	SetClock(Clock)
	ConflictRenamer() ConflictRenamer
	SetConflictRenamer(ConflictRenamer)
	MetadataVersion() MetadataVer
	DataVersion() DataVer
	RekeyQueue() RekeyQueue
	SetRekeyQueue(RekeyQueue)
	// ReqsBufSize indicates the number of read or write operations
	// that can be buffered per folder.
	ReqsBufSize() int
	// MaxFileBytes indicates the maximum supported plaintext size of
	// a file in bytes.
	MaxFileBytes() uint64
	// MaxNameBytes indicates the maximum supported size of a
	// directory entry name in bytes.
	MaxNameBytes() uint32
	// MaxDirBytes indicates the maximum supported plaintext size of a
	// directory in bytes.
	MaxDirBytes() uint64
	// DoBackgroundFlushes says whether we should periodically try to
	// flush dirty files, even without a sync from the user. Should
	// be true except for during some testing.
	DoBackgroundFlushes() bool
	SetDoBackgroundFlushes(bool)
	// RekeyWithPromptWaitTime indicates how long to wait, after
	// setting the rekey bit, before prompting for a paper key.
	RekeyWithPromptWaitTime() time.Duration
	// GracePeriod specifies a grace period for which a delayed cancellation
	// waits before actually canceling the context. This is useful for giving
	// the critical portion of a slow remote operation some extra time to
	// finish as an effort to avoid conflicting. Examples include an O_EXCL
	// Create call interrupted by an ALRM signal that actually makes it to the
	// server, while the application assumes it did not since EINTR is
	// returned. A delayed cancellation allows us to distinguish between a
	// successful cancel (where the remote operation didn't make it to the
	// server) and a failed cancel (where the remote operation made it to
	// the server). However, the optimal value of this depends on the network
	// conditions. A long grace period under really good network conditions
	// would just unnecessarily slow down Ctrl-C.
	//
	// TODO: make this adaptive and self-change over time based on network
	// conditions.
	DelayedCancellationGracePeriod() time.Duration
	SetDelayedCancellationGracePeriod(time.Duration)
	// QuotaReclamationPeriod indicates how often each TLF should
	// check for quota to reclaim. If the Duration.Seconds()
	// == 0, quota reclamation should not run automatically.
	QuotaReclamationPeriod() time.Duration
	// QuotaReclamationMinUnrefAge indicates the minimum time a block
	// must have been unreferenced before it can be reclaimed.
	QuotaReclamationMinUnrefAge() time.Duration
	// QuotaReclamationMinHeadAge indicates the minimum age of the
	// most recently merged MD update before we can run reclamation,
	// to avoid conflicting with a currently active writer.
	QuotaReclamationMinHeadAge() time.Duration
	// ResetCaches clears and re-initializes all data and key caches.
	ResetCaches()
	MakeLogger(module string) logger.Logger
	SetLoggerMaker(func(module string) logger.Logger)
	// MetricsRegistry may be nil, which should be interpreted as
	// not using metrics at all. (i.e., as if UseNilMetrics were
	// set). This differs from how go-metrics treats nil Registry
	// objects, which is to use the default registry.
	MetricsRegistry() metrics.Registry
	SetMetricsRegistry(metrics.Registry)
	// TLFValidDuration is the time TLFs are valid before identification needs to be redone.
	TLFValidDuration() time.Duration
	// SetTLFValidDuration sets TLFValidDuration.
	SetTLFValidDuration(time.Duration)
	// Shutdown is called to free config resources.
	Shutdown() error
	// CheckStateOnShutdown tells the caller whether or not it is safe
	// to check the state of the system on shutdown.
	CheckStateOnShutdown() bool
}
// NodeCache holds Nodes, and allows libkbfs to update them when
// things change about the underlying KBFS blocks. It is probably
// most useful to instantiate this on a per-folder-branch basis, so
// that it can create a Path with the correct DirId and Branch name.
type NodeCache interface {
// GetOrCreate either makes a new Node for the given
// BlockPointer, or returns an existing one. TODO: If we ever
// support hard links, we will have to revisit the "name" and
// "parent" parameters here. name must not be empty. Returns
// an error if parent cannot be found.
GetOrCreate(ptr BlockPointer, name string, parent Node) (Node, error)
// Get returns the Node associated with the given ptr if one
// already exists. Otherwise, it returns nil.
Get(ref blockRef) Node
// UpdatePointer updates the BlockPointer for the corresponding
// Node. NodeCache ignores this call when oldRef is not cached in
// any Node.
UpdatePointer(oldRef blockRef, newPtr BlockPointer)
// Move swaps the parent node for the corresponding Node, and
// updates the node's name. NodeCache ignores the call when ptr
// is not cached. Returns an error if newParent cannot be found.
// If newParent is nil, it treats the ptr's corresponding node as
// being unlinked from the old parent completely.
Move(ref blockRef, newParent Node, newName string) error
// Unlink sets the corresponding node's parent to nil and caches
// the provided path in case the node is still open. NodeCache
// ignores the call when ptr is not cached. The path is required
// because the caller may have made changes to the parent nodes
// already that shouldn't be reflected in the cached path.
Unlink(ref blockRef, oldPath path)
// PathFromNode creates the path up to a given Node.
PathFromNode(node Node) path
}
// fileBlockDeepCopier fetches a file block, makes a deep copy of it
// (duplicating pointers for any indirect blocks) and generates a new
// random temporary block ID for it. It returns the new BlockPointer,
// and internally saves the block for future uses.
type fileBlockDeepCopier func(context.Context, string, BlockPointer) (
BlockPointer, error)
// crAction represents a specific action to take as part of the
// conflict resolution process.
type crAction interface {
// swapUnmergedBlock should be called before do(), and if it
// returns true, the caller must use the merged block
// corresponding to the returned BlockPointer instead of
// unmergedBlock when calling do(). If BlockPointer{} is zeroPtr
// (and true is returned), just swap in the regular mergedBlock.
swapUnmergedBlock(unmergedChains *crChains, mergedChains *crChains,
unmergedBlock *DirBlock) (bool, BlockPointer, error)
// do modifies the given merged block in place to resolve the
// conflict, and potentially uses the provided blockCopyFetchers to
// obtain copies of other blocks (along with new BlockPointers)
// when requiring a block copy.
do(ctx context.Context, unmergedCopier fileBlockDeepCopier,
mergedCopier fileBlockDeepCopier, unmergedBlock *DirBlock,
mergedBlock *DirBlock) error
// updateOps potentially modifies, in place, the slices of
// unmerged and merged operations stored in the corresponding
// crChains for the given unmerged and merged most recent
// pointers. Eventually, the "unmerged" ops will be pushed as
// part of a MD update, and so should contain any necessary
// operations to fully merge the unmerged data, including any
// conflict resolution. The "merged" ops will be played through
// locally, to notify any caches about the newly-obtained merged
// data (and any changes to local data that were required as part
// of conflict resolution, such as renames). A few things to note:
// * A particular action's updateOps method may be called more than
// once for different sets of chains, however it should only add
// new directory operations (like create/rm/rename) into directory
// chains.
// * updateOps doesn't necessarily result in correct BlockPointers within
// each of those ops; that must happen in a later phase.
// * mergedBlock can be nil if the chain is for a file.
updateOps(unmergedMostRecent BlockPointer, mergedMostRecent BlockPointer,
unmergedBlock *DirBlock, mergedBlock *DirBlock,
unmergedChains *crChains, mergedChains *crChains) error
// String returns a string representation for this crAction, used
// for debugging.
String() string
}
// RekeyQueue is a managed queue of folders needing some rekey action taken upon them
// by the current client.
type RekeyQueue interface {
// Enqueue enqueues a folder for rekey action.
Enqueue(TlfID) <-chan error
// IsRekeyPending returns true if the given folder is in the rekey queue.
IsRekeyPending(TlfID) bool
// GetRekeyChannel will return any rekey completion channel (if pending.)
GetRekeyChannel(id TlfID) <-chan error
// Clear cancels all pending rekey actions and clears the queue.
Clear()
// Wait waits for all queued rekeys to finish.
Wait(ctx context.Context) error
}
// BareRootMetadata is a read-only interface to the bare serializable MD that
// is signed by the reader or writer.
type BareRootMetadata interface {
// TlfID returns the ID of the TLF this BareRootMetadata is for.
TlfID() TlfID
// LatestKeyGeneration returns the most recent key generation in this
// BareRootMetadata, or PublicKeyGen if this TLF is public.
LatestKeyGeneration() KeyGen
// IsValidRekeyRequest returns true if the current block is a simple rekey wrt
// the passed block.
IsValidRekeyRequest(codec kbfscodec.Codec, prevMd BareRootMetadata,
user keybase1.UID, prevExtra, extra ExtraMetadata) (bool, error)
// MergedStatus returns the status of this update -- has it been
// merged into the main folder or not?
MergedStatus() MergeStatus
// IsRekeySet returns true if the rekey bit is set.
IsRekeySet() bool
// IsWriterMetadataCopiedSet returns true if the bit is set indicating
// the writer metadata was copied.
IsWriterMetadataCopiedSet() bool
// IsFinal returns true if this is the last metadata block for a given
// folder. This is only expected to be set for folder resets.
IsFinal() bool
// IsWriter returns whether or not the user+device is an authorized writer.
IsWriter(user keybase1.UID, deviceKID keybase1.KID, extra ExtraMetadata) bool
// IsReader returns whether or not the user+device is an authorized reader.
IsReader(user keybase1.UID, deviceKID keybase1.KID, extra ExtraMetadata) bool
// DeepCopy returns a deep copy of the underlying data structure.
DeepCopy(codec kbfscodec.Codec) (BareRootMetadata, error)
// MakeSuccessorCopy returns a newly constructed successor copy to this metadata revision.
// It differs from DeepCopy in that it can perform an up conversion to a new metadata
// version.
MakeSuccessorCopy(codec kbfscodec.Codec) (BareRootMetadata, error)
// CheckValidSuccessor makes sure the given BareRootMetadata is a valid
// successor to the current one, and returns an error otherwise.
CheckValidSuccessor(currID MdID, nextMd BareRootMetadata) error
// CheckValidSuccessorForServer is like CheckValidSuccessor but with
// server-specific error messages.
CheckValidSuccessorForServer(currID MdID, nextMd BareRootMetadata) error
// MakeBareTlfHandle makes a BareTlfHandle for this
// BareRootMetadata. Should be used only by servers and MDOps.
MakeBareTlfHandle(extra ExtraMetadata) (BareTlfHandle, error)
// TlfHandleExtensions returns a list of handle extensions associated with the TLF.
TlfHandleExtensions() (extensions []TlfHandleExtension)
// GetDeviceKIDs returns the KIDs (of
// kbfscrypto.CryptPublicKeys) for all known devices for the
// given user at the given key generation, if any. Returns an
// error if the TLF is public, or if the given key generation
// is invalid.
GetDeviceKIDs(keyGen KeyGen, user keybase1.UID, extra ExtraMetadata) (
[]keybase1.KID, error)
// HasKeyForUser returns whether or not the given user has keys for at
// least one device at the given key generation. Returns false if the
// TLF is public, or if the given key generation is invalid. Equivalent to:
//
// kids, err := GetDeviceKIDs(keyGen, user)
// return (err == nil) && (len(kids) > 0)
HasKeyForUser(keyGen KeyGen, user keybase1.UID, extra ExtraMetadata) bool
// GetTLFCryptKeyParams returns all the necessary info to construct
// the TLF crypt key for the given key generation, user, and device
// (identified by its crypt public key), or false if not found. This
// returns an error if the TLF is public.
GetTLFCryptKeyParams(keyGen KeyGen, user keybase1.UID,
key kbfscrypto.CryptPublicKey, extra ExtraMetadata) (
kbfscrypto.TLFEphemeralPublicKey,
EncryptedTLFCryptKeyClientHalf,
TLFCryptKeyServerHalfID, bool, error)
// IsValidAndSigned verifies the BareRootMetadata, checks the
// writer signature, and returns an error if a problem was
// found. This should be the first thing checked on a BRMD
// retrieved from an untrusted source, and then the signing
// user and key should be validated, either by comparing to
// the current device key (using IsLastModifiedBy), or by
// checking with KBPKI.
IsValidAndSigned(codec kbfscodec.Codec,
crypto cryptoPure, extra ExtraMetadata) error
// IsLastModifiedBy verifies that the BareRootMetadata is
// written by the given user and device (identified by the KID
// of the device verifying key), and returns an error if not.
IsLastModifiedBy(uid keybase1.UID, key kbfscrypto.VerifyingKey) error
// LastModifyingWriter returns the UID of the last user to modify the writer metadata.
LastModifyingWriter() keybase1.UID
// LastModifyingWriterKID returns the KID of the last device to modify the writer metadata.
LastModifyingWriterKID() keybase1.KID
// GetLastModifyingUser returns the UID of the last user to modify any of the metadata.
GetLastModifyingUser() keybase1.UID
// RefBytes returns the number of newly referenced bytes introduced by this revision of metadata.
RefBytes() uint64
// UnrefBytes returns the number of newly unreferenced bytes introduced by this revision of metadata.
UnrefBytes() uint64
// DiskUsage returns the estimated disk usage for the folder as of this revision of metadata.
DiskUsage() uint64
// RevisionNumber returns the revision number associated with this metadata structure.
RevisionNumber() MetadataRevision
// BID returns the per-device branch ID associated with this metadata revision.
BID() BranchID
// GetPrevRoot returns the hash of the previous metadata revision.
GetPrevRoot() MdID
// IsUnmergedSet returns true if the unmerged bit is set.
IsUnmergedSet() bool
// GetSerializedPrivateMetadata returns the serialized private metadata as a byte slice.
GetSerializedPrivateMetadata() []byte
// GetSerializedWriterMetadata serializes the underlying writer metadata and returns the result.
GetSerializedWriterMetadata(codec kbfscodec.Codec) ([]byte, error)
// GetWriterMetadataSigInfo returns the signature info associated with the writer metadata.
GetWriterMetadataSigInfo() kbfscrypto.SignatureInfo
// Version returns the metadata version.
Version() MetadataVer
// GetTLFPublicKey returns the TLF public key for the given key generation.
// Note the *TLFWriterKeyBundleV3 is expected to be nil for pre-v3 metadata.
GetTLFPublicKey(KeyGen, ExtraMetadata) (kbfscrypto.TLFPublicKey, bool)
// AreKeyGenerationsEqual returns true if all key generations in the passed metadata are equal to those
// in this revision.
AreKeyGenerationsEqual(kbfscodec.Codec, BareRootMetadata) (bool, error)
// GetUnresolvedParticipants returns any unresolved readers and writers present in this revision of metadata.
GetUnresolvedParticipants() (readers, writers []keybase1.SocialAssertion)
// GetTLFWriterKeyBundleID returns the ID of the externally-stored writer key bundle, or the zero value if
// this object stores it internally.
GetTLFWriterKeyBundleID() TLFWriterKeyBundleID
// GetTLFReaderKeyBundleID returns the ID of the externally-stored reader key bundle, or the zero value if
// this object stores it internally.
GetTLFReaderKeyBundleID() TLFReaderKeyBundleID
// StoresHistoricTLFCryptKeys returns whether or not history keys are symmetrically encrypted; if not, they're
// encrypted per-device.
StoresHistoricTLFCryptKeys() bool
// GetHistoricTLFCryptKey attempts to symmetrically decrypt the key at the given
// generation using the current generation's TLFCryptKey.
GetHistoricTLFCryptKey(c cryptoPure, keyGen KeyGen,
currentKey kbfscrypto.TLFCryptKey, extra ExtraMetadata) (
kbfscrypto.TLFCryptKey, error)
}
// MutableBareRootMetadata is a mutable interface to the bare serializable MD that is signed by the reader or writer.
type MutableBareRootMetadata interface {
BareRootMetadata
// SetRefBytes sets the number of newly referenced bytes introduced by this revision of metadata.
SetRefBytes(refBytes uint64)
// SetUnrefBytes sets the number of newly unreferenced bytes introduced by this revision of metadata.
SetUnrefBytes(unrefBytes uint64)
// SetDiskUsage sets the estimated disk usage for the folder as of this revision of metadata.
SetDiskUsage(diskUsage uint64)
// AddRefBytes increments the number of newly referenced bytes introduced by this revision of metadata.
AddRefBytes(refBytes uint64)
// AddUnrefBytes increments the number of newly unreferenced bytes introduced by this revision of metadata.
AddUnrefBytes(unrefBytes uint64)
// AddDiskUsage increments the estimated disk usage for the folder as of this revision of metadata.
AddDiskUsage(diskUsage uint64)
// ClearRekeyBit unsets any set rekey bit.
ClearRekeyBit()
// ClearWriterMetadataCopiedBit unsets any set writer metadata copied bit.
ClearWriterMetadataCopiedBit()
// ClearFinalBit unsets any final bit.
ClearFinalBit()
// SetUnmerged sets the unmerged bit.
SetUnmerged()
// SetBranchID sets the branch ID for this metadata revision.
SetBranchID(bid BranchID)
// SetPrevRoot sets the hash of the previous metadata revision.
SetPrevRoot(mdID MdID)
// SetSerializedPrivateMetadata sets the serialized private metadata.
SetSerializedPrivateMetadata(spmd []byte)
// SetWriterMetadataSigInfo sets the signature info associated with the writer metadata.
SetWriterMetadataSigInfo(sigInfo kbfscrypto.SignatureInfo)
// SetLastModifyingWriter sets the UID of the last user to modify the writer metadata.
SetLastModifyingWriter(user keybase1.UID)
// SetLastModifyingUser sets the UID of the last user to modify any of the metadata.
SetLastModifyingUser(user keybase1.UID)
// SetRekeyBit sets the rekey bit.
SetRekeyBit()
// SetFinalBit sets the finalized bit.
SetFinalBit()
// SetWriterMetadataCopiedBit set the writer metadata copied bit.
SetWriterMetadataCopiedBit()
// SetRevision sets the revision number of the underlying metadata.
SetRevision(revision MetadataRevision)
// AddNewKeysForTesting adds new writer and reader TLF key bundles to this revision of metadata.
// Note: This is only used for testing at the moment.
AddNewKeysForTesting(crypto cryptoPure, wDkim, rDkim UserDeviceKeyInfoMap) (ExtraMetadata, error)
// NewKeyGeneration adds a new key generation to this revision of metadata.
NewKeyGeneration(pubKey kbfscrypto.TLFPublicKey) (extra ExtraMetadata)
// SetUnresolvedReaders sets the list of unresolved readers associated with this folder.
SetUnresolvedReaders(readers []keybase1.SocialAssertion)
// SetUnresolvedWriters sets the list of unresolved writers associated with this folder.
SetUnresolvedWriters(writers []keybase1.SocialAssertion)
// SetConflictInfo sets any conflict info associated with this metadata revision.
SetConflictInfo(ci *TlfHandleExtension)
// SetFinalizedInfo sets any finalized info associated with this metadata revision.
SetFinalizedInfo(fi *TlfHandleExtension)
// SetWriters sets the list of writers associated with this folder.
SetWriters(writers []keybase1.UID)
// SetTlfID sets the ID of the underlying folder in the metadata structure.
SetTlfID(tlf TlfID)
// FakeInitialRekey fakes the initial rekey for the given
// BareRootMetadata. This is necessary since newly-created
// BareRootMetadata objects don't have enough data to build a
// TlfHandle from until the first rekey.
FakeInitialRekey(c cryptoPure, h BareTlfHandle) (ExtraMetadata, error)
// Update initializes the given freshly-created BareRootMetadata object with
// the given TlfID and BareTlfHandle. Note that if the given ID/handle are private,
// rekeying must be done separately.
Update(tlf TlfID, h BareTlfHandle) error
// Returns the TLF key bundles for this metadata at the given key generation.
// MDv3 TODO: Get rid of this.
GetTLFKeyBundles(keyGen KeyGen) (*TLFWriterKeyBundleV2, *TLFReaderKeyBundleV2, error)
// GetUserDeviceKeyInfoMaps returns the given user device key info maps for the given
// key generation.
GetUserDeviceKeyInfoMaps(keyGen KeyGen, extra ExtraMetadata) (
readers, writers UserDeviceKeyInfoMap, err error)
// FinalizeRekey is called after all rekeying work has been performed on the underlying
// metadata.
FinalizeRekey(c cryptoPure, prevKey,
key kbfscrypto.TLFCryptKey, extra ExtraMetadata) error
}
// KeyBundleCache is an interface to a key bundle cache for use with v3 metadata.
type KeyBundleCache interface {
// GetTLFReaderKeyBundle returns the TLFReaderKeyBundleV3 for the given TLFReaderKeyBundleID.
GetTLFReaderKeyBundle(TLFReaderKeyBundleID) (TLFReaderKeyBundleV3, bool)
// GetTLFWriterKeyBundle returns the TLFWriterKeyBundleV3 for the given TLFWriterKeyBundleID.
GetTLFWriterKeyBundle(TLFWriterKeyBundleID) (TLFWriterKeyBundleV3, bool)
// PutTLFReaderKeyBundle stores the given TLFReaderKeyBundleV3.
PutTLFReaderKeyBundle(TLFReaderKeyBundleV3)
// PutTLFWriterKeyBundle stores the given TLFWriterKeyBundleV3.
PutTLFWriterKeyBundle(TLFWriterKeyBundleV3)
}
| 1 | 13,682 | I'd prefer a name less chat-specific, since later on we could have other app types that want the same behavior. Maybe `IdentifyAndAllowTrackBreaks`? | keybase-kbfs | go |
@@ -51,10 +51,18 @@ public:
*/
bool load(handle src, bool)
{
+ // Import mpi4py if it does not exist.
+ if (!PyMPIComm_Get)
+ {
+ if (import_mpi4py() < 0)
+ {
+ throw std::runtime_error(
+ "ERROR: mpi4py not loaded correctly\n"); /* Python 2.X */
+ }
+ }
// If src is not actually a MPI4PY communicator, the next
// call returns nullptr, and we return false to indicate the conversion
// failed.
-
MPI_Comm *mpiCommPtr = PyMPIComm_Get(src.ptr());
if (mpiCommPtr == nullptr)
{ | 1 | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* py11glue.cpp
*
* Created on: Mar 16, 2017
* Author: William F Godoy [email protected]
*/
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <stdexcept>
#include <adios2.h>
#ifdef ADIOS2_HAVE_MPI
#include <mpi4py/mpi4py.h>
#endif
#include "py11ADIOS.h"
#include "py11Attribute.h"
#include "py11Engine.h"
#include "py11File.h"
#include "py11IO.h"
#include "py11Operator.h"
#include "py11Variable.h"
#ifdef ADIOS2_HAVE_MPI
namespace pybind11
{
namespace detail
{
template <>
struct type_caster<adios2::py11::MPI4PY_Comm>
{
public:
/**
* This macro establishes the name 'MPI4PY_Comm' in
* function signatures and declares a local variable
* 'value' of type MPI4PY_Comm
*/
PYBIND11_TYPE_CASTER(adios2::py11::MPI4PY_Comm, _("MPI4PY_Comm"));
/**
* Conversion part 1 (Python->C++): convert a PyObject into a MPI4PY_Comm
* instance or return false upon failure. The second argument
* indicates whether implicit conversions should be applied.
*
* NOTE(review): PyMPIComm_Get requires import_mpi4py() to have been
* called beforehand; the PYBIND11_MODULE initializer in this file
* performs that import.
*/
bool load(handle src, bool)
{
// If src is not actually a MPI4PY communicator, the next
// call returns nullptr, and we return false to indicate the conversion
// failed.
MPI_Comm *mpiCommPtr = PyMPIComm_Get(src.ptr());
if (mpiCommPtr == nullptr)
{
return false;
}
// Copy the underlying MPI_Comm handle into the caster's 'value'.
value.comm = *mpiCommPtr;
return true;
}
};
} // namespace detail
} // namespace pybind11
#endif
#ifdef ADIOS2_HAVE_MPI
// Open (MPI build): construct a high-level API File stream for the given
// name/mode, using an mpi4py communicator and an engine type string.
adios2::py11::File Open(const std::string &name, const std::string mode,
adios2::py11::MPI4PY_Comm comm,
const std::string enginetype)
{
return adios2::py11::File(name, mode, comm, enginetype);
}
// OpenConfig (MPI build): construct a File stream configured from a runtime
// XML config file; ioinconfigfile names the io element inside that file.
adios2::py11::File OpenConfig(const std::string &name, const std::string mode,
adios2::py11::MPI4PY_Comm comm,
const std::string &configfile,
const std::string ioinconfigfile)
{
return adios2::py11::File(name, mode, comm, configfile, ioinconfigfile);
}
#else
// Open (serial build): construct a high-level API File stream for the given
// name/mode and engine type, without an MPI communicator.
adios2::py11::File Open(const std::string &name, const std::string mode,
const std::string enginetype)
{
return adios2::py11::File(name, mode, enginetype);
}
// OpenConfig (serial build): construct a File stream configured from a runtime
// XML config file; ioinconfigfile names the io element inside that file.
adios2::py11::File OpenConfig(const std::string &name, const std::string mode,
const std::string configfile,
const std::string ioinconfigfile)
{
return adios2::py11::File(name, mode, configfile, ioinconfigfile);
}
#endif
PYBIND11_MODULE(adios2, m)
{
#ifdef ADIOS2_HAVE_MPI
if (import_mpi4py() < 0)
{
throw std::runtime_error(
"ERROR: mpi4py not loaded correctly\n"); /* Python 2.X */
}
#endif
m.attr("DebugON") = true;
m.attr("DebugOFF") = false;
m.attr("ConstantDims") = true;
m.attr("VariableDims") = false;
m.attr("LocalValueDim") = adios2::LocalValueDim;
m.attr("GlobalValue") = false;
m.attr("LocalValue") = true;
// enum classes
pybind11::enum_<adios2::Mode>(m, "Mode")
.value("Write", adios2::Mode::Write)
.value("Read", adios2::Mode::Read)
.value("Append", adios2::Mode::Append)
.value("Deferred", adios2::Mode::Deferred)
.value("Sync", adios2::Mode::Sync)
.export_values();
pybind11::enum_<adios2::ShapeID>(m, "ShapeID")
.value("Unknown", adios2::ShapeID::Unknown)
.value("GlobalValue", adios2::ShapeID::GlobalValue)
.value("GlobalArray", adios2::ShapeID::GlobalArray)
.value("LocalValue", adios2::ShapeID::LocalValue)
.value("LocalArray", adios2::ShapeID::LocalArray)
.export_values();
pybind11::enum_<adios2::StepMode>(m, "StepMode")
.value("Append", adios2::StepMode::Append)
.value("Update", adios2::StepMode::Update)
.value("NextAvailable", adios2::StepMode::NextAvailable)
.value("LatestAvailable", adios2::StepMode::LatestAvailable)
.export_values();
pybind11::enum_<adios2::StepStatus>(m, "StepStatus")
.value("OK", adios2::StepStatus::OK)
.value("NotReady", adios2::StepStatus::NotReady)
.value("EndOfStream", adios2::StepStatus::EndOfStream)
.value("OtherError", adios2::StepStatus::OtherError)
.export_values();
#ifdef ADIOS2_HAVE_MPI
m.def("open", &Open, pybind11::arg("name"), pybind11::arg("mode"),
pybind11::arg("comm"), pybind11::arg("engine_type") = "BPFile", R"md(
Simple API MPI open, based on python IO.
Allows for passing parameters in source code.
Parameters
name
stream name
mode
"w" : write,
"r" : read,
"a" : append (append not yet supported)
comm (mpi4py)
MPI communicator
engine_type
adios2 engine type, default=BPFile
Returns
file (adios2 stream)
handler to adios File for the simple Python API
)md");
m.def("open", &OpenConfig, pybind11::arg("name"), pybind11::arg("mode"),
pybind11::arg("comm"), pybind11::arg("config_file"),
pybind11::arg("io_in_config_file"), R"md(
Simple API MPI open, based on python IO.
Allows for passing a runtime configuration file in xml format and the
name of the io element related to the returning File.
Parameters
name
stream name
mode
"w" : write,
"r" : read,
"a" : append (append not yet supported)
comm (mpi4py)
MPI communicator
config_file
adios2 runtime configuration file name, in xml format
io_in_config_file
io element in configfile related to returning File
Returns
file (adios2 stream)
handler to adios File for the simple Python API
)md");
#else
m.def("open", &Open, "High-level API, file object open",
pybind11::arg("name"), pybind11::arg("mode"),
pybind11::arg("engine_type") = "BPFile");
m.def("open", &OpenConfig,
"High-level API, file object open with a runtime config file",
pybind11::arg("name"), pybind11::arg("mode"),
pybind11::arg("config_file"), pybind11::arg("io_in_config_file"));
#endif
pybind11::class_<adios2::py11::ADIOS>(m, "ADIOS")
// Python 2
.def("__nonzero__",
[](const adios2::py11::ADIOS &adios) {
const bool opBool = adios ? true : false;
return opBool;
})
// Python 3
.def("__bool__",
[](const adios2::py11::ADIOS &adios) {
const bool opBool = adios ? true : false;
return opBool;
})
#ifdef ADIOS2_HAVE_MPI
.def(pybind11::init<const adios2::py11::MPI4PY_Comm, const bool>(),
"adios2 module starting point, constructs an ADIOS class object",
pybind11::arg("comm"), pybind11::arg("debugMode") = true)
.def(pybind11::init<const std::string &,
const adios2::py11::MPI4PY_Comm, const bool>(),
"adios2 module starting point, constructs an ADIOS class object",
pybind11::arg("configFile"), pybind11::arg("comm"),
pybind11::arg("debugMode") = true)
#else
.def(pybind11::init<const bool>(),
"adios2 module starting point "
"non-MPI, constructs an ADIOS class "
"object",
pybind11::arg("debugMode") = true)
.def(pybind11::init<const std::string &, const bool>(),
"adios2 module starting point non-MPI, constructs an ADIOS class "
"object",
pybind11::arg("configFile"), pybind11::arg("debugMode") = true)
#endif
.def("DeclareIO", &adios2::py11::ADIOS::DeclareIO,
"spawn IO object component returning a IO object with a unique "
"name, throws an exception if IO with the same name is declared "
"twice")
.def("AtIO", &adios2::py11::ADIOS::AtIO,
"returns an IO object "
"previously defined IO object "
"with DeclareIO, throws "
"an exception if not found")
.def("DefineOperator", &adios2::py11::ADIOS::DefineOperator)
.def("InquireOperator", &adios2::py11::ADIOS::InquireOperator)
.def("FlushAll", &adios2::py11::ADIOS::FlushAll,
"flushes all engines in all spawned IO objects")
.def("RemoveIO", &adios2::py11::ADIOS::RemoveIO,
"DANGER ZONE: remove a particular IO by name, creates dangling "
"objects to parameters, variable, attributes, engines created "
"with removed IO")
.def("RemoveAllIOs", &adios2::py11::ADIOS::RemoveAllIOs,
"DANGER ZONE: remove all IOs in current ADIOS object, creates "
"dangling objects to parameters, variable, attributes, engines "
"created with removed IO");
pybind11::class_<adios2::py11::IO>(m, "IO")
// Python 2
.def("__nonzero__",
[](const adios2::py11::IO &io) {
const bool opBool = io ? true : false;
return opBool;
})
// Python 3
.def("__bool__",
[](const adios2::py11::IO &io) {
const bool opBool = io ? true : false;
return opBool;
})
.def("SetEngine", &adios2::py11::IO::SetEngine)
.def("SetParameters", &adios2::py11::IO::SetParameters,
pybind11::arg("parameters") = adios2::Params())
.def("SetParameter", &adios2::py11::IO::SetParameter)
.def("Parameters", &adios2::py11::IO::Parameters)
.def("AddTransport", &adios2::py11::IO::AddTransport,
pybind11::arg("type"),
pybind11::arg("parameters") = adios2::Params())
.def("DefineVariable",
(adios2::py11::Variable(adios2::py11::IO::*)(
const std::string &, const pybind11::array &,
const adios2::Dims &, const adios2::Dims &,
const adios2::Dims &, const bool)) &
adios2::py11::IO::DefineVariable,
pybind11::return_value_policy::move, pybind11::arg("name"),
pybind11::arg("array"), pybind11::arg("shape") = adios2::Dims(),
pybind11::arg("start") = adios2::Dims(),
pybind11::arg("count") = adios2::Dims(),
pybind11::arg("isConstantDims") = false)
.def(
"DefineVariable",
(adios2::py11::Variable(adios2::py11::IO::*)(const std::string &)) &
adios2::py11::IO::DefineVariable,
pybind11::return_value_policy::move, pybind11::arg("name"))
.def("InquireVariable", &adios2::py11::IO::InquireVariable,
pybind11::return_value_policy::move)
.def("InquireAttribute", &adios2::py11::IO::InquireAttribute,
pybind11::return_value_policy::move)
.def("DefineAttribute",
(adios2::py11::Attribute(adios2::py11::IO::*)(
const std::string &, const pybind11::array &,
const std::string &, const std::string)) &
adios2::py11::IO::DefineAttribute,
pybind11::arg("name"), pybind11::arg("array"),
pybind11::arg("variableName") = "",
pybind11::arg("separator") = "/",
pybind11::return_value_policy::move)
.def("DefineAttribute",
(adios2::py11::Attribute(adios2::py11::IO::*)(
const std::string &, const std::string &, const std::string &,
const std::string)) &
adios2::py11::IO::DefineAttribute,
pybind11::arg("name"), pybind11::arg("stringValue"),
pybind11::arg("variableName") = "",
pybind11::arg("separator") = "/",
pybind11::return_value_policy::move)
.def("DefineAttribute",
(adios2::py11::Attribute(adios2::py11::IO::*)(
const std::string &, const std::vector<std::string> &,
const std::string &, const std::string)) &
adios2::py11::IO::DefineAttribute,
pybind11::arg("name"), pybind11::arg("strings"),
pybind11::arg("variableName") = "",
pybind11::arg("separator") = "/",
pybind11::return_value_policy::move)
.def("Open", (adios2::py11::Engine(adios2::py11::IO::*)(
const std::string &, const int)) &
adios2::py11::IO::Open)
#ifdef ADIOS2_HAVE_MPI
.def("Open", (adios2::py11::Engine(adios2::py11::IO::*)(
const std::string &, const int,
adios2::py11::MPI4PY_Comm comm)) &
adios2::py11::IO::Open)
#endif
.def("AvailableVariables", &adios2::py11::IO::AvailableVariables)
.def("AvailableAttributes", &adios2::py11::IO::AvailableAttributes)
.def("FlushAll", &adios2::py11::IO::FlushAll)
.def("EngineType", &adios2::py11::IO::EngineType)
.def("LockDefinitions", &adios2::py11::IO::LockDefinitions)
.def("RemoveVariable", &adios2::py11::IO::RemoveVariable)
.def("RemoveAllVariables", &adios2::py11::IO::RemoveAllVariables)
.def("RemoveAttribute", &adios2::py11::IO::RemoveAttribute)
.def("RemoveAllAttributes", &adios2::py11::IO::RemoveAllAttributes);
pybind11::class_<adios2::py11::Variable>(m, "Variable")
// Python 2
.def("__nonzero__",
[](const adios2::py11::Variable &variable) {
const bool opBool = variable ? true : false;
return opBool;
})
// Python 3
.def("__bool__",
[](const adios2::py11::Variable &variable) {
const bool opBool = variable ? true : false;
return opBool;
})
.def("SetShape", &adios2::py11::Variable::SetShape)
.def("SetBlockSelection", &adios2::py11::Variable::SetBlockSelection)
.def("SetSelection", &adios2::py11::Variable::SetSelection)
.def("SetStepSelection", &adios2::py11::Variable::SetStepSelection)
.def("SelectionSize", &adios2::py11::Variable::SelectionSize)
.def("Name", &adios2::py11::Variable::Name)
.def("Type", &adios2::py11::Variable::Type)
.def("Sizeof", &adios2::py11::Variable::Sizeof)
.def("ShapeID", &adios2::py11::Variable::ShapeID)
.def("Shape", &adios2::py11::Variable::Shape,
pybind11::arg("step") = adios2::EngineCurrentStep)
.def("Start", &adios2::py11::Variable::Start)
.def("Count", &adios2::py11::Variable::Count)
.def("Steps", &adios2::py11::Variable::Steps)
.def("StepsStart", &adios2::py11::Variable::StepsStart)
.def("BlockID", &adios2::py11::Variable::BlockID)
.def("AddOperation", &adios2::py11::Variable::AddOperation)
.def("Operations", &adios2::py11::Variable::Operations);
pybind11::class_<adios2::py11::Attribute>(m, "Attribute")
// Python 2
.def("__nonzero__",
[](const adios2::py11::Attribute &attribute) {
const bool opBool = attribute ? true : false;
return opBool;
})
// Python 3
.def("__bool__",
[](const adios2::py11::Attribute &attribute) {
const bool opBool = attribute ? true : false;
return opBool;
})
.def("Name", &adios2::py11::Attribute::Name)
.def("Type", &adios2::py11::Attribute::Type)
.def("DataString", &adios2::py11::Attribute::DataString)
.def("Data", &adios2::py11::Attribute::Data);
    // Bind adios2::py11::Engine as the Python "Engine" class.
    pybind11::class_<adios2::py11::Engine>(m, "Engine")
        // Python 2 truthiness protocol: true while the underlying
        // engine handle is valid.
        .def("__nonzero__",
             [](const adios2::py11::Engine &engine) {
                 const bool opBool = engine ? true : false;
                 return opBool;
             })
        // Python 3 equivalent of __nonzero__.
        .def("__bool__",
             [](const adios2::py11::Engine &engine) {
                 const bool opBool = engine ? true : false;
                 return opBool;
             })
        // BeginStep(mode, timeoutSeconds=-1): the member-pointer cast
        // selects the (StepMode, float) overload. Default -1 presumably
        // means "no timeout" -- confirm against Engine::BeginStep.
        .def("BeginStep",
             (adios2::StepStatus(adios2::py11::Engine::*)(
                 const adios2::StepMode, const float)) &
                 adios2::py11::Engine::BeginStep,
             pybind11::arg("mode"), pybind11::arg("timeoutSeconds") = -1.f,
             pybind11::return_value_policy::move)
        // BeginStep(): zero-argument overload, registered second so the
        // explicit-mode overload is tried first.
        .def("BeginStep",
             (adios2::StepStatus(adios2::py11::Engine::*)()) &
                 adios2::py11::Engine::BeginStep,
             pybind11::return_value_policy::move)
        // Put(variable, array, launch=Deferred): numpy-array writer.
        .def("Put",
             (void (adios2::py11::Engine::*)(adios2::py11::Variable,
                                             const pybind11::array &,
                                             const adios2::Mode launch)) &
                 adios2::py11::Engine::Put,
             pybind11::arg("variable"), pybind11::arg("array"),
             pybind11::arg("launch") = adios2::Mode::Deferred)
        // Put(variable, string): single-string writer overload.
        .def("Put", (void (adios2::py11::Engine::*)(adios2::py11::Variable,
                                                    const std::string &)) &
                        adios2::py11::Engine::Put)
        .def("PerformPuts", &adios2::py11::Engine::PerformPuts)
        // Get(variable, array, launch=Deferred): numpy-array reader.
        .def("Get",
             (void (adios2::py11::Engine::*)(adios2::py11::Variable,
                                             pybind11::array &,
                                             const adios2::Mode launch)) &
                 adios2::py11::Engine::Get,
             pybind11::arg("variable"), pybind11::arg("array"),
             pybind11::arg("launch") = adios2::Mode::Deferred)
        // Get(variable, string, launch=Deferred): string reader.
        .def("Get",
             (void (adios2::py11::Engine::*)(adios2::py11::Variable,
                                             std::string &,
                                             const adios2::Mode launch)) &
                 adios2::py11::Engine::Get,
             pybind11::arg("variable"), pybind11::arg("string"),
             pybind11::arg("launch") = adios2::Mode::Deferred)
        .def("PerformGets", &adios2::py11::Engine::PerformGets)
        .def("EndStep", &adios2::py11::Engine::EndStep)
        .def("Flush", &adios2::py11::Engine::Flush)
        // Close(transportIndex=-1): default -1 presumably targets all
        // transports -- confirm against Engine::Close.
        .def("Close", &adios2::py11::Engine::Close,
             pybind11::arg("transportIndex") = -1)
        .def("CurrentStep", &adios2::py11::Engine::CurrentStep)
        .def("Name", &adios2::py11::Engine::Name)
        .def("Type", &adios2::py11::Engine::Type)
        .def("Steps", &adios2::py11::Engine::Steps);
    // Bind adios2::py11::Operator as the Python "Operator" class.
    pybind11::class_<adios2::py11::Operator>(m, "Operator")
        // Python 2 truthiness protocol: true while the underlying
        // operator handle is valid.
        .def("__nonzero__",
             [](const adios2::py11::Operator &op) {
                 const bool opBool = op ? true : false;
                 return opBool;
             })
        // Python 3 equivalent of __nonzero__.
        .def("__bool__",
             [](const adios2::py11::Operator &op) {
                 const bool opBool = op ? true : false;
                 return opBool;
             })
        .def("Type", &adios2::py11::Operator::Type)
        .def("SetParameter", &adios2::py11::Operator::SetParameter)
        .def("Parameters", &adios2::py11::Operator::Parameters);
    // Bind adios2::py11::File as the Python "File" class (high-level API).
    pybind11::class_<adios2::py11::File>(m, "File")
        // Debug representation built from the stored name and mode.
        .def("__repr__",
             [](const adios2::py11::File &stream) {
                 return "<adios2.file named '" + stream.m_Name +
                        "' and mode '" + stream.m_Mode + "'>";
             })
        // enter and exit are defined for the with-as operator in Python;
        // __exit__ closes the stream regardless of the exception args.
        .def("__enter__",
             [](const adios2::py11::File &stream) { return stream; })
        .def("__exit__",
             [](adios2::py11::File &stream, pybind11::args) { stream.Close(); })
        // keep_alive<0, 1> ties the returned iterator's lifetime to the
        // File object so the stream outlives the for-loop iterator.
        .def("__iter__", [](adios2::py11::File &stream) { return stream; },
             pybind11::keep_alive<0, 1>())
        // Iterator protocol: advance one step per iteration; stop when
        // GetStep() reports no further steps.
        .def("__next__",
             [](adios2::py11::File &stream) {
                 if (!stream.GetStep())
                 {
                     throw pybind11::stop_iteration();
                 }
                 return stream;
             })
.def("set_parameter", &adios2::py11::File::SetParameter,
pybind11::arg("key"), pybind11::arg("value"), R"md(
Sets a single parameter. Overwrites value if key exists.
Parameters
key
input parameter key
value
parameter value
)md")
.def("set_parameters", &adios2::py11::File::SetParameters,
pybind11::arg("parameters"), R"md(
Sets parameters using a dictionary.
Removes any previous parameter.
Parameters
parameters dictionary
input key/value parameters
value
parameter value
)md")
        // add_transport(type, parameters={}): returns the transport index.
        .def("add_transport", &adios2::py11::File::AddTransport,
             pybind11::return_value_policy::move, pybind11::arg("type"),
             pybind11::arg("parameters") = adios2::Params(), R"md(
             Adds a transport and its parameters to current IO. Must be
             supported by current engine type.
             Parameters
                 type
                     must be a supported transport type for current engine.
                 parameters dictionary
                     acceptable parameters for a particular transport
                     CAN'T use the keywords "Transport" or "transport" in key
             Returns
                 transportindex
                     handler to added transport
         )md")
        // Read-mode introspection: variable name -> info dictionary.
        .def("available_variables", &adios2::py11::File::AvailableVariables,
             pybind11::return_value_policy::move, R"md(
             Returns a 2-level dictionary with variable information.
             Read mode only.
             Returns
                 variables dictionary
                     key
                         variable name
                     value
                         variable information dictionary
         )md")
        // Read-mode introspection: attribute name -> info dictionary.
        .def("available_attributes", &adios2::py11::File::AvailableAttributes,
             pybind11::return_value_policy::move, R"md(
             Returns a 2-level dictionary with attribute information.
             Read mode only.
             Returns
                 attributes dictionary
                     key
                         attribute name
                     value
                         attribute information dictionary
         )md")
        // write(name, array, shape=[], start=[], count=[], end_step=False):
        // the member-pointer cast selects the full-selection numpy overload.
        .def("write",
             (void (adios2::py11::File::*)(
                 const std::string &, const pybind11::array &,
                 const adios2::Dims &, const adios2::Dims &,
                 const adios2::Dims &, const bool)) &
                 adios2::py11::File::Write,
             pybind11::arg("name"), pybind11::arg("array"),
             pybind11::arg("shape") = adios2::Dims(),
             pybind11::arg("start") = adios2::Dims(),
             pybind11::arg("count") = adios2::Dims(),
             pybind11::arg("end_step") = false,
             R"md(
             writes a self-describing array (numpy) variable
             Parameters
                 name
                     variable name
                 array: numpy
                     variable data values
                 shape
                     variable global MPI dimensions.
                     Pass empty numpy array for local variables.
                 start
                     variable offset for current MPI rank.
                     Pass empty numpy array for local variables.
                 count
                     variable dimension for current MPI rank.
                     Pass a numpy array for local variables.
                 end_step
                     end current step, begin next step and flush (default = false).
         )md")
.def("write",
(void (adios2::py11::File::*)(
const std::string &, const pybind11::array &,
const adios2::Dims &, const adios2::Dims &,
const adios2::Dims &, const adios2::vParams &, const bool)) &
adios2::py11::File::Write,
pybind11::arg("name"), pybind11::arg("array"),
pybind11::arg("shape"), pybind11::arg("start"),
pybind11::arg("count"), pybind11::arg("operations"),
pybind11::arg("end_step") = false,
R"md(
writes a self-describing array (numpy) variable with operations
e.g. compression: 'zfp', 'mgard', 'sz'
Parameters
name
variable name
array: numpy
variable data values
shape
variable global MPI dimensions.
Pass empty numpy array for local variables.
start
variable offset for current MPI rank.
Pass empty numpy array for local variables.
count
variable dimension for current MPI rank.
Pass a numpy array for local variables.
end_step
end current step, begin next step and flush (default = false).
)md")
        // write(name, array, local_value=False, end_step=False):
        // single-value numpy overload.
        .def("write",
             (void (adios2::py11::File::*)(const std::string &,
                                           const pybind11::array &, const bool,
                                           const bool)) &
                 adios2::py11::File::Write,
             pybind11::arg("name"), pybind11::arg("array"),
             pybind11::arg("local_value") = false,
             pybind11::arg("end_step") = false, R"md(
             writes a self-describing single value array (numpy) variable
             Parameters
                 name
                     variable name
                 array: numpy
                     variable data single value
                 local_value
                     true: local value, false: global value
                 end_step
                     end current step, begin next step and flush
                     (default = false).
         )md")
        // write(name, string, local_value=False, end_step=False):
        // single-value string overload.
        .def("write",
             (void (adios2::py11::File::*)(const std::string &,
                                           const std::string &, const bool,
                                           const bool)) &
                 adios2::py11::File::Write,
             pybind11::arg("name"), pybind11::arg("string"),
             pybind11::arg("local_value") = false,
             pybind11::arg("end_step") = false, R"md(
             writes a self-describing single value string variable
             Parameters
                 name
                     variable name
                 string
                     variable data single value
                 local_value
                     true: local value, false: global value
                 end_step
                     end current step, begin next step and flush
                     (default = false).
         )md")
.def("write_attribute",
(void (adios2::py11::File::*)(
const std::string &, const pybind11::array &,
const std::string &, const std::string, const bool)) &
adios2::py11::File::WriteAttribute,
pybind11::arg("name"), pybind11::arg("array"),
pybind11::arg("variable_name") = "",
pybind11::arg("separator") = "/",
pybind11::arg("end_step") = false,
R"md(
writes a self-describing single value array (numpy) variable
Parameters
name
attribute name
array: numpy
attribute numpy array data
variable_name:
if attribute is associated with a variable
separator:
concatenation string between variablename and attribute
e.g. variablename + separator + name
var/units. Not used if variablename is empty
end_step
end current step, begin next step and flush
(default = false).
)md")
.def("write_attribute",
(void (adios2::py11::File::*)(
const std::string &, const std::string &, const std::string &,
const std::string, const bool)) &
adios2::py11::File::WriteAttribute,
pybind11::arg("name"), pybind11::arg("string_value"),
pybind11::arg("variable_name") = "",
pybind11::arg("separator") = "/",
pybind11::arg("end_step") = false,
R"md(
writes a self-describing single value array (numpy) variable
Parameters
name
attribute name
stringvalue:
attribute single string
variablename:
if attribute is associated with a variable
separator:
concatenation string between variablename and attribute
e.g. variablename + separator + name
var/units. Not used if variablename is empty
end_step
end current step, begin next step and flush
(default = false).
)md")
.def("write_attribute",
(void (adios2::py11::File::*)(
const std::string &, const std::vector<std::string> &,
const std::string &, const std::string, const bool)) &
adios2::py11::File::WriteAttribute,
pybind11::arg("name"), pybind11::arg("string_array"),
pybind11::arg("variable_name") = "",
pybind11::arg("separator") = "/",
pybind11::arg("end_step") = false,
R"md(
writes a self-describing single value array (numpy) variable
Parameters
name
attribute name
stringarray:
attribute string array
variablename:
if attribute is associated with a variable
separator:
concatenation string between variablename and attribute
e.g. variablename + separator + name
var/units. Not used if variablename is empty
end_step
end current step, begin next step and flush
(default = false).
)md")
        // read_string(name, block_id=0): current-step string read
        // (streaming mode).
        .def("read_string",
             (std::vector<std::string>(adios2::py11::File::*)(
                 const std::string &, const size_t)) &
                 adios2::py11::File::ReadString,
             pybind11::return_value_policy::take_ownership,
             pybind11::arg("name"), pybind11::arg("block_id") = 0,
             R"md(
             Reads string value for current step
             (use for streaming mode step by step)
             Parameters
                 name
                     string variable name
                 block_id
                     required for local variables
             Returns
                 list string
                     data string values. For global values, returns 1 element list,
                     for local_values an n-block size list
         )md")
        // read_string(name, step_start, step_count, block_id=0):
        // random-access string read over a step range.
        .def("read_string",
             (std::vector<std::string>(adios2::py11::File::*)(
                 const std::string &, const size_t, const size_t,
                 const size_t)) &
                 adios2::py11::File::ReadString,
             pybind11::return_value_policy::take_ownership,
             pybind11::arg("name"), pybind11::arg("step_start"),
             pybind11::arg("step_count"), pybind11::arg("block_id") = 0,
             R"md(
             Reads string value for a certain step
             (random access mode)
             Parameters
                 name
                     string variable name
                 step_start
                     variable step start
                 step_count
                     variable number of steps to read from step_start
                 block_id
                     required for local variables
             Returns
                 string list
                     data string values for a certain step range.
         )md")
        // read(name, block_id=0): whole-variable read for the current step.
        .def("read",
             (pybind11::array(adios2::py11::File::*)(const std::string &,
                                                     const size_t)) &
                 adios2::py11::File::Read,
             pybind11::return_value_policy::take_ownership,
             pybind11::arg("name"), pybind11::arg("block_id") = 0, R"md(
             Reads entire variable for current step
             (streaming mode step by step)
             Parameters
                 name
                     variable name
                 block_id
                     required for local array variables
             Returns
                 array: numpy
                     values of variable name for current step.
                     Single values will have a shape={1} numpy array
         )md")
        // read(name, start, count, block_id=0): dimension selection for
        // the current step.
        .def("read",
             (pybind11::array(adios2::py11::File::*)(
                 const std::string &, const adios2::Dims &,
                 const adios2::Dims &, const size_t)) &
                 adios2::py11::File::Read,
             pybind11::return_value_policy::take_ownership,
             pybind11::arg("name"), pybind11::arg("start"),
             pybind11::arg("count"), pybind11::arg("block_id") = 0,
             R"md(
             Reads a selection piece in dimension for current step
             (streaming mode step by step)
             Parameters
                 name
                     variable name
                 start
                     variable local offset selection
                 count
                     variable local dimension selection from start
                 block_id
                     required for local array variables
             Returns
                 array: numpy
                     values of variable name for current step
                     empty if exception is thrown
         )md")
        // read(name, start, count, step_start, step_count, block_id=0):
        // random-access read across steps (file engines only).
        .def(
            "read",
            (pybind11::array(adios2::py11::File::*)(
                const std::string &, const adios2::Dims &, const adios2::Dims &,
                const size_t, const size_t, const size_t)) &
                adios2::py11::File::Read,
            pybind11::return_value_policy::take_ownership,
            pybind11::arg("name"), pybind11::arg("start"),
            pybind11::arg("count"), pybind11::arg("step_start"),
            pybind11::arg("step_count"), pybind11::arg("block_id") = 0, R"md(
             Random access read allowed to select steps,
             only valid with File Engines
             Parameters
                 name
                     variable to be read
                 start
                     variable offset dimensions
                 count
                     variable local dimensions from offset
                 step_start
                     variable step start
                 step_count
                     variable number of steps to read from step_start
                 block_id
                     required for local array variables
             Returns
                 array: numpy
                     resulting array from selection
         )md")
.def("read_attribute",
(pybind11::array(adios2::py11::File::*)(
const std::string &, const std::string &, const std::string)) &
adios2::py11::File::ReadAttribute,
pybind11::return_value_policy::take_ownership,
pybind11::arg("name"), pybind11::arg("variable_name") = "",
pybind11::arg("separator") = "/",
R"md(
Reads a numpy based attribute
Parameters
name
attribute name
variablename:
if attribute is associated with a variable
separator:
concatenation string between variablename and attribute
e.g. variablename + separator + name
var/units. Not used if variablename is empty
Returns
array: numpy
resulting array attribute data
)md")
.def("read_attribute_string",
(std::vector<std::string>(adios2::py11::File::*)(
const std::string &, const std::string &, const std::string)) &
adios2::py11::File::ReadAttributeString,
pybind11::return_value_policy::take_ownership,
pybind11::arg("name"), pybind11::arg("variable_name") = "",
pybind11::arg("separator") = "/",
R"md(
Read a string attribute
Parameters
name
attribute name
variablename:
if attribute is associated with a variable
separator:
concatenation string between variablename and attribute
e.g. variablename + separator + name
var/units. Not used if variablename is empty
Returns
list:
resulting string list attribute data
)md")
        // Step/lifecycle helpers; the raw strings below become the
        // Python-visible docstrings.
        .def("end_step", &adios2::py11::File::EndStep, R"md(
             Write mode: advances to the next step.
             Convenient when declaring variable attributes
             as advancing to the next
             step is not attached to any variable.
             Read mode: in streaming mode releases the current step
             (no effect in file based engines)
         )md")
        .def("close", &adios2::py11::File::Close, R"md(
             Closes file, thus becoming unreachable.
             Not required if using open in a with-as statement.
             Required in all other cases per-open to avoid resource leaks.
         )md")
        .def("current_step", &adios2::py11::File::CurrentStep, R"md(
             Inspect current step when using for-in loops, read mode only
             Returns
                 current step
         )md")
        // Final .def terminates the class_ builder chain with ';'.
        .def("steps", &adios2::py11::File::Steps, R"md(
             Inspect total number of available steps, use for file engines in
             read mode only
             Returns
                 steps
         )md");
}
| 1 | 12,824 | I think I was involved in writing that caster, but I never fully understood the `import_mpi4py` thing. Why is importing mpi4py still necessary at that point? I would think if the user is passing a communicator from python code, they must already have imported mpi4py themselves, or does that not propagate through into the wrapping layer? | ornladios-ADIOS2 | cpp |
@@ -142,13 +142,14 @@ func (cache *httpCache) retrieve(target *core.BuildTarget, key []byte) (bool, er
resp, err := cache.client.Do(req)
if err != nil {
return false, err
- } else if resp.StatusCode == http.StatusNotFound {
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusNotFound {
return false, nil // doesn't exist - not an error
} else if resp.StatusCode != http.StatusOK {
b, _ := ioutil.ReadAll(resp.Body)
return false, fmt.Errorf("%s", string(b))
}
- defer resp.Body.Close()
gzr, err := gzip.NewReader(resp.Body)
if err != nil {
return false, err | 1 | // Http-based cache.
package cache
import (
"archive/tar"
"compress/gzip"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"time"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
)
type httpCache struct {
url string
writable bool
client *http.Client
requestLimiter limiter
}
type limiter chan struct{}
func (l limiter) acquire() {
l <- struct{}{}
}
func (l limiter) release() {
<-l
}
// mtime is the time we attach for the modification time of all files.
var mtime = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
// nobody is the usual uid / gid of the 'nobody' user.
const nobody = 65534
func (cache *httpCache) Store(target *core.BuildTarget, key []byte, files []string) {
if cache.writable {
cache.requestLimiter.acquire()
defer cache.requestLimiter.release()
r, w := io.Pipe()
go cache.write(w, target, files)
req, err := http.NewRequest(http.MethodPut, cache.makeURL(key), r)
if err != nil {
log.Warning("Invalid cache URL: %s", err)
return
}
if resp, err := cache.client.Do(req); err != nil {
log.Warning("Failed to store files in HTTP cache: %s", err)
} else {
resp.Body.Close()
}
}
}
// makeURL returns the remote URL for a key.
func (cache *httpCache) makeURL(key []byte) string {
return cache.url + "/" + hex.EncodeToString(key)
}
// write writes a series of files into the given Writer.
func (cache *httpCache) write(w io.WriteCloser, target *core.BuildTarget, files []string) {
defer w.Close()
gzw := gzip.NewWriter(w)
defer gzw.Close()
tw := tar.NewWriter(gzw)
defer tw.Close()
outDir := target.OutDir()
for _, out := range files {
if err := fs.Walk(path.Join(outDir, out), func(name string, isDir bool) error {
return cache.storeFile(tw, name)
}); err != nil {
log.Warning("Error uploading artifacts to HTTP cache: %s", err)
// TODO(peterebden): How can we cancel the request at this point?
}
}
}
func (cache *httpCache) storeFile(tw *tar.Writer, name string) error {
info, err := os.Lstat(name)
if err != nil {
return err
}
target := ""
if info.Mode()&os.ModeSymlink != 0 {
target, _ = os.Readlink(name)
}
hdr, err := tar.FileInfoHeader(info, target)
if err != nil {
return err
}
hdr.Name = name
// Zero out all timestamps.
hdr.ModTime = mtime
hdr.AccessTime = mtime
hdr.ChangeTime = mtime
// Strip user/group ids.
hdr.Uid = nobody
hdr.Gid = nobody
hdr.Uname = "nobody"
hdr.Gname = "nobody"
if err := tw.WriteHeader(hdr); err != nil {
return err
} else if info.IsDir() || target != "" {
return nil // nothing to write
}
f, err := os.Open(name)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(tw, f)
return err
}
func (cache *httpCache) Retrieve(target *core.BuildTarget, key []byte, files []string) bool {
cache.requestLimiter.acquire()
defer cache.requestLimiter.release()
m, err := cache.retrieve(target, key)
if err != nil {
log.Warning("%s: Failed to retrieve files from HTTP cache: %s", target.Label, err)
}
return m
}
func (cache *httpCache) retrieve(target *core.BuildTarget, key []byte) (bool, error) {
req, err := http.NewRequest(http.MethodGet, cache.makeURL(key), nil)
if err != nil {
return false, err
}
resp, err := cache.client.Do(req)
if err != nil {
return false, err
} else if resp.StatusCode == http.StatusNotFound {
return false, nil // doesn't exist - not an error
} else if resp.StatusCode != http.StatusOK {
b, _ := ioutil.ReadAll(resp.Body)
return false, fmt.Errorf("%s", string(b))
}
defer resp.Body.Close()
gzr, err := gzip.NewReader(resp.Body)
if err != nil {
return false, err
}
defer gzr.Close()
tr := tar.NewReader(gzr)
for {
hdr, err := tr.Next()
if err != nil {
if err == io.EOF {
return true, nil
}
return false, err
}
switch hdr.Typeflag {
case tar.TypeDir:
if err := os.MkdirAll(hdr.Name, core.DirPermissions); err != nil {
return false, err
}
case tar.TypeReg:
if dir := path.Dir(hdr.Name); dir != "." {
if err := os.MkdirAll(dir, core.DirPermissions); err != nil {
return false, err
}
}
if f, err := os.OpenFile(hdr.Name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, os.FileMode(hdr.Mode)); err != nil {
return false, err
} else if _, err := io.Copy(f, tr); err != nil {
return false, err
} else if err := f.Close(); err != nil {
return false, err
}
case tar.TypeSymlink:
if err := os.Symlink(hdr.Linkname, hdr.Name); err != nil {
return false, err
}
default:
log.Warning("Unhandled file type %d for %s", hdr.Typeflag, hdr.Name)
}
}
}
func (cache *httpCache) Clean(target *core.BuildTarget) {
// Not possible; this implementation can only clean for a hash.
}
func (cache *httpCache) CleanAll() {
// Also not possible.
}
func (cache *httpCache) Shutdown() {}
func newHTTPCache(config *core.Configuration) *httpCache {
return &httpCache{
url: config.Cache.HTTPURL.String(),
writable: config.Cache.HTTPWriteable,
client: &http.Client{
Timeout: time.Duration(config.Cache.HTTPTimeout),
},
requestLimiter: make(limiter, config.Cache.HTTPConcurrentRequestLimit),
}
}
| 1 | 9,192 | This seems like a more interesting change. Maybe we should re-name the PR | thought-machine-please | go |
@@ -20,6 +20,8 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal
IApplicationTransportFeature,
ITransportSchedulerFeature,
IConnectionLifetimeFeature,
+ IConnectionHeartbeatFeature,
+ IConnectionLifetimeNotificationFeature,
IBytesWrittenFeature
{
// NOTE: When feature interfaces are added to or removed from this TransportConnection class implementation, | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System.Buffers;
using System.Collections.Generic;
using System.IO.Pipelines;
using System.Net;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Connections.Features;
using Microsoft.AspNetCore.Http.Features;
namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal
{
public partial class TransportConnection : IHttpConnectionFeature,
IConnectionIdFeature,
IConnectionTransportFeature,
IConnectionItemsFeature,
IMemoryPoolFeature,
IApplicationTransportFeature,
ITransportSchedulerFeature,
IConnectionLifetimeFeature,
IBytesWrittenFeature
{
// NOTE: When feature interfaces are added to or removed from this TransportConnection class implementation,
// then the list of `features` in the generated code project MUST also be updated.
// See also: tools/CodeGenerator/TransportConnectionFeatureCollection.cs
string IHttpConnectionFeature.ConnectionId
{
get => ConnectionId;
set => ConnectionId = value;
}
IPAddress IHttpConnectionFeature.RemoteIpAddress
{
get => RemoteAddress;
set => RemoteAddress = value;
}
IPAddress IHttpConnectionFeature.LocalIpAddress
{
get => LocalAddress;
set => LocalAddress = value;
}
int IHttpConnectionFeature.RemotePort
{
get => RemotePort;
set => RemotePort = value;
}
int IHttpConnectionFeature.LocalPort
{
get => LocalPort;
set => LocalPort = value;
}
MemoryPool<byte> IMemoryPoolFeature.MemoryPool => MemoryPool;
IDuplexPipe IConnectionTransportFeature.Transport
{
get => Transport;
set => Transport = value;
}
IDuplexPipe IApplicationTransportFeature.Application
{
get => Application;
set => Application = value;
}
IDictionary<object, object> IConnectionItemsFeature.Items
{
get => Items;
set => Items = value;
}
PipeScheduler ITransportSchedulerFeature.InputWriterScheduler => InputWriterScheduler;
PipeScheduler ITransportSchedulerFeature.OutputReaderScheduler => OutputReaderScheduler;
CancellationToken IConnectionLifetimeFeature.ConnectionClosed
{
get => ConnectionClosed;
set => ConnectionClosed = value;
}
void IConnectionLifetimeFeature.Abort() => Abort(abortReason: null);
long IBytesWrittenFeature.TotalBytesWritten => TotalBytesWritten;
}
}
| 1 | 16,402 | Nit: implement the methods for these interfaces explicitly in this file for consistency. It can be passthrough. | aspnet-KestrelHttpServer | .cs |
@@ -102,6 +102,7 @@ public class CSharpCommonTransformer {
.typeName(typeName)
.setCallName("")
.addCallName("")
+ .getCallName("")
.isMap(false)
.isArray(false)
.isPrimitive(false) | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.csharp;
import com.google.api.codegen.config.GapicMethodConfig;
import com.google.api.codegen.config.GrpcStreamingConfig.GrpcStreamingType;
import com.google.api.codegen.transformer.GapicInterfaceContext;
import com.google.api.codegen.transformer.ModelTypeTable;
import com.google.api.codegen.transformer.ParamWithSimpleDoc;
import com.google.api.tools.framework.model.Method;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class CSharpCommonTransformer {
public void addCommonImports(GapicInterfaceContext context) {
ModelTypeTable typeTable = context.getModelTypeTable();
// Common imports, only one class per required namespace is needed.
typeTable.saveNicknameFor("Google.Api.Gax.GaxPreconditions");
typeTable.saveNicknameFor("Google.Api.Gax.Grpc.ServiceSettingsBase");
typeTable.saveNicknameFor("Google.Protobuf.WellKnownTypes.SomeSortOfWellKnownType");
typeTable.saveNicknameFor("Grpc.Core.StatusCode");
typeTable.saveNicknameFor("System.Collections.ObjectModel.ReadOnlyCollection");
typeTable.saveNicknameFor("System.Threading.Tasks.Task");
typeTable.saveNicknameFor("System.Threading.Thread");
typeTable.saveNicknameFor("System.NotImplementedException");
typeTable.saveNicknameFor("System.Collections.IEnumerable");
typeTable.saveNicknameFor("System.Collections.Generic.IEnumerable");
}
public List<Method> getSupportedMethods(GapicInterfaceContext context) {
List<Method> result = new ArrayList<>();
boolean mixinsDisabled = !context.getFeatureConfig().enableMixins();
for (Method method : context.getSupportedMethods()) {
if (mixinsDisabled && context.getMethodConfig(method).getRerouteToGrpcInterface() != null) {
continue;
}
GapicMethodConfig methodConfig = context.getMethodConfig(method);
if (methodConfig.getGrpcStreamingType() != GrpcStreamingType.NonStreaming
&& methodConfig.getGrpcStreamingType() != GrpcStreamingType.BidiStreaming) {
// Only support non-streaming and duplex-streaming for now
continue;
}
result.add(method);
}
return result;
}
public List<ParamWithSimpleDoc> callSettingsParam() {
return ImmutableList.of(
makeParam(
"CallSettings",
"callSettings",
"null",
"If not null, applies overrides to this RPC call."));
}
public List<ParamWithSimpleDoc> cancellationTokenParam() {
return ImmutableList.of(
makeParam(
"CancellationToken",
"cancellationToken",
null,
"A <see cref=\"CancellationToken\"/> to use for this RPC."));
}
public List<ParamWithSimpleDoc> pagedMethodAdditionalParams() {
return ImmutableList.of(
makeParam(
"string",
"pageToken",
"null",
"The token returned from the previous request.",
"A value of <c>null</c> or an empty string retrieves the first page."),
makeParam(
"int?",
"pageSize",
"null",
"The size of page to request. The response will not be larger than this, but may be smaller.",
"A value of <c>null</c> or 0 uses a server-defined page size."));
}
public ParamWithSimpleDoc makeParam(
String typeName, String name, String defaultValue, String... doc) {
return ParamWithSimpleDoc.newBuilder()
.name(name)
.elementTypeName("")
.typeName(typeName)
.setCallName("")
.addCallName("")
.isMap(false)
.isArray(false)
.isPrimitive(false)
.isOptional(false)
.defaultValue(defaultValue)
.docLines(Arrays.asList(doc))
.build();
}
}
| 1 | 22,326 | Not directly related to your PR, but I think here we should create a Type enum (MAP, ARRAY, PRIMITIVE) instead of having 3 flags. | googleapis-gapic-generator | java |
@@ -444,6 +444,7 @@ ExWorkProcRetcode ExHbaseScanSQTaskTcb::work(short &rc)
Lng32 retcode = 0;
rc = 0;
Lng32 remainingInBatch = batchSize_;
+ NABoolean isFirstBatch = false;
while (1)
{ | 1 | // **********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
// **********************************************************************
#include "Platform.h"
#include "ex_stdh.h"
#include "ComTdb.h"
#include "ex_tcb.h"
#include "ExHbaseAccess.h"
#include "ex_exe_stmt_globals.h"
#include "ExpLOBinterface.h"
#include "SQLTypeDefs.h"
#include "ExpHbaseInterface.h"
ExHbaseScanTaskTcb::ExHbaseScanTaskTcb(
ExHbaseAccessSelectTcb * tcb)
: ExHbaseTaskTcb(tcb)
, step_(NOT_STARTED)
{
}
void ExHbaseScanTaskTcb::init()
{
step_ = NOT_STARTED;
}
ExWorkProcRetcode ExHbaseScanTaskTcb::work(short &rc)
{
Lng32 retcode = 0;
rc = 0;
Lng32 remainingInBatch = batchSize_;
while (1)
{
ex_queue_entry *pentry_down = tcb_->qparent_.down->getHeadEntry();
switch (step_)
{
case NOT_STARTED:
{
step_ = SCAN_OPEN;
}
break;
case SCAN_OPEN:
{
tcb_->table_.val = tcb_->hbaseAccessTdb().getTableName();
tcb_->table_.len = strlen(tcb_->hbaseAccessTdb().getTableName());
if (tcb_->setupHbaseFilterPreds())
{
step_ = HANDLE_ERROR;
break;
}
retcode = tcb_->ehi_->scanOpen(tcb_->table_,
tcb_->beginRowId_, tcb_->endRowId_,
tcb_->columns_, -1,
tcb_->hbaseAccessTdb().readUncommittedScan(),
tcb_->hbaseAccessTdb().getHbasePerfAttributes()->cacheBlocks(),
tcb_->hbaseAccessTdb().getHbasePerfAttributes()->useSmallScanner(),
tcb_->hbaseAccessTdb().getHbasePerfAttributes()->numCacheRows(),
FALSE,
(tcb_->hbaseFilterColumns_.entries() > 0 ?
&tcb_->hbaseFilterColumns_ : NULL),
(tcb_->hbaseFilterOps_.entries() > 0 ?
&tcb_->hbaseFilterOps_ : NULL),
(tcb_->hbaseFilterValues_.entries() > 0 ?
&tcb_->hbaseFilterValues_ : NULL),
tcb_->getSamplePercentage(),
FALSE, 0, NULL, NULL, 0,
(tcb_->hbaseAccessTdb().getHbaseAccessOptions()
? tcb_->hbaseAccessTdb().getHbaseAccessOptions()->getNumVersions() : 0));
if (tcb_->setupError(retcode, "ExpHbaseInterface::scanOpen"))
step_ = HANDLE_ERROR;
else
step_ = NEXT_ROW;
}
break;
case NEXT_ROW:
{
if (--remainingInBatch <= 0)
{
rc = WORK_CALL_AGAIN;
return 1;
}
retcode = tcb_->ehi_->nextRow();
if (retcode == HBASE_ACCESS_EOD || retcode == HBASE_ACCESS_EOR)
{
step_ = SCAN_CLOSE;
break;
}
if (tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
step_ = HANDLE_ERROR;
else
step_ = NEXT_CELL;
}
break;
case NEXT_CELL:
{
if (tcb_->colVal_.val == NULL)
tcb_->colVal_.val = new (tcb_->getHeap())
char[tcb_->hbaseAccessTdb().convertRowLen()];
tcb_->colVal_.len = tcb_->hbaseAccessTdb().convertRowLen();
retcode = tcb_->ehi_->nextCell(tcb_->rowId_, tcb_->colFamName_,
tcb_->colName_, tcb_->colVal_, tcb_->colTS_);
if (retcode == HBASE_ACCESS_EOD)
{
step_ = NEXT_ROW;
break;
}
if (tcb_->setupError(retcode, "ExpHbaseInterface::nextCell"))
step_ = HANDLE_ERROR;
else
step_ = CREATE_ROW;
}
break;
case CREATE_ROW:
{
rc = tcb_->createColumnwiseRow();
if (rc == -1)
{
step_ = HANDLE_ERROR;
break;
}
else if (tcb_->setupError(rc, "ExHbaseAccessTcb::createColumnwiseRow", "Not enough space in target buffer to move data"))
{
step_ = HANDLE_ERROR;
break;
}
step_ = APPLY_PRED;
}
break;
case APPLY_PRED:
{
rc = tcb_->applyPred(tcb_->scanExpr());
if (rc == 1)
step_ = RETURN_ROW;
else if (rc == -1)
step_ = HANDLE_ERROR;
else
step_ = NEXT_CELL;
}
break;
case RETURN_ROW:
{
if (tcb_->moveRowToUpQueue(tcb_->convertRow_,
tcb_->hbaseAccessTdb().convertRowLen(), &rc, FALSE))
return 1;
if (tcb_->getHbaseAccessStats())
tcb_->getHbaseAccessStats()->incUsedRows();
if ((pentry_down->downState.request == ex_queue::GET_N) &&
(pentry_down->downState.requestValue == tcb_->matches_))
{
step_ = SCAN_CLOSE;
break;
}
step_ = NEXT_CELL;
}
break;
case SCAN_CLOSE:
{
retcode = tcb_->ehi_->scanClose();
if (tcb_->setupError(retcode, "ExpHbaseInterface::scanClose"))
step_ = HANDLE_ERROR;
else
step_ = DONE;
}
break;
case HANDLE_ERROR:
{
step_ = NOT_STARTED;
return -1;
}
break;
case DONE:
{
step_ = NOT_STARTED;
return 0;
}
break;
}// switch
} // while
}
// Constructor: bind this rowwise-scan task to its owning select TCB and
// start the state machine in NOT_STARTED.
ExHbaseScanRowwiseTaskTcb::ExHbaseScanRowwiseTaskTcb(ExHbaseAccessSelectTcb * tcb)
  : ExHbaseTaskTcb(tcb)
  , step_(NOT_STARTED)
{
}

// Reset the state machine so the task can be driven again for a new request.
void ExHbaseScanRowwiseTaskTcb::init()
{
  step_ = NOT_STARTED;
}
// Drive the rowwise-format HBase scan as a state machine.
// Returns 1 when control must go back to the scheduler (rc carries the
// work retcode), -1 on error, 0 when the scan has completed.
ExWorkProcRetcode ExHbaseScanRowwiseTaskTcb::work(short &rc)
{
  Lng32 retcode = 0;
  rc = 0;

  // Cap rows processed per work() call so this operator yields periodically.
  Lng32 remainingInBatch = batchSize_;

  while (1)
    {
      ex_queue_entry *pentry_down = tcb_->qparent_.down->getHeadEntry();

      switch (step_)
        {
        case NOT_STARTED:
          {
            step_ = SCAN_OPEN;
          }
          break;

        case SCAN_OPEN:
          {
            // Open the scanner with pushed-down filters and perf attributes.
            tcb_->table_.val = tcb_->hbaseAccessTdb().getTableName();
            tcb_->table_.len = strlen(tcb_->hbaseAccessTdb().getTableName());

            if (tcb_->setupHbaseFilterPreds())
              {
                step_ = HANDLE_ERROR;
                break;
              }

            retcode = tcb_->ehi_->scanOpen(tcb_->table_,
                                           tcb_->beginRowId_, tcb_->endRowId_,
                                           tcb_->columns_, -1,
                                           tcb_->hbaseAccessTdb().readUncommittedScan(),
                                           tcb_->hbaseAccessTdb().getHbasePerfAttributes()->cacheBlocks(),
                                           tcb_->hbaseAccessTdb().getHbasePerfAttributes()->useSmallScanner(),
                                           tcb_->hbaseAccessTdb().getHbasePerfAttributes()->numCacheRows(),
                                           FALSE,
                                           (tcb_->hbaseFilterColumns_.entries() > 0 ?
                                            &tcb_->hbaseFilterColumns_ : NULL),
                                           (tcb_->hbaseFilterOps_.entries() > 0 ?
                                            &tcb_->hbaseFilterOps_ : NULL),
                                           (tcb_->hbaseFilterValues_.entries() > 0 ?
                                            &tcb_->hbaseFilterValues_ : NULL),
                                           tcb_->getSamplePercentage(),
                                           FALSE, 0, NULL, NULL, 0,
                                           (tcb_->hbaseAccessTdb().getHbaseAccessOptions()
                                            ? tcb_->hbaseAccessTdb().getHbaseAccessOptions()->getNumVersions() : 0));
            if (tcb_->setupError(retcode, "ExpHbaseInterface::scanOpen"))
              step_ = HANDLE_ERROR;
            else
              step_ = NEXT_ROW;

            tcb_->isEOD_ = FALSE;
          }
          break;

        case NEXT_ROW:
          {
            // Yield to the scheduler once the per-call batch is exhausted.
            if (--remainingInBatch <= 0)
              {
                rc = WORK_CALL_AGAIN;
                return 1;
              }

            // Reset the rowwise accumulator for the next row.
            tcb_->rowwiseRowLen_ = 0;

            retcode = tcb_->ehi_->nextRow();
            // EOD/EOR: no more rows from the scanner.
            if (retcode == HBASE_ACCESS_EOD || retcode == HBASE_ACCESS_EOR)
              {
                step_ = SCAN_CLOSE;
                break;
              }
            if (tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
              step_ = HANDLE_ERROR;
            else
              step_ = NEXT_CELL;
          }
          break;

        case NEXT_CELL:
          {
            // Lazily allocate the cell-value buffer; reset its available
            // length before every nextCell call.
            if (tcb_->colVal_.val == NULL)
              tcb_->colVal_.val = new (tcb_->getHeap())
                char[tcb_->hbaseAccessTdb().convertRowLen()];
            tcb_->colVal_.len = tcb_->hbaseAccessTdb().convertRowLen();

            retcode = tcb_->ehi_->nextCell(tcb_->rowId_, tcb_->colFamName_,
                                           tcb_->colName_, tcb_->colVal_,
                                           tcb_->colTS_);
            // EOD here means all cells of the current row were consumed.
            if (retcode == HBASE_ACCESS_EOD)
              {
                step_ = CREATE_ROW;
                break;
              }
            if (tcb_->setupError(retcode, "ExpHbaseInterface::nextCell"))
              step_ = HANDLE_ERROR;
            else
              step_ = APPEND_ROW;
          }
          break;

        case APPEND_ROW:
          {
            // Append the fetched cell to the rowwise buffer.
            retcode = tcb_->copyCell();
            if (tcb_->setupError(retcode, "ExHbaseAccessTcb::copyCell", "Not enough space in target buffer to move data"))
              step_ = HANDLE_ERROR;
            else
              step_ = NEXT_CELL;
          }
          break;

        case CREATE_ROW:
          {
            // Materialize the accumulated cells as one rowwise row.
            rc = tcb_->createRowwiseRow();
            if (rc == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }
            step_ = APPLY_PRED;
          }
          break;

        case APPLY_PRED:
          {
            // 1 = row qualifies, -1 = error, otherwise skip the row.
            rc = tcb_->applyPred(tcb_->scanExpr());
            if (rc == 1)
              {
                step_ = RETURN_ROW;
                break;
              }
            else if (rc == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }
            else if (tcb_->isEOD_)
              step_ = SCAN_CLOSE;
            else
              step_ = NEXT_ROW;
          }
          break;

        case RETURN_ROW:
          {
            rc = 0;
            // Returns nonzero when the parent up-queue is full; retry later.
            if (tcb_->moveRowToUpQueue(tcb_->convertRow_,
                                       tcb_->hbaseAccessTdb().convertRowLen(), &rc, FALSE))
              return 1;

            if (tcb_->getHbaseAccessStats())
              tcb_->getHbaseAccessStats()->incUsedRows();

            // GET_N request satisfied: stop early.
            if ((pentry_down->downState.request == ex_queue::GET_N) &&
                (pentry_down->downState.requestValue == tcb_->matches_))
              {
                step_ = SCAN_CLOSE;
                break;
              }

            step_ = NEXT_ROW;
          }
          break;

        case SCAN_CLOSE:
          {
            retcode = tcb_->ehi_->scanClose();
            if (tcb_->setupError(retcode, "ExpHbaseInterface::scanClose"))
              step_ = HANDLE_ERROR;
            else
              step_ = DONE;
          }
          break;

        case HANDLE_ERROR:
          {
            step_ = NOT_STARTED;
            return -1;
          }
          break;

        case DONE:
          {
            step_ = NOT_STARTED;
            return 0;
          }
          break;

        }// switch
    } // while

  return 0; // WORK_OK
}
// Constructor: bind this SQ (Trafodion-format) scan task to its owning
// select TCB; the state machine starts in NOT_STARTED.
ExHbaseScanSQTaskTcb::ExHbaseScanSQTaskTcb(
     ExHbaseAccessSelectTcb * tcb)
  : ExHbaseTaskTcb(tcb)
  , step_(NOT_STARTED)
{
}

// Reset the state machine so the task can be driven again for a new request.
void ExHbaseScanSQTaskTcb::init()
{
  step_ = NOT_STARTED;
}
// Drive the Trafodion-table (SQ) HBase scan as a state machine, including
// snapshot-scan and multi-version support.
// Returns 1 to yield to the scheduler (rc set), -1 on error, 0 on completion.
ExWorkProcRetcode ExHbaseScanSQTaskTcb::work(short &rc)
{
  Lng32 retcode = 0;
  rc = 0;

  // Cap rows processed per work() call so this operator yields periodically.
  Lng32 remainingInBatch = batchSize_;

  while (1)
    {
      ex_queue_entry *pentry_down = tcb_->qparent_.down->getHeadEntry();

      switch (step_)
        {
        case NOT_STARTED:
          {
            step_ = SCAN_OPEN;
          }
          break;

        case SCAN_OPEN:
          {
            // Open the scanner with pushed-down filters, sampling, snapshot
            // scan attributes, and multi-version settings.
            tcb_->table_.val = tcb_->hbaseAccessTdb().getTableName();
            tcb_->table_.len = strlen(tcb_->hbaseAccessTdb().getTableName());

            if (tcb_->setupHbaseFilterPreds())
              {
                step_ = HANDLE_ERROR;
                break;
              }

            retcode = tcb_->ehi_->scanOpen(tcb_->table_,
                                           tcb_->beginRowId_, tcb_->endRowId_,
                                           tcb_->columns_, -1,
                                           tcb_->hbaseAccessTdb().readUncommittedScan(),
                                           tcb_->hbaseAccessTdb().getHbasePerfAttributes()->cacheBlocks(),
                                           tcb_->hbaseAccessTdb().getHbasePerfAttributes()->useSmallScanner(),
                                           tcb_->hbaseAccessTdb().getHbasePerfAttributes()->numCacheRows(),
                                           TRUE,
                                           (tcb_->hbaseFilterColumns_.entries() > 0 ?
                                            &tcb_->hbaseFilterColumns_ : NULL),
                                           (tcb_->hbaseFilterOps_.entries() > 0 ?
                                            &tcb_->hbaseFilterOps_ : NULL),
                                           (tcb_->hbaseFilterValues_.entries() > 0 ?
                                            &tcb_->hbaseFilterValues_ : NULL),
                                           tcb_->getSamplePercentage(),
                                           tcb_->hbaseAccessTdb().getHbaseSnapshotScanAttributes()->getUseSnapshotScan(),
                                           tcb_->hbaseAccessTdb().getHbaseSnapshotScanAttributes()->getSnapshotScanTimeout(),
                                           tcb_->hbaseAccessTdb().getHbaseSnapshotScanAttributes()->getSnapshotName(),
                                           tcb_->hbaseAccessTdb().getHbaseSnapshotScanAttributes()->getSnapScanTmpLocation(),
                                           tcb_->getGlobals()->castToExExeStmtGlobals()->getMyInstanceNumber(),
                                           (tcb_->hbaseAccessTdb().multiVersions()
                                            ? tcb_->hbaseAccessTdb().getHbaseAccessOptions()->getNumVersions() : 0)
                                           );
            if (tcb_->setupError(retcode, "ExpHbaseInterface::scanOpen"))
              step_ = HANDLE_ERROR;
            else
              step_ = NEXT_ROW;
          }
          break;

        case NEXT_ROW:
          {
            // Yield to the scheduler once the per-call batch is exhausted.
            if (--remainingInBatch <= 0)
              {
                rc = WORK_CALL_AGAIN;
                return 1;
              }

            retcode = tcb_->ehi_->nextRow();
            // EOD/EOR: no more rows from the scanner.
            if (retcode == HBASE_ACCESS_EOD || retcode == HBASE_ACCESS_EOR)
              {
                step_ = SCAN_CLOSE;
                break;
              }
            if (tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
              step_ = HANDLE_ERROR;
            else if (tcb_->hbaseAccessTdb().multiVersions())
              step_ = SETUP_MULTI_VERSION_ROW;
            else
              step_ = CREATE_ROW;
          }
          break;

        case SETUP_MULTI_VERSION_ROW:
          {
            // Prepare per-version iteration over the current row.
            retcode = tcb_->setupSQMultiVersionRow();
            if (retcode == HBASE_ACCESS_NO_ROW)
              {
                step_ = NEXT_ROW;
                break;
              }

            if (retcode < 0)
              {
                rc = (short)retcode;
                tcb_->setupError(rc, "setupSQMultiVersionRow");
                step_ = HANDLE_ERROR;
                break;
              }

            step_ = CREATE_ROW;
          }
          break;

        case CREATE_ROW:
          {
            // Convert the fetched HBase row (or next version of it) into
            // SQL format.
            retcode = tcb_->createSQRowDirect();
            if (retcode == HBASE_ACCESS_NO_ROW)
              {
                step_ = NEXT_ROW;
                break;
              }

            if (retcode < 0)
              {
                rc = (short)retcode;
                tcb_->setupError(rc, "createSQRowDirect");
                step_ = HANDLE_ERROR;
                break;
              }

            if (retcode != HBASE_ACCESS_SUCCESS)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            step_ = APPLY_PRED;
          }
          break;

        case APPLY_PRED:
          {
            // 1 = row qualifies, -1 = error, otherwise skip the row; with
            // multi-versions, skipping advances to the next version.
            rc = tcb_->applyPred(tcb_->scanExpr());
            if (rc == 1)
              step_ = RETURN_ROW;
            else if (rc == -1)
              step_ = HANDLE_ERROR;
            else if (tcb_->hbaseAccessTdb().multiVersions())
              step_ = CREATE_ROW;
            else
              step_ = NEXT_ROW;
          }
          break;

        case RETURN_ROW:
          {
            // Returns nonzero when the parent up-queue is full; retry later.
            if (tcb_->moveRowToUpQueue(tcb_->convertRow_, tcb_->convertRowLen_,
                                       &rc, FALSE))
              return 1;

            if (tcb_->getHbaseAccessStats())
              tcb_->getHbaseAccessStats()->incUsedRows();

            // GET_N request satisfied: stop early.
            if ((pentry_down->downState.request == ex_queue::GET_N) &&
                (pentry_down->downState.requestValue == tcb_->matches_))
              {
                step_ = SCAN_CLOSE;
                break;
              }

            if (tcb_->hbaseAccessTdb().multiVersions())
              step_ = CREATE_ROW;
            else
              step_ = NEXT_ROW;
          }
          break;

        case SCAN_CLOSE:
          {
            retcode = tcb_->ehi_->scanClose();
            if (tcb_->setupError(retcode, "ExpHbaseInterface::scanClose"))
              step_ = HANDLE_ERROR;
            else
              step_ = DONE;
          }
          break;

        case HANDLE_ERROR:
          {
            step_ = NOT_STARTED;
            return -1;
          }
          break;

        case DONE:
          {
            step_ = NOT_STARTED;
            return 0;
          }
          break;

        }// switch
    } // while
}
// Probe for the first row in [beginRowId_, endRowId_) and hand its encoded
// key back to the MDAM generator via keyData.
// Returns 0 on success, 1 when no row was found, -1 on error.
// The scanner is always closed before returning.
Lng32 ExHbaseScanSQTaskTcb::getProbeResult(char* &keyData)
{
  Lng32 retcode = 0;
  Lng32 rc = 0;

  Lng32 probeSize = 100; // using fewer rows results in intermittent wrong
  // results. Using the hbase default scan size of 100 as a workaround.
  if (tcb_->hbaseAccessTdb().getHbasePerfAttributes()->useMinMdamProbeSize())
    probeSize = 1; // if performance is vital, comp_bool_184 can be set to ON
  // to choose this path.

  retcode = tcb_->ehi_->scanOpen(tcb_->table_,
                                 tcb_->beginRowId_, tcb_->endRowId_,
                                 tcb_->columns_, -1,
                                 tcb_->hbaseAccessTdb().readUncommittedScan(),
                                 tcb_->hbaseAccessTdb().getHbasePerfAttributes()->cacheBlocks(),
                                 tcb_->hbaseAccessTdb().getHbasePerfAttributes()->useSmallScanner(),
                                 probeSize,
                                 TRUE, NULL, NULL, NULL);
  if (tcb_->setupError(retcode, "ExpHbaseInterface::scanOpen"))
    {
      rc = -1;
      goto label_return;
    }

  retcode = tcb_->ehi_->nextRow();
  if (retcode == HBASE_ACCESS_EOD || retcode == HBASE_ACCESS_EOR)
    {
      rc = 1; // no row found
      goto label_return;
    }

  if (tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
    {
      rc = -1;
      goto label_return;
    }

  retcode = tcb_->createSQRowDirect();
  if (retcode == HBASE_ACCESS_NO_ROW)
    {
      rc = 1;
      goto label_return;
    }

  if (retcode < 0)
    {
      rc = retcode;
      tcb_->setupError(rc, "createSQRowDirect");
      rc = -1;
      goto label_return;
    }

  if (retcode != HBASE_ACCESS_SUCCESS)
    {
      rc = -1;
      goto label_return;
    }

  // extract the key from the fetched row, encode it and pass it back to mdam
  if (tcb_->evalEncodedKeyExpr() == -1)
    {
      rc = -1;
      goto label_return;
    }

 label_return:
  // Close unconditionally; a close error overrides any prior result code.
  retcode = tcb_->ehi_->scanClose();
  if (tcb_->setupError(retcode, "ExpHbaseInterface::scanClose"))
    {
      rc = -1;
    }

  keyData = tcb_->encodedKeyRow_;

  return rc;
}
// Constructor: bind this columnwise get task to its owning select TCB;
// the state machine starts in NOT_STARTED.
ExHbaseGetTaskTcb::ExHbaseGetTaskTcb(
     ExHbaseAccessSelectTcb * tcb)
  : ExHbaseTaskTcb(tcb)
  , step_(NOT_STARTED)
{
}

// Reset the state machine so the task can be driven again for a new request.
void ExHbaseGetTaskTcb::init()
{
  step_ = NOT_STARTED;
}
// Drive the columnwise-format HBase get (single or multi rowId) as a
// state machine.
// Returns 1 to yield to the scheduler (rc set), -1 on error, 0 on completion.
ExWorkProcRetcode ExHbaseGetTaskTcb::work(short &rc)
{
  Lng32 retcode = 0;
  rc = 0;

  // Cap rows processed per work() call so this operator yields periodically.
  Lng32 remainingInBatch = batchSize_;

  while (1)
    {
      ex_queue_entry *pentry_down = tcb_->qparent_.down->getHeadEntry();

      switch (step_)
        {
        case NOT_STARTED:
          {
            step_ = GET_OPEN;
          }
          break;

        case GET_OPEN:
          {
            tcb_->table_.val = tcb_->hbaseAccessTdb().getTableName();
            tcb_->table_.len = strlen(tcb_->hbaseAccessTdb().getTableName());

            if (tcb_->evalRowIdExpr() == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            // Single rowId uses the cheaper single-get open.
            if (tcb_->rowIds_.entries() == 1)
              {
                retcode = tcb_->ehi_->getRowOpen(tcb_->table_, tcb_->rowIds_[0],
                                                 tcb_->columns_, -1);
                if (tcb_->setupError(retcode, "ExpHbaseInterface::getRowOpen"))
                  step_ = HANDLE_ERROR;
                else
                  step_ = NEXT_ROW;
              }
            else
              {
                retcode = tcb_->ehi_->getRowsOpen(tcb_->table_, &tcb_->rowIds_,
                                                  tcb_->columns_, -1);
                if (tcb_->setupError(retcode, "ExpHbaseInterface::getRowsOpen"))
                  step_ = HANDLE_ERROR;
                else
                  step_ = NEXT_ROW;
              }
          }
          break;

        case NEXT_ROW:
          {
            // Yield to the scheduler once the per-call batch is exhausted.
            if (--remainingInBatch <= 0)
              {
                rc = WORK_CALL_AGAIN;
                return 1;
              }

            retcode = tcb_->ehi_->nextRow();
            // EOD/EOR: no more rows in the result set.
            if (retcode == HBASE_ACCESS_EOD || retcode == HBASE_ACCESS_EOR)
              {
                step_ = GET_CLOSE;
                break;
              }
            if (tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
              step_ = HANDLE_ERROR;
            else
              step_ = NEXT_CELL;
          }
          break;

        case NEXT_CELL:
          {
            // Lazily allocate the cell-value buffer; reset its available
            // length before every nextCell call.
            if (tcb_->colVal_.val == NULL)
              tcb_->colVal_.val = new (tcb_->getHeap())
                char[tcb_->hbaseAccessTdb().convertRowLen()];
            // BUGFIX: this was a no-op expression statement
            // (`tcb_->hbaseAccessTdb().convertRowLen();`), leaving
            // colVal_.len stale/unset. Assign it, matching the sibling
            // NEXT_CELL states in the scan/rowwise-get tasks.
            tcb_->colVal_.len = tcb_->hbaseAccessTdb().convertRowLen();

            retcode = tcb_->ehi_->nextCell( tcb_->rowId_, tcb_->colFamName_,
                                            tcb_->colName_, tcb_->colVal_, tcb_->colTS_);
            // EOD here means all cells of the current row were consumed.
            if (retcode == HBASE_ACCESS_EOD)
              {
                step_ = NEXT_ROW;
                break;
              }
            if (tcb_->setupError(retcode, "ExpHbaseInterface::nextCell"))
              step_ = HANDLE_ERROR;
            else
              step_ = CREATE_ROW;
          }
          break;

        case CREATE_ROW:
          {
            // Move the fetched cell into the columnwise output row.
            rc = tcb_->createColumnwiseRow();
            if (rc == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }
            else if (tcb_->setupError(rc, "ExHbaseAccessTcb::createColumnwiseRow", "Not enough space in target buffer to move data"))
              {
                step_ = HANDLE_ERROR;
                break;
              }
            step_ = APPLY_PRED;
          }
          break;

        case APPLY_PRED:
          {
            // 1 = row qualifies, -1 = error, otherwise fetch the next cell.
            rc = tcb_->applyPred(tcb_->scanExpr());
            if (rc == 1)
              step_ = RETURN_ROW;
            else if (rc == -1)
              step_ = HANDLE_ERROR;
            else
              step_ = NEXT_CELL;
          }
          break;

        case RETURN_ROW:
          {
            rc = 0;
            // Returns nonzero when the parent up-queue is full; retry later.
            if (tcb_->moveRowToUpQueue(tcb_->convertRow_, tcb_->hbaseAccessTdb().convertRowLen(),
                                       &rc, FALSE))
              return 1;

            if (tcb_->getHbaseAccessStats())
              tcb_->getHbaseAccessStats()->incUsedRows();

            // GET_N request satisfied: stop early.
            if ((pentry_down->downState.request == ex_queue::GET_N) &&
                (pentry_down->downState.requestValue == tcb_->matches_))
              {
                step_ = GET_CLOSE;
                break;
              }

            step_ = NEXT_CELL;
          }
          break;

        case GET_CLOSE:
          {
            retcode = tcb_->ehi_->getClose();
            if (tcb_->setupError(retcode, "ExpHbaseInterface::getClose"))
              step_ = HANDLE_ERROR;
            else
              step_ = DONE;
          }
          break;

        case HANDLE_ERROR:
          {
            step_ = NOT_STARTED;
            return -1;
          }
          break;

        case DONE:
          {
            step_ = NOT_STARTED;
            return 0;
          }
          break;

        }// switch
    } // while
}
// Constructor: bind this rowwise-format get task to its owning select TCB;
// the state machine starts in NOT_STARTED.
ExHbaseGetRowwiseTaskTcb::ExHbaseGetRowwiseTaskTcb(
     ExHbaseAccessSelectTcb * tcb)
  : ExHbaseTaskTcb(tcb)
  , step_(NOT_STARTED)
{
}

// Reset the state machine so the task can be driven again for a new request.
void ExHbaseGetRowwiseTaskTcb::init()
{
  step_ = NOT_STARTED;
}
// Drive the rowwise-format HBase get as a state machine. At most one row
// is returned per request (RETURN_ROW always proceeds to GET_CLOSE).
// Returns 1 to yield to the scheduler (rc set), -1 on error, 0 on completion.
ExWorkProcRetcode ExHbaseGetRowwiseTaskTcb::work(short &rc)
{
  Lng32 retcode = 0;
  rc = 0;

  // Cap rows processed per work() call so this operator yields periodically.
  Lng32 remainingInBatch = batchSize_;

  while (1)
    {
      ex_queue_entry *pentry_down = tcb_->qparent_.down->getHeadEntry();

      switch (step_)
        {
        case NOT_STARTED:
          {
            step_ = GET_OPEN;
          }
          break;

        case GET_OPEN:
          {
            tcb_->table_.val = tcb_->hbaseAccessTdb().getTableName();
            tcb_->table_.len = strlen(tcb_->hbaseAccessTdb().getTableName());

            if (tcb_->evalRowIdExpr() == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            // Single rowId uses the cheaper single-get open.
            if (tcb_->rowIds_.entries() == 1)
              {
                retcode = tcb_->ehi_->getRowOpen(tcb_->table_, tcb_->rowIds_[0],
                                                 tcb_->columns_, -1);
                if (tcb_->setupError(retcode, "ExpHbaseInterface::getRowOpen"))
                  step_ = HANDLE_ERROR;
                else
                  step_ = NEXT_ROW;
              }
            else
              {
                retcode = tcb_->ehi_->getRowsOpen(tcb_->table_, &tcb_->rowIds_,
                                                  tcb_->columns_, -1);
                if (tcb_->setupError(retcode, "ExpHbaseInterface::getRowsOpen"))
                  step_ = HANDLE_ERROR;
                else
                  step_ = NEXT_ROW;
              }
          }
          break;

        case NEXT_ROW:
          {
            // Yield to the scheduler once the per-call batch is exhausted.
            if (--remainingInBatch <= 0)
              {
                rc = WORK_CALL_AGAIN;
                return 1;
              }

            retcode = tcb_->ehi_->nextRow();
            // EOD/EOR: no more rows in the result set.
            if (retcode == HBASE_ACCESS_EOD || retcode == HBASE_ACCESS_EOR)
              {
                step_ = GET_CLOSE;
                break;
              }
            if (tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
              step_ = HANDLE_ERROR;
            else
              step_ = NEXT_CELL;
          }
          break;

        case NEXT_CELL:
          {
            // Lazily allocate the cell-value buffer; reset its available
            // length before every nextCell call.
            if (tcb_->colVal_.val == NULL)
              tcb_->colVal_.val = new (tcb_->getHeap())
                char[tcb_->hbaseAccessTdb().convertRowLen()];
            tcb_->colVal_.len = tcb_->hbaseAccessTdb().convertRowLen();

            retcode = tcb_->ehi_->nextCell(tcb_->rowId_, tcb_->colFamName_,
                                           tcb_->colName_, tcb_->colVal_, tcb_->colTS_);
            // EOD here means all cells of the current row were consumed.
            if (retcode == HBASE_ACCESS_EOD)
              step_ = CREATE_ROW;
            else
              if (tcb_->setupError(retcode, "ExpHbaseInterface::nextCell"))
                step_ = HANDLE_ERROR;
              else
                step_ = APPEND_ROW;
          }
          break;

        case APPEND_ROW:
          {
            // Append the fetched cell to the rowwise buffer.
            retcode = tcb_->copyCell();
            if (tcb_->setupError(retcode, "ExHbaseAccessTcb::copyCell", "Not enough space in target buffer to move data"))
              step_ = HANDLE_ERROR;
            else
              step_ = NEXT_CELL;
          }
          break;

        case CREATE_ROW:
          {
            // Materialize the accumulated cells as one rowwise row.
            rc = tcb_->createRowwiseRow();
            if (rc == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            step_ = APPLY_PRED;
          }
          break;

        case APPLY_PRED:
          {
            // 1 = row qualifies, -1 = error, otherwise the single row did
            // not qualify and the get is over.
            rc = tcb_->applyPred(tcb_->scanExpr());
            if (rc == 1)
              step_ = RETURN_ROW;
            else if (rc == -1)
              step_ = HANDLE_ERROR;
            else
              step_ = GET_CLOSE;
          }
          break;

        case RETURN_ROW:
          {
            rc = 0;
            // Returns nonzero when the parent up-queue is full; retry later.
            if (tcb_->moveRowToUpQueue(tcb_->convertRow_, tcb_->hbaseAccessTdb().convertRowLen(),
                                       &rc, FALSE))
              return 1;

            if (tcb_->getHbaseAccessStats())
              tcb_->getHbaseAccessStats()->incUsedRows();

            step_ = GET_CLOSE;
          }
          break;

        case GET_CLOSE:
          {
            retcode = tcb_->ehi_->getClose();
            if (tcb_->setupError(retcode, "ExpHbaseInterface::getClose"))
              step_ = HANDLE_ERROR;
            else
              step_ = DONE;
          }
          break;

        case HANDLE_ERROR:
          {
            step_ = NOT_STARTED;
            return -1;
          }
          break;

        case DONE:
          {
            step_ = NOT_STARTED;
            return 0;
          }
          break;

        }// switch
    } // while
}
// Constructor: bind this SQ (Trafodion-format) get task to its owning TCB.
// rowsetTcb indicates rowset (VSBB) mode, which changes how DONE/EOD are
// signalled to the parent.
ExHbaseGetSQTaskTcb::ExHbaseGetSQTaskTcb( ExHbaseAccessTcb * tcb, NABoolean rowsetTcb)
  : ExHbaseTaskTcb(tcb, rowsetTcb)
  , step_(NOT_STARTED)
{
}

// Reset the state machine so the task can be driven again for a new request.
void ExHbaseGetSQTaskTcb::init()
{
  step_ = NOT_STARTED;
}
// Drive the Trafodion-table (SQ) get as a state machine, in either plain or
// rowset (VSBB) mode. remainingInBatch_ is a member: it is set to the
// number of rowIds at GET_OPEN and decremented per fetched row, so the
// countdown survives returns to the scheduler.
// Returns 1 to yield to the scheduler (rc set), -1 on error, 0 on completion.
ExWorkProcRetcode ExHbaseGetSQTaskTcb::work(short &rc)
{
  Lng32 retcode = 0;
  rc = 0;

  while (1)
    {
      // In rowset mode the down queue may be empty between requests.
      ex_queue_entry *pentry_down = NULL;
      if (! tcb_->qparent_.down->isEmpty())
        pentry_down = tcb_->qparent_.down->getHeadEntry();

      switch (step_)
        {
        case NOT_STARTED:
          {
            step_ = GET_OPEN;
          }
          break;

        case GET_OPEN:
          {
            tcb_->table_.val = tcb_->hbaseAccessTdb().getTableName();
            tcb_->table_.len = strlen(tcb_->hbaseAccessTdb().getTableName());
            remainingInBatch_ = tcb_->rowIds_.entries();

            // Single rowId uses the cheaper single-get open.
            if (tcb_->rowIds_.entries() == 1)
              {
                retcode = tcb_->ehi_->getRowOpen(tcb_->table_, tcb_->rowIds_[0],
                                                 tcb_->columns_, -1);
                if (tcb_->setupError(retcode, "ExpHbaseInterface::getRowOpen"))
                  step_ = HANDLE_ERROR;
                else
                  step_ = NEXT_ROW;
              }
            else
              {
                retcode = tcb_->ehi_->getRowsOpen(tcb_->table_, &tcb_->rowIds_,
                                                  tcb_->columns_, -1);
                if (tcb_->setupError(retcode, "ExpHbaseInterface::getRowsOpen"))
                  step_ = HANDLE_ERROR;
                else
                  step_ = NEXT_ROW;
              }
          }
          break;

        case NEXT_ROW:
          {
            // All requested rowIds consumed.
            if (remainingInBatch_ <= 0) {
              step_ = GET_CLOSE;
              break;
            }
            retcode = tcb_->ehi_->nextRow();
            remainingInBatch_--;
            // EOD is end of data, EOR is end of result set.
            // for single get, EOD or EOR indicates DONE
            // for multi get, only EOR indicates DONE
            if ( (retcode == HBASE_ACCESS_EOR) ||
                 ( (retcode == HBASE_ACCESS_EOD) &&
                   (tcb_->rowIds_.entries() == 1) ) )
              {
                if (rowsetTcb_)
                  step_ = DONE;
                else
                  step_ = GET_CLOSE;
                break;
              }
            // for multi get, do FETCH if retcode is EOD
            if ( (retcode == HBASE_ACCESS_EOD) &&
                 (tcb_->rowIds_.entries() > 1) )
              {
                if (rowsetTcb_)
                  step_ = DONE;
                else
                  step_ = NEXT_ROW;
                break;
              }
            if (tcb_->setupError(retcode, "ExpHbaseInterface::nextRow"))
              step_ = HANDLE_ERROR;
            else
              step_ = CREATE_ROW;
          }
          break;

        case CREATE_ROW:
          {
            // Convert the fetched HBase row into SQL format.
            retcode = tcb_->createSQRowDirect();
            if (retcode == HBASE_ACCESS_NO_ROW)
              {
                if (rowsetTcb_)
                  step_ = DONE;
                else
                  step_ = NEXT_ROW;
                break;
              }
            if (retcode < 0)
              {
                rc = (short)retcode;
                tcb_->setupError(rc, "createSQRowDirect");
                step_ = HANDLE_ERROR;
                break;
              }
            if (retcode != HBASE_ACCESS_SUCCESS)
              {
                step_ = HANDLE_ERROR;
                break;
              }
            step_ = APPLY_PRED;
          }
          break;

        case APPLY_PRED:
          {
            // 1 = row qualifies, -1 = error, otherwise skip the row.
            rc = tcb_->applyPred(tcb_->scanExpr());
            if (rc == 1)
              step_ = RETURN_ROW;
            else if (rc == -1)
              step_ = HANDLE_ERROR;
            else
              {
                if (rowsetTcb_)
                  step_ = DONE;
                else
                  step_ = NEXT_ROW;
              }
          }
          break;

        case RETURN_ROW:
          {
            rc = 0;
            // Returns nonzero when the parent up-queue is full; retry later.
            if (tcb_->moveRowToUpQueue(tcb_->convertRow_, tcb_->hbaseAccessTdb().convertRowLen(),
                                       &rc, FALSE))
              return 1;

            if (tcb_->getHbaseAccessStats())
              tcb_->getHbaseAccessStats()->incUsedRows();
            if (rowsetTcb_)
              step_ = DONE;
            else
              {
                // GET_N request satisfied: stop early.
                if ((pentry_down->downState.request == ex_queue::GET_N) &&
                    (pentry_down->downState.requestValue == tcb_->matches_))
                  {
                    step_ = GET_CLOSE;
                    break;
                  }
                step_ = NEXT_ROW;
              }
          }
          break;

        case GET_CLOSE:
          {
            retcode = tcb_->ehi_->getClose();
            if (tcb_->setupError(retcode, "ExpHbaseInterface::getClose"))
              step_ = HANDLE_ERROR;
            else
              step_ = ALL_DONE;
          }
          break;

        case HANDLE_ERROR:
          {
            step_ = NOT_STARTED;
            return -1;
          }
          break;

        case DONE:
          // Rowset mode: report one down-queue entry done, then continue
          // with the next rowId.
          if (tcb_->handleDone(rc, 0))
            return 1;
          else
            step_ = NEXT_ROW;
          break;

        case ALL_DONE:
          step_ = NOT_STARTED;
          return 0;
          break;

        }// switch
    } // while

  return 0;
}
// Constructor: allocate the scan and get sub-task TCBs that this select
// may need, based on the TDB's access pattern (list of scan/get rows, key
// subset, MDAM) and table flavor (Trafodion "SQ", rowwise HBase, or
// columnwise HBase), then pick the active scanTask_/getTask_ pair.
ExHbaseAccessSelectTcb::ExHbaseAccessSelectTcb(
     const ExHbaseAccessTdb &hbaseAccessTdb,
     ex_globals * glob ) :
  ExHbaseAccessTcb(hbaseAccessTdb, glob),
  step_(NOT_STARTED)
{
  scanRowwiseTaskTcb_ = NULL;
  scanTaskTcb_ = NULL;
  getRowwiseTaskTcb_ = NULL;
  getTaskTcb_ = NULL;
  scanSQTaskTcb_ = NULL;
  getSQTaskTcb_ = NULL;

  ExHbaseAccessTdb &hbaseTdb = (ExHbaseAccessTdb&)hbaseAccessTdb;

  // A scan task is needed for subset access: explicit scan-row list,
  // non-unique key subset, or MDAM.
  if ((hbaseTdb.listOfScanRows()) ||
      ((hbaseTdb.keySubsetGen()) &&
       (NOT hbaseTdb.uniqueKeyInfo())) ||
      (hbaseTdb.keyMDAMGen()))
    {
      if (hbaseTdb.sqHbaseTable())
        scanSQTaskTcb_ =
          new(getGlobals()->getDefaultHeap()) ExHbaseScanSQTaskTcb(this);
      else if (hbaseTdb.rowwiseFormat())
        scanRowwiseTaskTcb_ =
          new(getGlobals()->getDefaultHeap()) ExHbaseScanRowwiseTaskTcb(this);
      else
        scanTaskTcb_ =
          new(getGlobals()->getDefaultHeap()) ExHbaseScanTaskTcb(this);
    }

  // A get task is needed for unique access: explicit get-row list,
  // unique key subset, or MDAM.
  if ((hbaseTdb.listOfGetRows()) ||
      ((hbaseTdb.keySubsetGen()) &&
       (hbaseTdb.uniqueKeyInfo())) ||
      (hbaseTdb.keyMDAMGen()))
    {
      if (hbaseTdb.sqHbaseTable())
        getSQTaskTcb_ =
          new(getGlobals()->getDefaultHeap()) ExHbaseGetSQTaskTcb(this, FALSE);
      else if (hbaseTdb.rowwiseFormat())
        getRowwiseTaskTcb_ =
          new(getGlobals()->getDefaultHeap()) ExHbaseGetRowwiseTaskTcb(this);
      else
        getTaskTcb_ =
          new(getGlobals()->getDefaultHeap()) ExHbaseGetTaskTcb(this);
    }

  // Select the active pair for this table flavor (unused ones stay NULL).
  if (hbaseTdb.sqHbaseTable())
    {
      scanTask_ = scanSQTaskTcb_;
      getTask_ = getSQTaskTcb_;
    }
  else if (hbaseTdb.rowwiseFormat())
    {
      scanTask_ = scanRowwiseTaskTcb_;
      getTask_ = getRowwiseTaskTcb_;
    }
  else
    {
      scanTask_ = scanTaskTcb_;
      getTask_ = getTaskTcb_;
    }
}
// Top-level select driver: initializes the HBase interface, then walks the
// TDB's scan-row list, get-row list, and key-generator subsets, delegating
// the actual fetching to the scan/get sub-tasks.
ExWorkProcRetcode ExHbaseAccessSelectTcb::work()
{
  Lng32 retcode = 0;
  short rc = 0;

  while (!qparent_.down->isEmpty())
    {
      ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();

      // Cancel: close without raising errors from the close itself.
      if ((pentry_down->downState.request == ex_queue::GET_NOMORE) &&
          (step_ != DONE))
        {
          step_ = SELECT_CLOSE_NO_ERROR; //DONE;
        }

      switch (step_)
        {
        case NOT_STARTED:
          {
            matches_ = 0;

            // GET_N of zero rows: nothing to do.
            if ((pentry_down->downState.request == ex_queue::GET_N) &&
                (pentry_down->downState.requestValue == matches_))
              {
                step_ = DONE;
                break;
              }

            step_ = SELECT_INIT;
          }
          break;

        case SELECT_INIT:
          {
            retcode = ehi_->init(getHbaseAccessStats());
            if (setupError(retcode, "ExpHbaseInterface::init"))
              {
                step_ = HANDLE_ERROR;
                break;
              }

            // Rewind the TDB row lists and reset the sub-task machines.
            if (hbaseAccessTdb().listOfScanRows())
              hbaseAccessTdb().listOfScanRows()->position();

            if (hbaseAccessTdb().listOfGetRows())
              hbaseAccessTdb().listOfGetRows()->position();

            if (scanTask_)
              scanTask_->init();

            if (getTask_)
              getTask_->init();

            step_ = SETUP_SCAN;
          }
          break;

        case SETUP_SCAN:
          {
            if ((! scanTask_) || (! hbaseAccessTdb().listOfScanRows()))
              {
                step_ = SETUP_GET;
                break;
              }

            hsr_ =
              (ComTdbHbaseAccess::HbaseScanRows*)hbaseAccessTdb().listOfScanRows()
              ->getCurr();

            retcode = setupSubsetRowIdsAndCols(hsr_);
            if (retcode == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            step_ = PROCESS_SCAN;
          }
          break;

        case PROCESS_SCAN:
        case PROCESS_SCAN_KEY:
          {
            // Delegate to the scan sub-task; 1 means it needs the scheduler.
            rc = 0;
            retcode = scanTask_->work(rc);

            if (retcode == 1)
              return rc;
            else if (retcode < 0)
              step_ = HANDLE_ERROR;
            else if (step_ == PROCESS_SCAN_KEY)
              step_ = SETUP_GET_KEY;
            else
              step_ = NEXT_SCAN;
          }
          break;

        case NEXT_SCAN:
          {
            hbaseAccessTdb().listOfScanRows()->advance();

            if (! hbaseAccessTdb().listOfScanRows()->atEnd())
              {
                step_ = SETUP_SCAN;
                break;
              }

            step_ = SETUP_GET;
          }
          break;

        case SETUP_GET:
          {
            if ((! getTask_) || (!hbaseAccessTdb().listOfGetRows()))
              {
                step_ = SETUP_SCAN_KEY;
                break;
              }

            hgr_ =
              (ComTdbHbaseAccess::HbaseGetRows*)hbaseAccessTdb().listOfGetRows()
              ->getCurr();

            retcode = setupUniqueRowIdsAndCols(hgr_);
            if (retcode == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            step_ = PROCESS_GET;
          }
          break;

        case PROCESS_GET:
        case PROCESS_GET_KEY:
          {
            // Delegate to the get sub-task; 1 means it needs the scheduler.
            rc = 0;
            retcode = getTask_->work(rc);

            if (retcode == 1)
              return rc;
            else if (retcode < 0)
              step_ = HANDLE_ERROR;
            else if (step_ == PROCESS_GET_KEY)
              step_ = SELECT_CLOSE;
            else
              step_ = NEXT_GET;
          }
          break;

        case NEXT_GET:
          {
            hbaseAccessTdb().listOfGetRows()->advance();

            if (! hbaseAccessTdb().listOfGetRows()->atEnd())
              {
                step_ = SETUP_GET;
                break;
              }

            step_ = SETUP_SCAN_KEY;
          }
          break;

        case SETUP_SCAN_KEY:
          {
            // Key-generated subset access: unique keys go through the get
            // path, non-unique through the scan path.
            if (! hbaseAccessTdb().keySubsetGen())
              {
                step_ = SELECT_CLOSE;
                break;
              }

            if (hbaseAccessTdb().uniqueKeyInfo())
              {
                step_ = SETUP_GET_KEY;
                break;
              }

            retcode = setupSubsetKeysAndCols();
            if (retcode == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            step_ = PROCESS_SCAN_KEY;
          }
          break;

        case SETUP_GET_KEY:
          {
            if ((! getTask_) ||
                ((hbaseAccessTdb().keySubsetGen()) &&
                 (NOT hbaseAccessTdb().uniqueKeyInfo())))
              {
                step_ = SELECT_CLOSE;
                break;
              }

            retcode = setupUniqueKeyAndCols(TRUE);
            if (retcode == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            step_ = PROCESS_GET_KEY;
          }
          break;

        case SELECT_CLOSE:
        case SELECT_CLOSE_NO_ERROR:
          {
            retcode = ehi_->close();

            // In the cancel path, close errors are deliberately ignored.
            if (step_ == SELECT_CLOSE)
              {
                if (setupError(retcode, "ExpHbaseInterface::close"))
                  {
                    step_ = HANDLE_ERROR_NO_CLOSE;
                    break;
                  }
              }

            step_ = DONE;
          }
          break;

        case HANDLE_ERROR:
        case HANDLE_ERROR_NO_CLOSE:
          {
            if (handleError(rc))
              return rc;

            // Avoid double-close when the error came from close itself.
            if (step_ == HANDLE_ERROR)
              retcode = ehi_->close();

            step_ = DONE;
          }
          break;

        case DONE:
          {
            if (handleDone(rc))
              return rc;

            // Reset sub-tasks for the next down-queue request.
            if (scanTask_)
              scanTask_->init();

            if (getTask_)
              getTask_->init();

            step_ = NOT_STARTED;
          }
          break;

        } // switch
    } // while

  return WORK_OK;
}
// Constructor: MDAM variant of the select TCB; the parent constructor
// allocates the scan/get sub-tasks this operator drives.
ExHbaseAccessMdamSelectTcb::ExHbaseAccessMdamSelectTcb(
     const ExHbaseAccessTdb &hbaseAccessTdb,
     ex_globals * glob ) :
  ExHbaseAccessSelectTcb(hbaseAccessTdb, glob),
  step_(NOT_STARTED)
{
}
// MDAM select driver: repeatedly asks the key generator for the next key
// range, then either probes it (fetch one key to refine the generator) or
// fetches it fully via the SQ scan task.
ExWorkProcRetcode ExHbaseAccessMdamSelectTcb::work()
{
  Lng32 retcode = 0;
  short rc = 0;

  while (!qparent_.down->isEmpty())
    {
      ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();

      // Cancel: close without raising errors from the close itself.
      if ((pentry_down->downState.request == ex_queue::GET_NOMORE) &&
          (step_ != DONE))
        {
          step_ = SELECT_CLOSE_NO_ERROR;
        }

      switch (step_)
        {
        case NOT_STARTED:
          {
            matches_ = 0;
            matchesBeforeFetch_ = 0;

            // GET_N of zero rows: nothing to do.
            if ((pentry_down->downState.request == ex_queue::GET_N) &&
                (pentry_down->downState.requestValue == matches_))
              {
                step_ = DONE;
                break;
              }

            step_ = SELECT_INIT;
          }
          break;

        case SELECT_INIT:
          {
            retcode = ehi_->init(getHbaseAccessStats());
            if (setupError(retcode, "ExpHbaseInterface::init"))
              {
                step_ = HANDLE_ERROR;
                break;
              }

            table_.val = hbaseAccessTdb().getTableName();
            table_.len = strlen(hbaseAccessTdb().getTableName());

            step_ = INIT_NEXT_KEY_RANGE;
          }
          break;

        case INIT_NEXT_KEY_RANGE:
          {
            retcode = initNextKeyRange(pool_, pentry_down->getAtp());
            if (retcode == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            setupListOfColNames(hbaseAccessTdb().listOfFetchedColNames(),
                                columns_);

            // Start optimistically so the generator produces a first range.
            fetchRangeHadRows_ = TRUE;
            step_ = GET_NEXT_KEY_RANGE;
          }
          break;

        case GET_NEXT_KEY_RANGE:
          {
            // Feed back whether the previous fetch range produced rows;
            // the generator uses this to decide probe vs. fetch.
            keyRangeEx::getNextKeyRangeReturnType
              keyRangeStatus = setupSubsetKeys(fetchRangeHadRows_);

            fetchRangeHadRows_ = FALSE;

            if (keyRangeStatus == keyRangeEx::EXPRESSION_ERROR)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            if (keyRangeStatus == keyRangeEx::NO_MORE_RANGES)
              {
                step_ = SELECT_CLOSE;
                break;
              }

            if (keyRangeStatus == keyRangeEx::PROBE_RANGE)
              step_ = PROCESS_PROBE_RANGE;
            else if (keyRangeStatus == keyRangeEx::FETCH_RANGE)
              {
                matchesBeforeFetch_ = matches_;
                step_ = PROCESS_FETCH_RANGE;
              }
            else
              step_ = HANDLE_ERROR;
          }
          break;

        case PROCESS_PROBE_RANGE:
          {
            // Probe: fetch only the first key of the range so the MDAM
            // generator can decide where to go next.
            char * keyData = NULL;
            retcode = scanSQTaskTcb_->getProbeResult(keyData);
            if (retcode == -1)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            if (retcode == 1) // no rows found
              {
                keyExeExpr()->reportProbeResult(0);

                step_ = GET_NEXT_KEY_RANGE;
                break;
              }

            // pass the key value to the mdam generator
            keyExeExpr()->reportProbeResult(keyData);

            step_ = GET_NEXT_KEY_RANGE;
          }
          break;

        case PROCESS_FETCH_RANGE:
          {
            // Fetch: scan the full range through the SQ scan sub-task.
            rc = 0;
            retcode = scanSQTaskTcb_->work(rc);

            if (retcode == 1)
              return rc;
            else if (retcode < 0)
              step_ = HANDLE_ERROR;
            else
              {
                // GET_N request satisfied: stop early.
                if ((pentry_down->downState.request == ex_queue::GET_N) &&
                    (pentry_down->downState.requestValue == matches_))
                  {
                    step_ = SELECT_CLOSE;
                    break;
                  }

                if (matches_ > matchesBeforeFetch_)
                  fetchRangeHadRows_ = TRUE;
                step_ = GET_NEXT_KEY_RANGE;
              }
          }
          break;

        case SELECT_CLOSE:
        case SELECT_CLOSE_NO_ERROR:
          {
            retcode = ehi_->close();

            // In the cancel path, close errors are deliberately ignored.
            if (step_ == SELECT_CLOSE)
              {
                if (setupError(retcode, "ExpHbaseInterface::close"))
                  {
                    step_ = HANDLE_ERROR_NO_CLOSE;
                    break;
                  }
              }

            step_ = DONE;
          }
          break;

        case HANDLE_ERROR:
        case HANDLE_ERROR_NO_CLOSE:
          {
            if (handleError(rc))
              return rc;

            // Avoid double-close when the error came from close itself.
            if (step_ == HANDLE_ERROR)
              retcode = ehi_->close();

            step_ = DONE;
          }
          break;

        case DONE:
          {
            if (handleDone(rc))
              return rc;

            step_ = NOT_STARTED;
          }
          break;

        } // switch
    } // while

  return WORK_OK;
}
// Constructor: coprocessor-aggregate TCB; evaluates aggregates server-side
// via the HBase coprocessor interface rather than fetching rows.
ExHbaseCoProcAggrTcb::ExHbaseCoProcAggrTcb(
     const ComTdbHbaseCoProcAggr &hbaseAccessTdb,
     ex_globals * glob ) :
  ExHbaseAccessTcb(hbaseAccessTdb, glob),
  step_(NOT_STARTED)
{
}
// Evaluate each requested aggregate through the HBase coprocessor, copy the
// results into convertRow_, and return a single result row to the parent.
ExWorkProcRetcode ExHbaseCoProcAggrTcb::work()
{
  Lng32 retcode = 0;
  short rc = 0;

  while (!qparent_.down->isEmpty())
    {
      ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();

      // NOTE(review): unlike the other select TCBs, the GET_NOMORE (cancel)
      // transition is commented out here, so a cancel does not short-circuit
      // the aggregate evaluation.
      if ((pentry_down->downState.request == ex_queue::GET_NOMORE) &&
          (step_ != DONE))
        {
          // step_ = SELECT_CLOSE_NO_ERROR;
        }

      switch (step_)
        {
        case NOT_STARTED:
          {
            matches_ = 0;

            step_ = COPROC_INIT;
          }
          break;

        case COPROC_INIT:
          {
            retcode = ehi_->init(getHbaseAccessStats());
            if (setupError(retcode, "ExpHbaseInterface::init"))
              {
                step_ = HANDLE_ERROR;
                break;
              }

            table_.val = hbaseAccessTdb().getTableName();
            table_.len = strlen(hbaseAccessTdb().getTableName());

            // Walk the aggregate-type and column lists in lockstep.
            hbaseAccessTdb().listOfFetchedColNames()->position();
            hbaseAccessTdb().listOfAggrTypes()->position();

            aggrIdx_ = 0;

            step_ = COPROC_EVAL;
          }
          break;

        case COPROC_EVAL:
          {
            Lng32 aggrType = *(short*)hbaseAccessTdb().listOfAggrTypes()->getCurr();
            char * col = (char*)hbaseAccessTdb().listOfFetchedColNames()->getCurr();

            Text aggrVal;
            Text colFam;
            Text colName;
            retcode = extractColFamilyAndName(col, colFam, colName);
            if (retcode)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            // Run the aggregate server-side over the whole table
            // (empty start/stop rows).
            retcode = ehi_->coProcAggr(table_,
                                       aggrType,
                                       "", // startRow
                                       "", // stopRow
                                       colFam,
                                       colName,
                                       FALSE, // cacheBlocks
                                       100, //numCacheRows
                                       aggrVal);
            if (setupError(retcode, "ExpHbaseInterface::coProcAggr"))
              {
                step_ = HANDLE_ERROR;
                break;
              }

            // Copy the aggregate value into the output row at this
            // aggregate's attribute position.
            ExpTupleDesc * convertTuppTD =
              hbaseAccessTdb().workCriDesc_->getTupleDescriptor
              (hbaseAccessTdb().convertTuppIndex_);

            Attributes * attr = convertTuppTD->getAttr(aggrIdx_);
            if (! attr)
              {
                step_ = HANDLE_ERROR;
                break;
              }

            // Mark the value non-null if the column is nullable.
            if (attr->getNullFlag())
              {
                *(short*)&convertRow_[attr->getNullIndOffset()] = 0;
              }

            str_cpy_all(&convertRow_[attr->getOffset()], aggrVal.data(), aggrVal.length());

            hbaseAccessTdb().listOfAggrTypes()->advance();
            hbaseAccessTdb().listOfFetchedColNames()->advance();
            aggrIdx_++;

            if (hbaseAccessTdb().listOfAggrTypes()->atEnd())
              {
                step_ = RETURN_ROW;
                break;
              }
          }
          break;

        case RETURN_ROW:
          {
            // Returns nonzero when the parent up-queue is full; retry later.
            short rc = 0;
            if (moveRowToUpQueue(convertRow_, hbaseAccessTdb().convertRowLen(),
                                 &rc, FALSE))
              return 1;

            step_ = DONE;
          }
          break;

        case HANDLE_ERROR:
          {
            if (handleError(rc))
              return rc;

            retcode = ehi_->close();

            step_ = DONE;
          }
          break;

        case DONE:
          {
            if (handleDone(rc))
              return rc;

            step_ = NOT_STARTED;
          }
          break;

        } // switch
    } // while

  return WORK_OK;
}
| 1 | 10,970 | Do you want this to be a stack variable? The work method might return at any time (example: when the parent queue fills up) and then get called again, which would cause this variable to become false again. I'm wondering if this variable should be a member instead? | apache-trafodion | cpp |
@@ -116,6 +116,14 @@ nebula::cpp2::HostAddr MetaServiceUtils::parseHostKey(folly::StringPiece key) {
return host;
}
+std::string MetaServiceUtils::schemaEdgesPrefix(GraphSpaceID spaceId) {
+ std::string key;
+ key.reserve(kEdgesTable.size() + sizeof(GraphSpaceID));
+ key.append(kEdgesTable.data(), kEdgesTable.size());
+ key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
+ return key;
+}
+
std::string MetaServiceUtils::schemaEdgeKey(GraphSpaceID spaceId,
EdgeType edgeType,
int64_t version) { | 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "meta/MetaServiceUtils.h"
#include <thrift/lib/cpp2/protocol/Serializer.h>
#include <thrift/lib/cpp2/protocol/CompactProtocol.h>
namespace nebula {
namespace meta {
// Key prefixes for the meta tables kept in the underlying KV store; every
// key built below starts with one of these.
const std::string kSpacesTable = "__spaces__"; // NOLINT
const std::string kPartsTable  = "__parts__";  // NOLINT
const std::string kHostsTable  = "__hosts__";  // NOLINT
const std::string kTagsTable   = "__tags__";   // NOLINT
const std::string kEdgesTable  = "__edges__";  // NOLINT
const std::string kIndexTable  = "__index__";  // NOLINT
// Build the KV key for a graph space: kSpacesTable | raw spaceId bytes.
std::string MetaServiceUtils::spaceKey(GraphSpaceID spaceId) {
    std::string key;
    key.reserve(256);
    key += kSpacesTable;
    key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
    return key;
}
// Encode a space's value: raw partsNum | raw replicaFactor | name bytes.
std::string MetaServiceUtils::spaceVal(int32_t partsNum,
                                       int32_t replicaFactor,
                                       const std::string& name) {
    std::string val;
    val.reserve(256);
    val.append(reinterpret_cast<const char*>(&partsNum), sizeof(partsNum))
       .append(reinterpret_cast<const char*>(&replicaFactor), sizeof(replicaFactor))
       .append(name);
    return val;
}
// Prefix shared by all space keys (the whole spaces table).
const std::string& MetaServiceUtils::spacePrefix() {
    return kSpacesTable;
}

// Decode the space id stored right after the table prefix in a space key.
GraphSpaceID MetaServiceUtils::spaceId(folly::StringPiece rawKey) {
    return *reinterpret_cast<const GraphSpaceID*>(rawKey.data() + kSpacesTable.size());
}

// The name occupies the value after the two leading int32 fields
// (partsNum, replicaFactor) written by spaceVal().
folly::StringPiece MetaServiceUtils::spaceName(folly::StringPiece rawVal) {
    return rawVal.subpiece(sizeof(int32_t)*2);
}
// Build the KV key for one partition: kPartsTable | spaceId | partId.
std::string MetaServiceUtils::partKey(GraphSpaceID spaceId, PartitionID partId) {
    std::string key;
    key.reserve(128);
    key += kPartsTable;
    key.append(reinterpret_cast<const char*>(&spaceId), sizeof(GraphSpaceID))
       .append(reinterpret_cast<const char*>(&partId), sizeof(PartitionID));
    return key;
}
// Encode a partition's replica list as consecutive raw (ip, port) pairs.
std::string MetaServiceUtils::partVal(const std::vector<nebula::cpp2::HostAddr>& hosts) {
    std::string val;
    val.reserve(128);
    for (const auto& host : hosts) {
        val.append(reinterpret_cast<const char*>(&host.ip), sizeof(host.ip))
           .append(reinterpret_cast<const char*>(&host.port), sizeof(host.port));
    }
    return val;
}
// Prefix matching all partition keys of one space: kPartsTable | spaceId.
std::string MetaServiceUtils::partPrefix(GraphSpaceID spaceId) {
    std::string prefix;
    prefix.reserve(128);
    prefix += kPartsTable;
    prefix.append(reinterpret_cast<const char*>(&spaceId), sizeof(GraphSpaceID));
    return prefix;
}
// Decode the replica list written by partVal(): each host is a raw
// (int32 ip, int32 port) pair.
std::vector<nebula::cpp2::HostAddr> MetaServiceUtils::parsePartVal(folly::StringPiece val) {
    static const size_t unitSize = sizeof(int32_t) * 2;
    auto hostsNum = val.size() / unitSize;
    std::vector<nebula::cpp2::HostAddr> hosts;
    hosts.reserve(hostsNum);
    VLOG(3) << "Total size:" << val.size()
            << ", host size:" << unitSize
            << ", host num:" << hostsNum;
    const char* cursor = val.data();
    for (decltype(hostsNum) i = 0; i < hostsNum; i++, cursor += unitSize) {
        nebula::cpp2::HostAddr h;
        h.set_ip(*reinterpret_cast<const int32_t*>(cursor));
        h.set_port(*reinterpret_cast<const int32_t*>(cursor + sizeof(int32_t)));
        hosts.emplace_back(std::move(h));
    }
    return hosts;
}
// Build the key of one host: <kHostsTable><ip><port>.
std::string MetaServiceUtils::hostKey(IPv4 ip, Port port) {
    std::string result;
    result.reserve(128);
    result += kHostsTable;
    result.append(reinterpret_cast<const char*>(&ip), sizeof(ip));
    result.append(reinterpret_cast<const char*>(&port), sizeof(port));
    return result;
}
// Hosts carry no payload; the value is intentionally empty.
std::string MetaServiceUtils::hostVal() {
    return "";
}
// Prefix used to scan all host entries.
const std::string& MetaServiceUtils::hostPrefix() {
    return kHostsTable;
}
// Decode the HostAddr stored after the table prefix of a host key.
// NOTE(review): this memcpy assumes HostAddr is laid out exactly as
// hostKey() wrote it (ip then port, no extra members/padding) — confirm;
// a thrift struct with additional fields would over-read the key here.
nebula::cpp2::HostAddr MetaServiceUtils::parseHostKey(folly::StringPiece key) {
    nebula::cpp2::HostAddr host;
    memcpy(&host, key.data() + kHostsTable.size(), sizeof(host));
    return host;
}
// Build the key of one edge-schema version:
// <kEdgesTable><spaceId><edgeType><version>.
std::string MetaServiceUtils::schemaEdgeKey(GraphSpaceID spaceId,
                                            EdgeType edgeType,
                                            int64_t version) {
    std::string result;
    result.reserve(128);
    result += kEdgesTable;
    result.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
    result.append(reinterpret_cast<const char*>(&edgeType), sizeof(edgeType));
    result.append(reinterpret_cast<const char*>(&version), sizeof(version));
    return result;
}
// Serialize an edge schema with the thrift compact protocol.
// NOTE(review): `schema` is taken by value, which copies the whole
// struct per call; a const reference would avoid that (signature is
// fixed by the header, so only noting it here).
std::string MetaServiceUtils::schemaEdgeVal(nebula::cpp2::Schema schema) {
    std::string val;
    apache::thrift::CompactSerializer::serialize(schema, &val);
    return val;
}
// Build the key of one tag-schema version:
// <kTagsTable><spaceId><tagId><invertedVersion>.
// The version is stored as INT64_MAX - version so that, with ascending
// key order, a prefix scan yields the newest version first
// (parseTagVersion applies the inverse transform).
std::string MetaServiceUtils::schemaTagKey(GraphSpaceID spaceId, TagID tagId, int64_t version) {
    int64_t storageVer = std::numeric_limits<int64_t>::max() - version;
    std::string key;
    key.reserve(128);
    key.append(kTagsTable.data(), kTagsTable.size());
    key.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
    key.append(reinterpret_cast<const char*>(&tagId), sizeof(tagId));
    key.append(reinterpret_cast<const char*>(&storageVer), sizeof(version));
    return key;
}
// Recover the original version from a tag-schema key: the key stores
// INT64_MAX - version (see schemaTagKey), so subtract again to undo it.
int64_t MetaServiceUtils::parseTagVersion(folly::StringPiece key) {
    auto offset = kTagsTable.size() + sizeof(GraphSpaceID) + sizeof(TagID);
    int64_t ver = std::numeric_limits<int64_t>::max() -
                  *reinterpret_cast<const int64_t*>(key.begin() + offset);
    return ver;
}
// Prefix used to scan every version of one tag's schema.
std::string MetaServiceUtils::schemaTagPrefix(GraphSpaceID spaceId, TagID tagId) {
    std::string result;
    result.reserve(128);
    result += kTagsTable;
    result.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
    result.append(reinterpret_cast<const char*>(&tagId), sizeof(tagId));
    return result;
}
// Prefix used to scan every tag schema within one graph space.
std::string MetaServiceUtils::schemaTagsPrefix(GraphSpaceID spaceId) {
    std::string result;
    result.reserve(kTagsTable.size() + sizeof(GraphSpaceID));
    result += kTagsTable;
    result.append(reinterpret_cast<const char*>(&spaceId), sizeof(spaceId));
    return result;
}
// Serialize a tag schema value: nameLen(int32) | name | thrift(schema).
std::string MetaServiceUtils::schemaTagVal(const std::string& name, nebula::cpp2::Schema schema) {
    std::string serialized;
    apache::thrift::CompactSerializer::serialize(schema, &serialized);
    const int32_t nameLen = name.size();
    std::string result;
    result.reserve(sizeof(int32_t) + name.size() + serialized.size());
    result.append(reinterpret_cast<const char*>(&nameLen), sizeof(int32_t));
    result.append(name);
    result.append(serialized);
    return result;
}
// Deserialize the schema out of a value written by schemaTagVal():
// skip the int32 name-length prefix plus the name itself, then decode
// the remaining bytes with the thrift compact protocol.
nebula::cpp2::Schema MetaServiceUtils::parseSchema(folly::StringPiece rawData) {
    nebula::cpp2::Schema schema;
    int32_t offset = sizeof(int32_t) + *reinterpret_cast<const int32_t *>(rawData.begin());
    auto schval = rawData.subpiece(offset, rawData.size() - offset);
    apache::thrift::CompactSerializer::deserialize(schval, schema);
    return schema;
}
// Build the name-index key: <kIndexTable><type><name>.
std::string MetaServiceUtils::indexKey(EntryType type, const std::string& name) {
    std::string result;
    result.reserve(128);
    result += kIndexTable;
    result.append(reinterpret_cast<const char*>(&type), sizeof(type));
    result += name;
    return result;
}
// Concatenate a segment prefix and a raw key into one segment key.
std::string MetaServiceUtils::assembleSegmentKey(const std::string& segment,
                                                 const std::string& key) {
    std::string result;
    result.reserve(64);
    result += segment;
    result += key;
    return result;
}
} // namespace meta
} // namespace nebula
| 1 | 16,353 | You could reserve a const space to save the extra computation. | vesoft-inc-nebula | cpp |
@@ -15,6 +15,8 @@ namespace System.IO.Tests
private readonly string _testPath200 = PerfUtils.CreateString(200);
private readonly string _testPath500 = PerfUtils.CreateString(500);
private readonly string _testPath1000 = PerfUtils.CreateString(1000);
+ private readonly string _testPathNoRedundantSegments = "/home/user/runtime/src/coreclr/runtime/src/libraries/System.Private.CoreLib/src/System/IO/Path.cs";
+ private readonly string _testPathWithRedundantSegments = "/home/user/runtime/src/coreclr/runtime/src/libraries/System.Private.CoreLib/src/System/IO/..//./Path.cs";
[Benchmark]
public string Combine() => Path.Combine(_testPath, _testPath10); | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using BenchmarkDotNet.Attributes;
using MicroBenchmarks;
namespace System.IO.Tests
{
[BenchmarkCategory(Categories.Libraries)]
public class Perf_Path
{
private readonly string _testPath = FileUtils.GetTestFilePath();
private readonly string _testPath10 = PerfUtils.CreateString(10);
private readonly string _testPath200 = PerfUtils.CreateString(200);
private readonly string _testPath500 = PerfUtils.CreateString(500);
private readonly string _testPath1000 = PerfUtils.CreateString(1000);
[Benchmark]
public string Combine() => Path.Combine(_testPath, _testPath10);
[Benchmark]
public string GetFileName() => Path.GetFileName(_testPath);
[Benchmark]
public string GetDirectoryName() => Path.GetDirectoryName(_testPath);
[Benchmark]
public string ChangeExtension() => Path.ChangeExtension(_testPath, ".new");
[Benchmark]
public string GetExtension() => Path.GetExtension(_testPath);
[Benchmark]
public string GetFileNameWithoutExtension() => Path.GetFileNameWithoutExtension(_testPath);
[Benchmark]
public string GetFullPathForLegacyLength() => Path.GetFullPath(_testPath200);
#if !NETFRAMEWORK // long paths are always supported on .NET Core
[Benchmark]
public string GetFullPathForTypicalLongPath() => Path.GetFullPath(_testPath500);
[Benchmark]
public void GetFullPathForReallyLongPath() => Path.GetFullPath(_testPath1000);
#endif
[Benchmark]
public string GetPathRoot() => Path.GetPathRoot(_testPath);
[Benchmark]
public string GetRandomFileName() => Path.GetRandomFileName();
[Benchmark]
public string GetTempPath() => Path.GetTempPath();
[Benchmark]
public bool HasExtension() => Path.HasExtension(_testPath);
[Benchmark]
public bool IsPathRooted() => Path.IsPathRooted(_testPath);
}
}
| 1 | 11,690 | Did you consider adding rooted paths like `C:\ProgramData` and paths with flipped separators? | dotnet-performance | .cs |
@@ -93,10 +93,11 @@ func startDnsServer() {
Port: 53,
}
udpConn, err := net.ListenUDP("udp", laddr)
- defer udpConn.Close()
if err != nil {
klog.Errorf("Dns server Start error : %s", err)
+ return
}
+ defer udpConn.Close()
dnsConn = udpConn
for {
req := make([]byte, bufSize) | 1 | package server
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"net"
"os"
"strings"
"time"
"unsafe"
"k8s.io/klog"
"github.com/kubeedge/beehive/pkg/core/context"
"github.com/kubeedge/kubeedge/edge/pkg/metamanager/client"
"github.com/kubeedge/kubeedge/edgemesh/pkg/common"
"github.com/kubeedge/kubeedge/edgemesh/pkg/proxy"
)
// Bit masks and sizes shared by the DNS packet parser/builder below.
var (
	dnsQr         = uint16(0x8000)     // QR bit of the flags word: set => response
	oneByteSize   = uint16(1)
	twoByteSize   = uint16(2)
	ttl           = uint32(64)         // TTL written into fabricated A records
	defaultFakeIP = []byte{5, 5, 5, 5} // answer IP used when no L4-proxy server applies
)

const (
	aRecord       = 1              // DNS qtype of an A (IPv4 address) record
	bufSize       = 1024           // UDP read buffer size
	notImplem     = uint16(0x0004) // RCODE bits for "not implemented"
	serverFAilure = uint16(0x0002) // RCODE bits for "server failure"
)
// dnsHeader is the fixed 12-byte DNS message header
// (six big-endian uint16 fields, decoded by getHeader).
type dnsHeader struct {
	transactionID uint16
	flags         uint16
	queNum        uint16 // question count
	ansNum        uint16 // answer count
	authNum       uint16 // authority count
	additNum      uint16 // additional count
}
// dnsQuestion is one parsed question section plus bookkeeping used while
// building the answer.
type dnsQuestion struct {
	from    *net.UDPAddr // client address the query arrived from
	head    *dnsHeader   // header of the enclosing message (shared by all questions)
	name    []byte       // queried domain, dot-separated, no trailing dot (see getQName)
	queByte []byte       // raw bytes of this question section, echoed into the response
	qType   uint16
	qClasss uint16
	queNum  uint16 // NOTE(review): never written in this file — confirm it is still needed
}
// dnsAnswer holds the fields serialized into one answer resource record
// by getAnswer.
type dnsAnswer struct {
	name    []byte
	qType   uint16
	qClass  uint16
	ttl     uint32
	dataLen uint16 // length of addr in bytes
	addr    []byte // the (possibly fake) IPv4 address returned to the client
}
// dnsQs is the dns question list type.
type dnsQs []dnsQuestion

// metaClient queries services from the local metamanager.
var metaClient client.CoreInterface

// dnsConn is the UDP socket the DNS server listens on.
var dnsConn *net.UDPConn
// DnsStart is the external entry point: it runs the embedded DNS server
// (blocks for the lifetime of the process).
func DnsStart() {
	startDnsServer()
}
// startDnsServer initializes the meta client, binds UDP port 53 on the
// node address and serves DNS queries until the process exits.
func startDnsServer() {
	// init meta client used to resolve services from the metamanager
	c := context.GetContext(context.MsgCtxTypeChannel)
	metaClient = client.New(c)
	// address to listen on
	lip, err := getIP()
	if err != nil {
		klog.Errorf("Dns server Start error : %s", err)
		return
	}
	laddr := &net.UDPAddr{
		IP:   lip,
		Port: 53,
	}
	udpConn, err := net.ListenUDP("udp", laddr)
	// BUG FIX: check the error (and return) before deferring Close();
	// previously a failed ListenUDP deferred Close on a nil conn and the
	// serve loop still ran with a nil connection.
	if err != nil {
		klog.Errorf("Dns server Start error : %s", err)
		return
	}
	defer udpConn.Close()
	dnsConn = udpConn
	for {
		req := make([]byte, bufSize)
		n, from, err := dnsConn.ReadFromUDP(req)
		if err != nil || n <= 0 {
			klog.Infof("DNS server get an IO error : %s", err)
			continue
		}
		que, err := parseDnsQuery(req[:n])
		if err != nil {
			continue
		}
		// BUG FIX: range over indices — `for _, q := range que` iterated
		// over struct copies, so `from` was never actually stored.
		for i := range que {
			que[i].from = from
		}
		rsp, err := recordHandler(que, req[0:n])
		if err != nil {
			klog.Infof("DNS server get an resolve abnormal : %s", err)
			continue
		}
		dnsConn.WriteTo(rsp, from)
	}
}
// recordHandler returns the answer bytes for the dns questions, or
// forwards the query to the real upstream DNS when the name is not a
// service of this cluster.
func recordHandler(que []dnsQuestion, req []byte) (rsp []byte, err error) {
	var exist bool
	var ip string
	for _, q := range que {
		domainName := string(q.name)
		// BUG FIX: the lookup error used to be discarded with `_`, which
		// made the following err check dead code; propagate it instead.
		exist, ip, err = lookupFromMetaManager(domainName)
		if err != nil {
			return nil, err
		}
		if !exist {
			// this service doesn't belong to this cluster: ask upstream
			go getfromRealDNS(req, q.from)
			return rsp, fmt.Errorf("get from real DNS")
		}
	}
	fakeIP := defaultFakeIP
	if ip != "" {
		fakeIP = net.ParseIP(ip).To4()
	}
	pre, err := modifyRspPrefix(que, fakeIP)
	if err != nil {
		return nil, err
	}
	rsp = append(rsp, pre...)
	for _, q := range que {
		// head of each que is the same
		if que[0].head.ansNum == 0 {
			continue
		}
		// create a deceptive reply pointing at fakeIP
		dnsAns := &dnsAnswer{
			name:    q.name,
			qType:   q.qType,
			qClass:  q.qClasss,
			ttl:     ttl,
			dataLen: uint16(len(fakeIP)),
			addr:    fakeIP,
		}
		rsp = append(rsp, dnsAns.getAnswer()...)
	}
	return rsp, nil
}
// parseDnsQuery extracts the question section of a raw dns request,
// or an error when the packet is not a query.
func parseDnsQuery(req []byte) ([]dnsQuestion, error) {
	hdr := &dnsHeader{}
	hdr.getHeader(req)
	if !hdr.isAQurey() {
		return nil, errors.New("Ignore")
	}
	questions := make(dnsQs, hdr.queNum)
	questions.getQuestion(req, uint16(unsafe.Sizeof(dnsHeader{})), hdr)
	return questions, nil
}
// isAQurey reports whether the packet is a query (QR bit clear).
func (h *dnsHeader) isAQurey() bool {
	// dnsQr is a single bit, so "masked value != dnsQr" == "bit clear"
	return (h.flags & dnsQr) == 0
}
// getHeader decodes the fixed 12-byte big-endian header from req.
func (h *dnsHeader) getHeader(req []byte) {
	fields := []*uint16{
		&h.transactionID, &h.flags, &h.queNum,
		&h.ansNum, &h.authNum, &h.additNum,
	}
	for i, field := range fields {
		*field = binary.BigEndian.Uint16(req[2*i : 2*i+2])
	}
}
// getQuestion parses len(q) question sections out of req, starting at
// byte offset `offset` (right after the 12-byte header), filling each
// element of the receiver slice in place.
func (q dnsQs) getQuestion(req []byte, offset uint16, head *dnsHeader) {
	ost := offset
	qNum := uint16(len(q))
	for i := uint16(0); i < qNum; i++ {
		tmp := ost // remember where this question starts
		// domain name, then two uint16s: qtype and qclass
		ost = q[i].getQName(req, ost)
		q[i].qType = binary.BigEndian.Uint16(req[ost : ost+twoByteSize])
		ost += twoByteSize
		q[i].qClasss = binary.BigEndian.Uint16(req[ost : ost+twoByteSize])
		ost += twoByteSize
		q[i].head = head
		// keep the raw bytes so the response can echo them verbatim
		q[i].queByte = req[tmp:ost]
	}
}
// getAnswer serializes one answer resource record; non-A question types
// yield an empty slice.
func (d *dnsAnswer) getAnswer() []byte {
	answer := make([]byte, 0)
	if d.qType != aRecord {
		return answer
	}
	// 0xc00c: compression pointer back to the name in the question section
	answer = append(answer, 0xc0, 0x0c)
	u16 := make([]byte, 2)
	u32 := make([]byte, 4)
	binary.BigEndian.PutUint16(u16, d.qType)
	answer = append(answer, u16...)
	binary.BigEndian.PutUint16(u16, d.qClass)
	answer = append(answer, u16...)
	binary.BigEndian.PutUint32(u32, d.ttl)
	answer = append(answer, u32...)
	binary.BigEndian.PutUint16(u16, d.dataLen)
	answer = append(answer, u16...)
	answer = append(answer, d.addr...)
	return answer
}
// getQName decodes the length-prefixed label sequence starting at
// `offset` into q.name as a dot-separated domain (the trailing dot is
// trimmed at the terminating zero label) and returns the offset just
// past the name. NOTE(review): no bounds check on req and no handling
// of compression pointers — assumes a well-formed, uncompressed name.
func (q *dnsQuestion) getQName(req []byte, offset uint16) uint16 {
	ost := offset
	for {
		// each label: one length byte followed by that many characters
		qbyte := uint16(req[ost])
		if qbyte == 0x00 {
			// zero length terminates the name; drop the trailing '.'
			q.name = q.name[:uint16(len(q.name))-oneByteSize]
			return ost + oneByteSize
		}
		ost += oneByteSize
		q.name = append(q.name, req[ost:ost+qbyte]...)
		q.name = append(q.name, 0x2e) // '.'
		ost += qbyte
	}
}
// lookupFromMetaManager reports whether serviceUrl names a service known
// to this cluster; for L4-proxied services it also returns the server IP
// to answer with (otherwise ip stays empty and the caller uses a default).
// NOTE(review): the error from metaClient.Services().Get() is discarded,
// so a lookup failure is indistinguishable from "not found" — confirm
// this is intended; err is currently always nil.
func lookupFromMetaManager(serviceUrl string) (exist bool, ip string, err error) {
	name, namespace := common.SplitServiceKey(serviceUrl)
	s, _ := metaClient.Services(namespace).Get(name)
	if s != nil {
		ip := ""
		// Determine whether to use L4 proxy
		if proxy.IsL4Proxy(s) {
			svcName := namespace + "." + name
			ip = proxy.GetServiceServer(svcName)
		}
		klog.Infof("Service %s is found in this cluster. namespace : %s, name: %s", serviceUrl, namespace, name)
		return true, ip, nil
	}
	klog.Infof("Service %s is not found in this cluster", serviceUrl)
	return false, "", nil
}
// getfromRealDNS forwards req to the nameservers from /etc/resolv.conf
// and relays the first successful response back to the client.
func getfromRealDNS(req []byte, from *net.UDPAddr) {
	ips, err := parseNameServer()
	if err != nil {
		return
	}
	laddr := &net.UDPAddr{
		IP:   net.IPv4zero,
		Port: 0,
	}
	for _, ip := range ips {
		raddr := &net.UDPAddr{
			IP:   ip,
			Port: 53,
		}
		rsp, err := queryRealDNS(laddr, raddr, req)
		if err != nil {
			continue // try the next nameserver
		}
		dnsConn.WriteToUDP(rsp, from)
		break
	}
}

// queryRealDNS forwards req to one upstream server and returns its
// response. Extracted so `defer conn.Close()` runs once per attempt;
// BUG FIX: the original deferred Close() *before* checking the DialUDP
// error (nil-pointer deref on failure) and deferred inside the loop,
// keeping every connection open until the function returned.
func queryRealDNS(laddr, raddr *net.UDPAddr, req []byte) ([]byte, error) {
	conn, err := net.DialUDP("udp", laddr, raddr)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	if _, err := conn.Write(req); err != nil {
		return nil, err
	}
	if err := conn.SetReadDeadline(time.Now().Add(time.Minute)); err != nil {
		return nil, err
	}
	buf := make([]byte, bufSize)
	n, err := conn.Read(buf)
	if err != nil {
		return nil, err
	}
	if n <= 0 {
		return nil, fmt.Errorf("empty response")
	}
	return buf[:n], nil
}
// parseNameServer returns the nameserver IPs listed in /etc/resolv.conf,
// or an error when the file cannot be read or lists no valid nameserver.
func parseNameServer() ([]net.IP, error) {
	file, err := os.Open("/etc/resolv.conf")
	if err != nil {
		return nil, fmt.Errorf("error opening /etc/resolv.conf : %s", err)
	}
	defer file.Close()
	scan := bufio.NewScanner(file)
	scan.Split(bufio.ScanLines)
	ip := make([]net.IP, 0)
	for scan.Scan() {
		// BUG FIX: removed a leftover debug fmt.Println that dumped every
		// line of resolv.conf to stdout.
		serverString := scan.Text()
		if strings.Contains(serverString, "nameserver") {
			tmpString := strings.Replace(serverString, "nameserver", "", 1)
			nameserver := strings.TrimSpace(tmpString)
			if sip := net.ParseIP(nameserver); sip != nil {
				ip = append(ip, sip)
			}
		}
	}
	if len(ip) == 0 {
		return nil, fmt.Errorf("there is no nameserver in /etc/resolv.conf")
	}
	return ip, nil
}
// modifyRspPrefix builds the response prefix: the (mutated) header
// followed by the echoed raw question sections.
// NOTE: rspHead aliases que[0].head, so the shared parsed header is
// modified in place.
func modifyRspPrefix(que []dnsQuestion, fakeip []byte) (pre []byte, err error) {
	ansNum := len(que)
	if ansNum == 0 {
		return
	}
	// use head in que. All questions share the same header pointer.
	rspHead := que[0].head
	rspHead.converQueryRsp(true)
	// a usable answer must be a 4-byte IPv4 address
	serverError := false
	if fakeip == nil || len(fakeip) != 4 {
		serverError = true
	}
	// announce one answer per question only for healthy A queries
	if que[0].qType == aRecord && (!serverError) {
		rspHead.setAnswerNum(uint16(ansNum))
	} else {
		rspHead.setAnswerNum(0)
	}
	rspHead.setRspRcode(que, serverError)
	pre = rspHead.getByteFromDnsHeader()
	// echo each raw question section after the header
	for _, q := range que {
		pre = append(pre, q.queByte...)
	}
	err = nil
	return
}
// converQueryRsp sets or clears the QR flag so the header marks a
// response (isRsp=true) or a query (isRsp=false).
// BUG FIX: both branches previously set the bit, making the false case
// a silent no-op; it now clears the bit as the parameter implies.
// (The only call site in this file passes true, so behavior there is
// unchanged.)
func (h *dnsHeader) converQueryRsp(isRsp bool) {
	if isRsp {
		h.flags |= dnsQr
	} else {
		h.flags &^= dnsQr
	}
}
// setAnswerNum sets the answer count field of the header.
func (h *dnsHeader) setAnswerNum(num uint16) {
	h.ansNum = num
}
// setRspRcode folds the response RCODE bits into the header flags:
// "not implemented" for non-A questions, "server failure" when the
// answer address is unusable.
func (h *dnsHeader) setRspRcode(que dnsQs, serverError bool) {
	for _, question := range que {
		// clearing a bit and immediately OR-ing it back equals a plain OR
		switch {
		case question.qType != aRecord:
			h.flags |= notImplem
		case serverError:
			h.flags |= serverFAilure
		}
	}
}
// getByteFromDnsHeader serializes the header into its 12-byte big-endian
// wire form: six uint16 fields at offsets 0, 2, 4, 6, 8, 10.
func (h *dnsHeader) getByteFromDnsHeader() []byte {
	out := make([]byte, unsafe.Sizeof(*h))
	fields := []uint16{
		h.transactionID, h.flags, h.queNum,
		h.ansNum, h.authNum, h.additNum,
	}
	for i, v := range fields {
		binary.BigEndian.PutUint16(out[2*i:2*i+2], v)
	}
	return out
}
| 1 | 14,321 | does this line cause a panic if it is above the if condition ? | kubeedge-kubeedge | go |
@@ -0,0 +1,5 @@
+package org.phoenicis.javafx.components.library.utils;
+
+public enum LibraryDetailsPanels {
+ ShortcutDetails, ShortcutCreation, ShortcutEditing, Closed;
+} | 1 | 1 | 13,547 | Maybe `LibraryDetailsPanelType` would be clearer. | PhoenicisOrg-phoenicis | java |
|
@@ -149,7 +149,6 @@ static void runlevel_cb (runlevel_t *r, int level, int rc, double elapsed,
static void runlevel_io_cb (runlevel_t *r, const char *name,
const char *msg, void *arg);
-static int create_persistdir (attr_t *attrs, uint32_t rank);
static int create_rundir (attr_t *attrs);
static int create_broker_rundir (overlay_t *ov, void *arg);
static int create_dummyattrs (flux_t *h, uint32_t rank, uint32_t size); | 1 | /************************************************************\
* Copyright 2014 Lawrence Livermore National Security, LLC
* (c.f. AUTHORS, NOTICE.LLNS, COPYING)
*
* This file is part of the Flux resource manager framework.
* For details, see https://github.com/flux-framework.
*
* SPDX-License-Identifier: LGPL-3.0
\************************************************************/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <sys/types.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/prctl.h>
#include <sys/signalfd.h>
#include <sys/resource.h>
#include <unistd.h>
#include <sys/param.h>
#include <stdbool.h>
#include <dlfcn.h>
#include <argz.h>
#include <flux/core.h>
#include <czmq.h>
#include <jansson.h>
#if HAVE_CALIPER
#include <caliper/cali.h>
#include <sys/syscall.h>
#endif
#if HAVE_VALGRIND
# if HAVE_VALGRIND_H
# include <valgrind.h>
# elif HAVE_VALGRIND_VALGRIND_H
# include <valgrind/valgrind.h>
# endif
#endif
#include "src/common/libutil/log.h"
#include "src/common/libutil/oom.h"
#include "src/common/libutil/xzmalloc.h"
#include "src/common/libutil/cleanup.h"
#include "src/common/libidset/idset.h"
#include "src/common/libutil/ipaddr.h"
#include "src/common/libutil/kary.h"
#include "src/common/libutil/monotime.h"
#include "src/common/libutil/zsecurity.h"
#include "src/common/libpmi/pmi.h"
#include "src/common/libpmi/pmi_strerror.h"
#include "src/common/libutil/fsd.h"
#include "src/common/libutil/errno_safe.h"
#include "heartbeat.h"
#include "module.h"
#include "overlay.h"
#include "service.h"
#include "hello.h"
#include "shutdown.h"
#include "attr.h"
#include "log.h"
#include "content-cache.h"
#include "runlevel.h"
#include "heaptrace.h"
#include "exec.h"
#include "ping.h"
#include "rusage.h"
#include "boot_config.h"
#include "boot_pmi.h"
#include "publisher.h"
/* Top-level broker state, threaded through most callbacks in this file.
 */
typedef struct {
    /* Reactor
     */
    flux_t *h;                  /* broker's own flux handle (logging etc.) */
    flux_reactor_t *reactor;
    /* Sockets.
     */
    overlay_t *overlay;         /* TBON overlay network */
    /* Session parameters
     */
    attr_t *attrs;              /* broker attribute store */
    struct flux_msg_cred cred;  /* instance owner */
    /* Modules
     */
    modhash_t *modhash;         /* loaded comms modules, by name */
    /* Misc
     */
    bool verbose;
    int event_recv_seq;
    zlist_t *sigwatchers;
    struct service_switch *services;
    heartbeat_t *heartbeat;
    struct shutdown *shutdown;
    double shutdown_grace;      /* seconds; 0 selects a computed default */
    double heartbeat_rate;      /* seconds between heartbeats (rank 0) */
    int sec_typemask;           /* ZSECURITY_TYPE_* bits for overlay crypto */
    zlist_t *subscriptions;     /* subscripts for internal services */
    content_cache_t *cache;
    struct publisher *publisher;
    int tbon_k;                 /* overlay tree fan-out (k-ary) */
    /* Bootstrap
     */
    hello_t *hello;
    runlevel_t *runlevel;
    char *init_shell_cmd;       /* argz: rc2 command from the command line */
    size_t init_shell_cmd_len;
} broker_ctx_t;
static int broker_event_sendmsg (broker_ctx_t *ctx, const flux_msg_t *msg);
static int broker_response_sendmsg (broker_ctx_t *ctx, const flux_msg_t *msg);
static void broker_request_sendmsg (broker_ctx_t *ctx, const flux_msg_t *msg);
static int broker_request_sendmsg_internal (broker_ctx_t *ctx,
const flux_msg_t *msg);
static void parent_cb (overlay_t *ov, void *sock, void *arg);
static void child_cb (overlay_t *ov, void *sock, void *arg);
static void module_cb (module_t *p, void *arg);
static void module_status_cb (module_t *p, int prev_state, void *arg);
static void hello_update_cb (hello_t *h, void *arg);
static void shutdown_cb (struct shutdown *s, void *arg);
static void signal_cb (flux_reactor_t *r, flux_watcher_t *w,
int revents, void *arg);
static int broker_handle_signals (broker_ctx_t *ctx);
static flux_msg_handler_t **broker_add_services (broker_ctx_t *ctx);
static void broker_remove_services (flux_msg_handler_t *handlers[]);
static int load_module_byname (broker_ctx_t *ctx, const char *name,
const char *argz, size_t argz_len,
const flux_msg_t *request);
static int unload_module_byname (broker_ctx_t *ctx, const char *name,
const flux_msg_t *request);
static void set_proctitle (uint32_t rank);
static void runlevel_cb (runlevel_t *r, int level, int rc, double elapsed,
const char *state, void *arg);
static void runlevel_io_cb (runlevel_t *r, const char *name,
const char *msg, void *arg);
static int create_persistdir (attr_t *attrs, uint32_t rank);
static int create_rundir (attr_t *attrs);
static int create_broker_rundir (overlay_t *ov, void *arg);
static int create_dummyattrs (flux_t *h, uint32_t rank, uint32_t size);
static int handle_event (broker_ctx_t *ctx, const flux_msg_t *msg);
static void init_attrs (attr_t *attrs, pid_t pid);
static const struct flux_handle_ops broker_handle_ops;
static int parse_config_files (flux_t *h);
static int exit_rc = 1;
/* Short-option string and long-option table for getopt_long().
 * BUG FIX: "H:" was missing from OPTIONS, so the short form "-H SECS"
 * advertised by usage() was rejected even though --heartrate worked via
 * longopts.  (M:, E, I appear to be stale letters with no case in the
 * option switch; left in place to avoid changing rejection behavior.)
 */
#define OPTIONS "+vM:X:k:s:g:EIS:H:"
static const struct option longopts[] = {
    {"verbose",         no_argument,        0, 'v'},
    {"security",        required_argument,  0, 's'},
    {"module-path",     required_argument,  0, 'X'},
    {"k-ary",           required_argument,  0, 'k'},
    {"heartrate",       required_argument,  0, 'H'},
    {"shutdown-grace",  required_argument,  0, 'g'},
    {"setattr",         required_argument,  0, 'S'},
    {0, 0, 0, 0},
};
/* Print command-line help to stderr and exit with status 1.
 */
static void usage (void)
{
    fprintf (stderr,
"Usage: flux-broker OPTIONS [initial-command ...]\n"
" -v,--verbose                 Be annoyingly verbose\n"
" -X,--module-path PATH        Set module search path (colon separated)\n"
" -s,--security=plain|curve|none    Select security mode (default: curve)\n"
" -k,--k-ary K                 Wire up in a k-ary tree\n"
" -H,--heartrate SECS          Set heartrate in seconds (rank 0 only)\n"
" -g,--shutdown-grace SECS     Set shutdown grace period in seconds\n"
" -S,--setattr ATTR=VAL        Set broker attribute\n"
);
    exit (1);
}
/* Parse argv into ctx (security mode, attributes, tree fan-out, rates).
 * Exits the process via usage()/log_*_exit() on any invalid argument.
 * Remaining non-option arguments become the rc2 (initial shell) command,
 * stored as an argz vector in ctx->init_shell_cmd.
 */
void parse_command_line_arguments (int argc, char *argv[], broker_ctx_t *ctx)
{
    int c;
    int e;
    char *endptr;

    while ((c = getopt_long (argc, argv, OPTIONS, longopts, NULL)) != -1) {
        switch (c) {
            case 's':   /* --security=MODE */
                if (!strcmp (optarg, "none")) {
                    ctx->sec_typemask = 0;
                } else if (!strcmp (optarg, "plain")) {
                    ctx->sec_typemask |= ZSECURITY_TYPE_PLAIN;
                    ctx->sec_typemask &= ~ZSECURITY_TYPE_CURVE;
                } else if (!strcmp (optarg, "curve")) {
                    ctx->sec_typemask |= ZSECURITY_TYPE_CURVE;
                    ctx->sec_typemask &= ~ZSECURITY_TYPE_PLAIN;
                } else {
                    log_msg_exit ("--security arg must be none|plain|curve");
                }
                break;
            case 'v':   /* --verbose */
                ctx->verbose = true;
                break;
            case 'X':   /* --module-path PATH */
                if (attr_set (ctx->attrs, "conf.module_path", optarg, true) < 0)
                    log_err_exit ("setting conf.module_path attribute");
                break;
            case 'k':   /* --k-ary k */
                errno = 0;
                ctx->tbon_k = strtoul (optarg, &endptr, 10);
                if (errno || *endptr != '\0')
                    log_err_exit ("k-ary '%s'", optarg);
                if (ctx->tbon_k < 1)
                    usage ();
                break;
            case 'H':   /* --heartrate SECS */
                if (fsd_parse_duration (optarg, &ctx->heartbeat_rate) < 0)
                    log_err_exit ("heartrate '%s'", optarg);
                break;
            case 'g':   /* --shutdown-grace SECS */
                if (fsd_parse_duration (optarg, &ctx->shutdown_grace) < 0) {
                    log_err_exit ("shutdown-grace '%s'", optarg);
                    /* NOTE(review): presumably log_err_exit() does not
                     * return, which would make this usage() unreachable
                     * dead code — confirm and remove. */
                    usage ();
                }
                break;
            case 'S': { /* --setattr ATTR=VAL */
                /* split "ATTR=VAL" in place on a private copy */
                char *val, *attr = xstrdup (optarg);
                if ((val = strchr (attr, '=')))
                    *val++ = '\0';
                /* add, falling back to overwrite if it already exists */
                if (attr_add (ctx->attrs, attr, val, 0) < 0)
                    if (attr_set (ctx->attrs, attr, val, true) < 0)
                        log_err_exit ("setattr %s=%s", attr, val);
                free (attr);
                break;
            }
            default:
                usage ();
        }
    }
    /* everything after the options is the rc2 command line */
    if (optind < argc) {
        if ((e = argz_create (argv + optind, &ctx->init_shell_cmd,
                              &ctx->init_shell_cmd_len)) != 0)
            log_errn_exit (e, "argz_create");
    }
}
/* Tag this process for Caliper profiling (no-op without HAVE_CALIPER).
 * Always returns 0.
 */
static int setup_profiling (const char *program, int rank)
{
#if HAVE_CALIPER
    cali_begin_string_byname ("flux.type", "main");
    cali_begin_int_byname ("flux.tid", syscall (SYS_gettid));
    cali_begin_string_byname ("binary", program);
    cali_begin_int_byname ("flux.rank", rank);
    // TODO: this is a stopgap until we have better control over
    // instrumemtation in child processes. If we want to see what children
    // that load libflux are up to, this should be disabled
    unsetenv ("CALI_SERVICES_ENABLE");
    unsetenv ("CALI_CONFIG_PROFILE");
#endif
    return (0);
}
/* Raise RLIMIT_NOFILE (open file descriptors) to its hard maximum.
 * Prevents failures from fd exhaustion (e.g. opening /dev/urandom).
 * Returns 0 on success, -1 on failure (error already logged).
 */
static int increase_rlimits (void)
{
    struct rlimit nofile;

    if (getrlimit (RLIMIT_NOFILE, &nofile) < 0) {
        log_err ("getrlimit");
        return -1;
    }
    nofile.rlim_cur = nofile.rlim_max;
    if (setrlimit (RLIMIT_NOFILE, &nofile) < 0) {
        log_err ("Failed to increase nofile limit");
        return -1;
    }
    return 0;
}
int main (int argc, char *argv[])
{
broker_ctx_t ctx;
sigset_t old_sigmask;
struct sigaction old_sigact_int;
struct sigaction old_sigact_term;
flux_msg_handler_t **handlers = NULL;
const char *boot_method;
memset (&ctx, 0, sizeof (ctx));
log_init (argv[0]);
if (!(ctx.sigwatchers = zlist_new ()))
oom ();
if (!(ctx.modhash = modhash_create ()))
oom ();
if (!(ctx.services = service_switch_create ()))
oom ();
if (!(ctx.overlay = overlay_create ()))
oom ();
if (!(ctx.hello = hello_create ()))
oom ();
if (!(ctx.heartbeat = heartbeat_create ()))
oom ();
if (!(ctx.attrs = attr_create ()))
oom ();
if (!(ctx.subscriptions = zlist_new ()))
oom ();
if (!(ctx.cache = content_cache_create ()))
oom ();
if (!(ctx.runlevel = runlevel_create ()))
oom ();
if (!(ctx.publisher = publisher_create ()))
oom ();
ctx.tbon_k = 2; /* binary TBON is default */
/* Record the instance owner: the effective uid of the broker. */
ctx.cred.userid = geteuid ();
/* Set default rolemask for messages sent with flux_send()
* on the broker's internal handle. */
ctx.cred.rolemask = FLUX_ROLE_OWNER;
ctx.heartbeat_rate = 2;
ctx.sec_typemask = ZSECURITY_TYPE_CURVE;
init_attrs (ctx.attrs, getpid ());
parse_command_line_arguments (argc, argv, &ctx);
/* Block all signals, saving old mask and actions for SIGINT, SIGTERM.
*/
sigset_t sigmask;
sigfillset (&sigmask);
if (sigprocmask (SIG_SETMASK, &sigmask, &old_sigmask) < 0)
log_err_exit ("sigprocmask");
if (sigaction (SIGINT, NULL, &old_sigact_int) < 0)
log_err_exit ("sigaction");
if (sigaction (SIGTERM, NULL, &old_sigact_term) < 0)
log_err_exit ("sigaction");
/* Initailize zeromq context
*/
if (!zsys_init ()) {
log_err ("zsys_init");
goto cleanup;
}
zsys_set_logstream (stderr);
zsys_set_logident ("flux-broker");
zsys_handler_set (NULL);
zsys_set_linger (5);
zsys_set_rcvhwm (0);
zsys_set_sndhwm (0);
/* Set up the flux reactor.
*/
if (!(ctx.reactor = flux_reactor_create (FLUX_REACTOR_SIGCHLD))) {
log_err ("flux_reactor_create");
goto cleanup;
}
/* Set up flux handle.
* The handle is used for simple purposes such as logging.
*/
if (!(ctx.h = flux_handle_create (&ctx, &broker_handle_ops, 0))) {
log_err ("flux_handle_create");
goto cleanup;
}
if (flux_set_reactor (ctx.h, ctx.reactor) < 0) {
log_err ("flux_set_reactor");
goto cleanup;
}
if (increase_rlimits () < 0)
goto cleanup;
/* Parse config file(s). The result is cached in ctx.h.
*/
if (parse_config_files (ctx.h) < 0)
goto cleanup;
/* Prepare signal handling
*/
if (broker_handle_signals (&ctx) < 0) {
log_err ("broker_handle_signals");
goto cleanup;
}
/* The first call to overlay_bind() or overlay_connect() calls
* zsecurity_comms_init(). Delay calling zsecurity_comms_init()
* so that we can defer creating the libzmq work thread until we
* are ready to communicate.
*/
const char *keydir;
if (attr_get (ctx.attrs, "security.keydir", &keydir, NULL) < 0) {
log_err ("getattr security.keydir");
goto cleanup;
}
if (overlay_set_flux (ctx.overlay, ctx.h) < 0) {
log_err ("overlay_set_flux");
goto cleanup;
}
if (overlay_setup_sec (ctx.overlay, ctx.sec_typemask, keydir) < 0) {
log_err ("overlay_setup_sec");
goto cleanup;
}
overlay_set_parent_cb (ctx.overlay, parent_cb, &ctx);
overlay_set_child_cb (ctx.overlay, child_cb, &ctx);
/* Arrange for the publisher to route event messages.
* handle_event - local subscribers (ctx.h)
*/
if (publisher_set_flux (ctx.publisher, ctx.h) < 0) {
log_err ("publisher_set_flux");
goto cleanup;
}
if (publisher_set_sender (ctx.publisher, "handle_event",
(publisher_send_f)handle_event, &ctx) < 0) {
log_err ("publisher_set_sender");
goto cleanup;
}
if (create_rundir (ctx.attrs) < 0) {
log_err ("create_rundir");
goto cleanup;
}
/* Set & create broker.rundir *after* overlay initialization,
* when broker rank is determined.
*/
overlay_set_init_callback (ctx.overlay, create_broker_rundir, ctx.attrs);
/* Execute boot method selected by 'boot.method' attr.
* Default is pmi.
*/
if (attr_get (ctx.attrs, "boot.method", &boot_method, NULL) < 0) {
boot_method = "pmi";
if (attr_add (ctx.attrs, "boot.method", boot_method, 0)) {
log_err ("setattr boot.method");
goto cleanup;
}
}
if (attr_set_flags (ctx.attrs,
"boot.method",
FLUX_ATTRFLAG_IMMUTABLE) < 0) {
log_err ("attr_set_flags boot.method");
goto cleanup;
}
if (!strcmp (boot_method, "config")) {
if (boot_config (ctx.h, ctx.overlay, ctx.attrs, ctx.tbon_k) < 0) {
log_msg ("bootstrap failed");
goto cleanup;
}
}
else if (!strcmp (boot_method, "pmi")) {
double elapsed_sec;
struct timespec start_time;
monotime (&start_time);
if (boot_pmi (ctx.overlay, ctx.attrs, ctx.tbon_k) < 0) {
log_msg ("bootstrap failed");
goto cleanup;
}
elapsed_sec = monotime_since (start_time) / 1000;
flux_log (ctx.h, LOG_INFO, "pmi: bootstrap time %.1fs", elapsed_sec);
}
else {
log_err ("unknown boot method: %s", boot_method);
goto cleanup;
}
uint32_t rank = overlay_get_rank (ctx.overlay);
uint32_t size = overlay_get_size (ctx.overlay);
assert (size > 0);
/* Must be called after overlay setup */
if (overlay_register_attrs (ctx.overlay, ctx.attrs) < 0) {
log_err ("registering overlay attributes");
goto cleanup;
}
if (ctx.verbose)
log_msg ("boot: rank=%d size=%d", rank, size);
// Setup profiling
setup_profiling (argv[0], rank);
/* If persist-filesystem or persist-directory are set, initialize those,
* but only on rank 0.
*/
if (create_persistdir (ctx.attrs, rank) < 0) {
log_err ("create_persistdir");
goto cleanup;
}
/* Initialize logging.
* OK to call flux_log*() after this.
*/
logbuf_initialize (ctx.h, rank, ctx.attrs);
/* Allow flux_get_rank() and flux_get_size() to work in the broker.
*/
if (create_dummyattrs (ctx.h, rank, size) < 0) {
log_err ("creating dummy attributes");
goto cleanup;
}
/* Registers message handlers and obtains rank.
*/
if (content_cache_set_flux (ctx.cache, ctx.h) < 0) {
log_err ("content_cache_set_flux");
goto cleanup;
}
if (content_cache_register_attrs (ctx.cache, ctx.attrs) < 0) {
log_err ("content cache attributes");
goto cleanup;
}
if (ctx.verbose) {
const char *parent = overlay_get_parent (ctx.overlay);
const char *child = overlay_get_child (ctx.overlay);
log_msg ("parent: %s", parent ? parent : "none");
log_msg ("child: %s", child ? child : "none");
}
set_proctitle (rank);
if (rank == 0) {
const char *rc1, *rc3, *pmi, *uri;
const char *rc2 = ctx.init_shell_cmd;
size_t rc2_len = ctx.init_shell_cmd_len;
if (runlevel_register_attrs (ctx.runlevel, ctx.attrs) < 0) {
log_err ("configuring runlevel attributes");
goto cleanup;
}
if (attr_get (ctx.attrs, "local-uri", &uri, NULL) < 0) {
log_err ("local-uri is not set");
goto cleanup;
}
if (attr_get (ctx.attrs, "broker.rc1_path", &rc1, NULL) < 0) {
log_err ("conf.rc1_path is not set");
goto cleanup;
}
if (attr_get (ctx.attrs, "broker.rc3_path", &rc3, NULL) < 0) {
log_err ("conf.rc3_path is not set");
goto cleanup;
}
if (attr_get (ctx.attrs, "conf.pmi_library_path", &pmi, NULL) < 0) {
log_err ("conf.pmi_library_path is not set");
goto cleanup;
}
runlevel_set_size (ctx.runlevel, size);
runlevel_set_callback (ctx.runlevel, runlevel_cb, &ctx);
runlevel_set_io_callback (ctx.runlevel, runlevel_io_cb, &ctx);
runlevel_set_flux (ctx.runlevel, ctx.h);
if (runlevel_set_rc (ctx.runlevel,
1,
rc1,
rc1 ? strlen (rc1) + 1 : 0,
uri) < 0) {
log_err ("runlevel_set_rc 1");
goto cleanup;
}
if (runlevel_set_rc (ctx.runlevel,
2,
rc2,
rc2_len,
uri) < 0) {
log_err ("runlevel_set_rc 2");
goto cleanup;
}
if (runlevel_set_rc (ctx.runlevel,
3,
rc3,
rc3 ? strlen (rc3) + 1 : 0,
uri) < 0) {
log_err ("runlevel_set_rc 3");
goto cleanup;
}
}
/* If Flux was launched by Flux, now that PMI bootstrap and runlevel
* initialization is complete, unset Flux job environment variables
* so that they don't leak into the jobs other children of this instance.
*/
unsetenv ("FLUX_JOB_ID");
unsetenv ("FLUX_JOB_SIZE");
unsetenv ("FLUX_JOB_NNODES");
/* Wire up the overlay.
*/
if (rank > 0) {
if (ctx.verbose)
log_msg ("initializing overlay connect");
if (overlay_connect (ctx.overlay) < 0) {
log_err ("overlay_connect");
goto cleanup;
}
}
if (!(ctx.shutdown = shutdown_create (ctx.h,
ctx.shutdown_grace,
size,
ctx.tbon_k,
ctx.overlay))) {
log_err ("shutdown_create");
goto cleanup;
}
shutdown_set_callback (ctx.shutdown, shutdown_cb, &ctx);
/* Register internal services
*/
if (attr_register_handlers (ctx.attrs, ctx.h) < 0) {
log_err ("attr_register_handlers");
goto cleanup;
}
if (heaptrace_initialize (ctx.h) < 0) {
log_err ("heaptrace_initialize");
goto cleanup;
}
if (exec_initialize (ctx.h, rank, ctx.attrs) < 0) {
log_err ("exec_initialize");
goto cleanup;
}
if (ping_initialize (ctx.h, "cmb") < 0) {
log_err ("ping_initialize");
goto cleanup;
}
if (rusage_initialize (ctx.h, "cmb") < 0) {
log_err ("rusage_initialize");
goto cleanup;
}
if (!(handlers = broker_add_services (&ctx))) {
log_err ("broker_add_services");
goto cleanup;
}
/* Initialize comms module infrastructure.
*/
if (ctx.verbose)
log_msg ("initializing modules");
modhash_set_rank (ctx.modhash, rank);
modhash_set_flux (ctx.modhash, ctx.h);
modhash_set_heartbeat (ctx.modhash, ctx.heartbeat);
/* install heartbeat (including timer on rank 0)
*/
heartbeat_set_flux (ctx.heartbeat, ctx.h);
if (heartbeat_register_attrs (ctx.heartbeat, ctx.attrs) < 0) {
log_err ("initializing heartbeat attributes");
goto cleanup;
}
if (heartbeat_set_rate (ctx.heartbeat, ctx.heartbeat_rate) < 0) {
log_err ("heartbeat_set_rate");
goto cleanup;
}
if (heartbeat_start (ctx.heartbeat) < 0) {
log_err ("heartbeat_start");
goto cleanup;
}
if (rank == 0 && ctx.verbose)
log_msg ("installing session heartbeat: T=%0.1fs",
heartbeat_get_rate (ctx.heartbeat));
/* Send hello message to parent.
* N.B. uses tbon topology attributes set above.
* Start init once wireup is complete.
*/
hello_set_flux (ctx.hello, ctx.h);
hello_set_callback (ctx.hello, hello_update_cb, &ctx);
if (hello_register_attrs (ctx.hello, ctx.attrs) < 0) {
log_err ("configuring hello attributes");
goto cleanup;
}
if (hello_start (ctx.hello) < 0) {
log_err ("hello_start");
goto cleanup;
}
/* Load the local connector module.
* Other modules will be loaded in rc1 using flux module,
* which uses the local connector.
* The shutdown protocol unloads it.
*/
if (ctx.verbose)
log_msg ("loading connector-local");
if (load_module_byname (&ctx, "connector-local", NULL, 0, NULL) < 0) {
log_err ("load_module connector-local");
goto cleanup;
}
/* Event loop
*/
if (ctx.verbose)
log_msg ("entering event loop");
/* Once we enter the reactor, default exit_rc is now 0 */
exit_rc = 0;
if (flux_reactor_run (ctx.reactor, 0) < 0)
log_err ("flux_reactor_run");
if (ctx.verbose)
log_msg ("exited event loop");
/* inform all lingering subprocesses we are tearing down. Do this
* before any cleanup/teardown below, as this call will re-enter
* the reactor.
*/
exec_terminate_subprocesses (ctx.h);
cleanup:
if (ctx.verbose)
log_msg ("cleaning up");
/* Restore default sigmask and actions for SIGINT, SIGTERM
*/
if (sigprocmask (SIG_SETMASK, &old_sigmask, NULL) < 0)
log_err ("sigprocmask");
if (sigaction (SIGINT, &old_sigact_int, NULL) < 0)
log_err ("sigaction");
if (sigaction (SIGTERM, &old_sigact_term, NULL) < 0)
log_err ("sigaction");
/* remove heartbeat timer, if any
*/
heartbeat_stop (ctx.heartbeat);
/* Unregister builtin services
*/
attr_destroy (ctx.attrs);
content_cache_destroy (ctx.cache);
modhash_destroy (ctx.modhash);
zlist_destroy (&ctx.sigwatchers);
overlay_destroy (ctx.overlay);
heartbeat_destroy (ctx.heartbeat);
service_switch_destroy (ctx.services);
hello_destroy (ctx.hello);
shutdown_destroy (ctx.shutdown);
broker_remove_services (handlers);
publisher_destroy (ctx.publisher);
flux_close (ctx.h);
flux_reactor_destroy (ctx.reactor);
zlist_destroy (&ctx.subscriptions);
runlevel_destroy (ctx.runlevel);
free (ctx.init_shell_cmd);
return exit_rc;
}
/* Maps an environment variable to a broker attribute at startup.
 * required: fatal error (log_msg_exit) if the variable is unset.
 * sanitize: unset the variable from the environment once captured,
 *           so it does not leak to children of this broker.
 */
struct attrmap {
    const char *env;
    const char *attr;
    uint8_t required:1;
    uint8_t sanitize:1;
};
/* Environment variables imported into the attribute table by
 * init_attrs_from_environment().  FLUX_URI / FLUX_KVS_NAMESPACE are
 * mapped to parent-* attributes and scrubbed from the environment
 * (sanitize=1).  Terminated by a NULL sentinel entry.
 */
static struct attrmap attrmap[] = {
    { "FLUX_EXEC_PATH",         "conf.exec_path",           1, 0 },
    { "FLUX_CONNECTOR_PATH",    "conf.connector_path",      1, 0 },
    { "FLUX_MODULE_PATH",       "conf.module_path",         1, 0 },
    { "FLUX_PMI_LIBRARY_PATH",  "conf.pmi_library_path",    1, 0 },
    { "FLUX_SEC_DIRECTORY",     "security.keydir",          1, 0 },
    { "FLUX_URI",               "parent-uri",               0, 1 },
    { "FLUX_KVS_NAMESPACE",     "parent-kvs-namespace",     0, 1 },
    { NULL, NULL, 0, 0 },
};
/* Import broker attributes from the environment variables listed in
 * attrmap[].  A missing required variable is fatal.  Variables flagged
 * for sanitizing are removed from the environment after capture.
 */
static void init_attrs_from_environment (attr_t *attrs)
{
    const int flags = 0;    // XXX possibly these should be immutable?
    struct attrmap *entry;

    for (entry = attrmap; entry->env != NULL; entry++) {
        const char *value = getenv (entry->env);
        if (value == NULL && entry->required)
            log_msg_exit ("required environment variable %s is not set",
                          entry->env);
        if (attr_add (attrs, entry->attr, value, flags) < 0)
            log_err_exit ("attr_add %s", entry->attr);
        if (entry->sanitize)
            unsetenv (entry->env);
    }
}
/* Publish this broker's pid as the immutable 'broker.pid' attribute.
 * Fatal on failure.
 */
static void init_attrs_broker_pid (attr_t *attrs, pid_t pid)
{
    const char *attrname = "broker.pid";
    char *pidval = xasprintf ("%u", pid);

    if (attr_add (attrs, attrname, pidval, FLUX_ATTRFLAG_IMMUTABLE) < 0)
        log_err_exit ("attr_add %s", attrname);
    free (pidval);
}
/* Record the built-in rc1/rc3 script paths as (mutable) broker
 * attributes.  Fatal on failure.
 */
static void init_attrs_rc_paths (attr_t *attrs)
{
    const char *rc1 = flux_conf_builtin_get ("rc1_path", FLUX_CONF_AUTO);
    const char *rc3 = flux_conf_builtin_get ("rc3_path", FLUX_CONF_AUTO);

    if (attr_add (attrs, "broker.rc1_path", rc1, 0) < 0)
        log_err_exit ("attr_add rc1_path");
    if (attr_add (attrs, "broker.rc3_path", rc3, 0) < 0)
        log_err_exit ("attr_add rc3_path");
}
/* Populate the broker attribute table: environment-derived attributes,
 * the broker pid, rc script paths, and the immutable version string.
 * Fatal on failure.
 */
static void init_attrs (attr_t *attrs, pid_t pid)
{
    init_attrs_from_environment (attrs);
    init_attrs_broker_pid (attrs, pid);
    init_attrs_rc_paths (attrs);
    if (attr_add (attrs,
                  "version",
                  FLUX_CORE_VERSION_STRING,
                  FLUX_ATTRFLAG_IMMUTABLE) < 0)
        log_err_exit ("attr_add version");
}
/* Parse TOML config, emitting any parse error here.
* This will fail if no configuration exists.
*/
/* Load TOML configuration via flux_get_conf(), logging any parse error
 * (with file/line detail when available).  Returns 0 on success, -1 if
 * no configuration exists or it is malformed.
 */
static int parse_config_files (flux_t *h)
{
    flux_conf_error_t error;

    if (flux_get_conf (h, &error) != NULL)
        return 0;
    if (error.lineno == -1)     /* no line info, e.g. file-level failure */
        log_err ("Config file error: %s%s%s",
                 error.filename,
                 *error.filename ? ": " : "",
                 error.errbuf);
    else
        log_err ("Config file error: %s:%d: %s",
                 error.filename,
                 error.lineno,
                 error.errbuf);
    return -1;
}
/* Wireup progress callback.  Logs progress; once all ranks have checked
 * in, arms the overlay idle warning and starts run level 1.
 */
static void hello_update_cb (hello_t *hello, void *arg)
{
    broker_ctx_t *ctx = arg;
    int count = hello_get_count (hello);
    int size = overlay_get_size (ctx->overlay);
    double elapsed = hello_get_time (hello);

    if (!hello_complete (hello)) {
        flux_log (ctx->h, LOG_INFO, "wireup: %d/%d (incomplete) %.1fs",
                  count, size, elapsed);
        return;
    }
    flux_log (ctx->h, LOG_INFO, "wireup: %d/%d (complete) %.1fs",
              count, size, elapsed);
    flux_log (ctx->h, LOG_INFO, "Run level %d starting", 1);
    overlay_set_idle_warning (ctx->overlay, 3);
    if (runlevel_set_level (ctx->runlevel, 1) < 0)
        log_err_exit ("runlevel_set_level 1");
    /* FIXME: shutdown hello protocol */
}
/* If shutdown timeout has occured, exit immediately.
* If shutdown is beginning, start unload of connector-local module.
* If shutdown is ending, then IFF connector-local has finished
* unloading, stop the reactor. Otherwise module_status_cb() will do it.
*/
/* Shutdown state change callback (see comment above for protocol). */
static void shutdown_cb (struct shutdown *s, void *arg)
{
    broker_ctx_t *ctx = arg;
    module_t *local;

    if (shutdown_is_expired (s)) {
        log_msg ("shutdown timer expired on rank %"PRIu32,
                 overlay_get_rank (ctx->overlay));
        _exit (1);
    }
    local = module_lookup_byname (ctx->modhash, "connector-local");
    if (!shutdown_is_complete (s)) {
        /* shutdown beginning: start unloading connector-local */
        if (local)
            module_stop (local);
    }
    else if (!local) {
        /* shutdown complete and connector-local already gone */
        flux_reactor_stop (flux_get_reactor (ctx->h));
    }
}
/* Rename this process to "flux-broker-<rank>" for ps(1)/top(1). */
static void set_proctitle (uint32_t rank)
{
    static char title[32];

    snprintf (title, sizeof (title), "flux-broker-%"PRIu32, rank);
    (void)prctl (PR_SET_NAME, title, 0, 0, 0);
}
/* Handle line by line output on stdout, stderr of runlevel subprocess.
*/
/* Forward one line of rc-script output to the flux log; stderr lines
 * are logged at LOG_ERR, everything else at LOG_INFO.
 */
static void runlevel_io_cb (runlevel_t *r, const char *name,
                            const char *msg, void *arg)
{
    broker_ctx_t *ctx = arg;
    int priority = LOG_INFO;

    if (strcmp (name, "stderr") == 0)
        priority = LOG_ERR;
    flux_log (ctx->h, priority, "rc%d: %s", runlevel_get_level (r), msg);
}
/* Handle completion of runlevel subprocess.
*/
static void runlevel_cb (runlevel_t *r, int level, int rc, double elapsed,
                         const char *exit_string, void *arg)
{
    broker_ctx_t *ctx = arg;
    int new_level = -1;     // -1 means no runlevel transition below
    /* Log completion; LOG_ERR if the subprocess failed */
    flux_log (ctx->h, rc == 0 ? LOG_INFO : LOG_ERR,
              "Run level %d %s (rc=%d) %.1fs", level, exit_string, rc, elapsed);
    switch (level) {
        case 1: /* init completed */
            if (rc != 0) {
                /* rc1 failed: record its exit code and skip level 2
                 * (the initial program), going straight to finalization */
                exit_rc = rc;
                new_level = 3;
            } else
                new_level = 2;
            break;
        case 2: /* initial program completed */
            exit_rc = rc;
            new_level = 3;
            break;
        case 3: /* finalization completed */
            /* keep an earlier nonzero exit code in preference to rc3's */
            if (rc != 0 && exit_rc == 0)
                exit_rc = rc;
            shutdown_instance (ctx->shutdown); // initiate shutdown from rank 0
            break;
    }
    if (new_level != -1) {
        flux_log (ctx->h, LOG_INFO, "Run level %d starting", new_level);
        if (runlevel_set_level (r, new_level) < 0)
            log_err_exit ("runlevel_set_level %d", new_level);
    }
}
/* Seed the handle's attribute cache with "rank" and "size" so that
 * flux_get_rank()/flux_get_size() work within the broker itself.
 * Returns 0 on success, -1 on failure.
 */
static int create_dummyattrs (flux_t *h, uint32_t rank, uint32_t size)
{
    int rc = -1;
    char *rank_str = NULL;
    char *size_str = NULL;

    if (asprintf (&rank_str, "%"PRIu32, rank) >= 0
        && flux_attr_set_cacheonly (h, "rank", rank_str) == 0
        && asprintf (&size_str, "%"PRIu32, size) >= 0
        && flux_attr_set_cacheonly (h, "size", size_str) == 0)
        rc = 0;
    free (rank_str);
    free (size_str);
    return rc;
}
/* Handle global rundir attribute.
*
* If not set, create a temporary directory and use it as the rundir.
* If set, attempt to create it if it doesn't exist. In either case,
* validate directory persmissions and set the rundir attribute
* immutable. If the rundir is created by this function it will be
* scheduled for later cleanup at broker exit. Pre-existing directories
* are left intact.
*/
/* Fix: removed the unused local 'uri' (declared NULL, only ever freed).
 * Returns 0 on success, -1 with errno set on failure.
 */
static int create_rundir (attr_t *attrs)
{
    const char *run_dir;
    char *dir = NULL;
    bool do_cleanup = true;
    struct stat sb;
    int rc = -1;
    /* If rundir attribute isn't set, then create a temp directory
     * and use that as rundir. If directory was set, try to create it if
     * it doesn't exist. If directory was pre-existing, do not schedule
     * the dir for auto-cleanup at broker exit.
     */
    if (attr_get (attrs, "rundir", &run_dir, NULL) < 0) {
        const char *tmpdir = getenv ("TMPDIR");
        if (asprintf (&dir, "%s/flux-XXXXXX", tmpdir ? tmpdir : "/tmp") < 0)
            goto done;
        if (!(run_dir = mkdtemp (dir)))
            goto done;
        if (attr_add (attrs, "rundir", run_dir, 0) < 0)
            goto done;
    }
    else if (mkdir (run_dir, 0700) < 0) {
        if (errno != EEXIST)
            goto done;
        /* Do not cleanup directory if we did not create it here
         */
        do_cleanup = false;
    }
    /* Ensure created or existing directory is a directory with full
     * owner permissions:
     */
    if (stat (run_dir, &sb) < 0)
        goto done;
    if (!S_ISDIR (sb.st_mode)) {
        errno = ENOTDIR;
        goto done;
    }
    if ((sb.st_mode & S_IRWXU) != S_IRWXU) {
        errno = EPERM;
        goto done;
    }
    /* rundir is now fixed, so make the attribute immutable, and
     * schedule the dir for cleanup at exit if we created it here.
     */
    if (attr_set_flags (attrs, "rundir", FLUX_ATTRFLAG_IMMUTABLE) < 0)
        goto done;
    if (do_cleanup)
        cleanup_push_string (cleanup_directory_recursive, run_dir);
    rc = 0;
done:
    free (dir);
    return rc;
}
/* Overlay init callback: create per-rank directory <rundir>/<rank>,
 * publish it as the immutable 'broker.rundir' attribute, and if no
 * 'local-uri' attribute exists yet, derive the default
 * local://<broker.rundir>/local and set it (immutable).
 * Returns 0 on success, -1 on failure (error already logged).
 */
static int create_broker_rundir (overlay_t *ov, void *arg)
{
    attr_t *attrs = arg;
    uint32_t rank;
    const char *rundir;
    const char *local_uri;
    char *broker_rundir = NULL;
    char *uri = NULL;
    int rv = -1;
    if (attr_get (attrs, "rundir", &rundir, NULL) < 0) {
        log_msg ("create_broker_rundir: rundir attribute not set");
        goto cleanup;
    }
    rank = overlay_get_rank (ov);
    if (asprintf (&broker_rundir, "%s/%u", rundir, rank) < 0) {
        log_err ("create_broker_rundir: asprintf");
        goto cleanup;
    }
    if (mkdir (broker_rundir, 0700) < 0) {
        log_err ("create_broker_rundir: mkdir (%s)", broker_rundir);
        goto cleanup;
    }
    if (attr_add (attrs, "broker.rundir", broker_rundir,
                  FLUX_ATTRFLAG_IMMUTABLE) < 0) {
        log_err ("create_broker_rundir: attr_add broker.rundir");
        goto cleanup;
    }
    /* Only derive local-uri when the attribute is not already set */
    if (attr_get (attrs, "local-uri", &local_uri, NULL) < 0) {
        if (asprintf (&uri, "local://%s/local", broker_rundir) < 0) {
            log_err ("create_broker_rundir: asprintf (uri)");
            goto cleanup;
        }
        if (attr_add (attrs, "local-uri", uri, FLUX_ATTRFLAG_IMMUTABLE) < 0) {
            log_err ("create_broker_rundir: attr_add (local-uri)");
            goto cleanup;
        }
    }
    rv = 0;
cleanup:
    free (uri);
    free (broker_rundir);
    return rv;
}
/* If 'persist-directory' set, validate it, make it immutable, done.
* If 'persist-filesystem' set, validate it, make it immutable, then:
* Avoid name collisions with other flux tmpdirs used in testing
* e.g. "flux-<pid>-XXXXXX"
*/
static int create_persistdir (attr_t *attrs, uint32_t rank)
{
    struct stat sb;
    const char *attr = "persist-directory";
    const char *persist_dir, *persist_fs;
    char *dir, *tmpl = NULL;
    int rc = -1;
    /* Persistence applies to rank 0 only; other ranks drop the
     * attributes here (they are re-added as immutable NULLs below).
     */
    if (rank > 0) {
        (void) attr_delete (attrs, "persist-filesystem", true);
        (void) attr_delete (attrs, "persist-directory", true);
        goto done_success;
    }
    if (attr_get (attrs, attr, &persist_dir, NULL) == 0) {
        /* persist-directory given: must be a directory with full owner
         * permissions; then freeze the attribute.
         */
        if (stat (persist_dir, &sb) < 0)
            goto done;
        if (!S_ISDIR (sb.st_mode)) {
            errno = ENOTDIR;
            goto done;
        }
        if ((sb.st_mode & S_IRWXU) != S_IRWXU) {
            errno = EPERM;
            goto done;
        }
        if (attr_set_flags (attrs, attr, FLUX_ATTRFLAG_IMMUTABLE) < 0)
            goto done;
    } else {
        /* No persist-directory: if persist-filesystem is set, validate
         * it the same way, then create a unique subdirectory there and
         * use that as the persist-directory.
         */
        if (attr_get (attrs, "persist-filesystem", &persist_fs, NULL)< 0) {
            goto done_success;
        }
        if (stat (persist_fs, &sb) < 0)
            goto done;
        if (!S_ISDIR (sb.st_mode)) {
            errno = ENOTDIR;
            goto done;
        }
        if ((sb.st_mode & S_IRWXU) != S_IRWXU) {
            errno = EPERM;
            goto done;
        }
        if (attr_set_flags (attrs, "persist-filesystem",
                            FLUX_ATTRFLAG_IMMUTABLE) < 0)
            goto done;
        /* pid in template avoids collisions with other instances
         * sharing the filesystem (see comment above this function)
         */
        if (asprintf (&tmpl,
                      "%s/fluxP-%d-XXXXXX",
                      persist_fs,
                      (int)getpid()) < 0)
            goto done;
        if (!(dir = mkdtemp (tmpl)))
            goto done;
        if (attr_add (attrs, attr, dir, FLUX_ATTRFLAG_IMMUTABLE) < 0)
            goto done;
    }
done_success:
    /* Ensure both attributes exist (possibly with NULL value) and are
     * immutable on every rank.
     */
    if (attr_get (attrs, "persist-filesystem", NULL, NULL) < 0) {
        if (attr_add (attrs, "persist-filesystem", NULL,
                      FLUX_ATTRFLAG_IMMUTABLE) < 0)
            goto done;
    }
    if (attr_get (attrs, "persist-directory", NULL, NULL) < 0) {
        if (attr_add (attrs, "persist-directory", NULL,
                      FLUX_ATTRFLAG_IMMUTABLE) < 0)
            goto done;
    }
    rc = 0;
done:
    if (tmpl)
        free (tmpl);
    return rc;
}
/* Return true if 'rank' is in the idset encoded by 's', or if 's' is
 * NULL (no restriction).  A malformed idset string is fatal.
 */
static bool nodeset_member (const char *s, uint32_t rank)
{
    struct idset *ns;
    bool member;

    if (!s)
        return true;
    if (!(ns = idset_decode (s)))
        log_msg_exit ("malformed nodeset: %s", s);
    member = idset_test (ns, rank);
    idset_destroy (ns);
    return member;
}
/* Service route callback: forward the request to the owning module. */
static int mod_svc_cb (const flux_msg_t *msg, void *arg)
{
    module_t *mod = arg;

    return module_sendmsg (mod, msg);
}
/* If a dlerror/dlsym error occurs during modfind/modname,
* log it here. Such messages can be helpful in diagnosing
* dynamic binding problems for comms modules.
*/
/* Log a dlerror/dlsym diagnostic from flux_modname/flux_modfind (see
 * comment above) at debug priority.
 */
static void module_dlerror (const char *errmsg, void *arg)
{
    flux_t *handle = arg;

    flux_log (handle, LOG_DEBUG, "flux_modname: %s", errmsg);
}
/* Load the comms module at shared object 'path', registering a service
 * route under the module name and passing the argz vector as module
 * arguments.  If 'request' is non-NULL (insmod RPC), the response is
 * deferred until the module changes state (see module_status_cb).
 * On failure, completed registration steps are unwound in reverse order
 * via the labeled error paths.  Returns 0, or -1 with errno set.
 */
static int load_module_bypath (broker_ctx_t *ctx, const char *path,
                               const char *argz, size_t argz_len,
                               const flux_msg_t *request)
{
    module_t *p = NULL;
    char *name, *arg;
    if (!(name = flux_modname (path, module_dlerror, ctx->h))) {
        errno = ENOENT;
        goto error;
    }
    if (!(p = module_add (ctx->modhash, path)))
        goto error;
    if (service_add (ctx->services, module_get_name (p),
                     module_get_uuid (p), mod_svc_cb, p) < 0)
        goto module_remove;
    /* Pass each argz element to the module as an argument */
    arg = argz_next (argz, argz_len, NULL);
    while (arg) {
        module_add_arg (p, arg);
        arg = argz_next (argz, argz_len, arg);
    }
    module_set_poller_cb (p, module_cb, ctx);
    module_set_status_cb (p, module_status_cb, ctx);
    if (request && module_push_insmod (p, request) < 0) // response deferred
        goto service_remove;
    if (module_start (p) < 0)
        goto service_remove;
    flux_log (ctx->h, LOG_DEBUG, "insmod %s", name);
    free (name);
    return 0;
service_remove:
    service_remove_byuuid (ctx->services, module_get_uuid (p));
module_remove:
    module_remove (ctx->modhash, p);
error:
    free (name);    /* name may be NULL when flux_modname failed */
    return -1;
}
/* Locate module 'name' on the conf.module_path search path and load it
 * via load_module_bypath().  Returns 0 on success, -1 on failure.
 */
static int load_module_byname (broker_ctx_t *ctx, const char *name,
                               const char *argz, size_t argz_len,
                               const flux_msg_t *request)
{
    const char *modpath;
    char *path;
    int rc;

    if (attr_get (ctx->attrs, "conf.module_path", &modpath, NULL) < 0) {
        log_msg ("conf.module_path is not set");
        return -1;
    }
    if (!(path = flux_modfind (modpath, name, module_dlerror, ctx->h))) {
        log_msg ("%s: not found in module search path", name);
        return -1;
    }
    rc = load_module_bypath (ctx, path, argz, argz_len, request);
    free (path);
    return rc;
}
/* Begin asynchronous unload of module 'name'; the rmmod response (if
 * 'request' is non-NULL) is deferred until the module exits.
 * Returns 0 on success, -1 with errno set (ENOENT if not loaded).
 */
static int unload_module_byname (broker_ctx_t *ctx, const char *name,
                                 const flux_msg_t *request)
{
    module_t *mod = module_lookup_byname (ctx->modhash, name);

    if (!mod) {
        errno = ENOENT;
        return -1;
    }
    if (module_stop (mod) < 0 || module_push_rmmod (mod, request) < 0)
        return -1;
    flux_log (ctx->h, LOG_DEBUG, "rmmod %s", name);
    return 0;
}
/* zlist free function: stop, then destroy, one signal watcher. */
static void broker_destroy_sigwatcher (void *data)
{
    flux_watcher_t *watcher = data;

    flux_watcher_stop (watcher);
    flux_watcher_destroy (watcher);
}
/* Install reactor watchers for the signals the broker traps, dispatched
 * to signal_cb.  Watchers are pushed onto ctx->sigwatchers so cleanup
 * can stop/destroy them (broker_destroy_sigwatcher).
 * Returns 0 on success, -1 on error (error logged).
 * Fix: loop index is size_t to avoid signed/unsigned comparison with
 * the sizeof() expression.
 */
static int broker_handle_signals (broker_ctx_t *ctx)
{
    const int sigs[] = { SIGHUP, SIGINT, SIGQUIT, SIGTERM, SIGSEGV, SIGFPE,
                         SIGALRM };
    size_t i;
    flux_watcher_t *w;
    for (i = 0; i < sizeof (sigs) / sizeof (sigs[0]); i++) {
        w = flux_signal_watcher_create (ctx->reactor, sigs[i], signal_cb, ctx);
        if (!w) {
            log_err ("flux_signal_watcher_create");
            return -1;
        }
        if (zlist_push (ctx->sigwatchers, w) < 0) {
            log_errn (ENOMEM, "zlist_push");
            return -1;
        }
        zlist_freefn (ctx->sigwatchers, w, broker_destroy_sigwatcher, false);
        flux_watcher_start (w);
    }
    return 0;
}
/**
** Built-in services
**/
/* Unload a comms module by name, asynchronously.
* Message format is defined by RFC 5.
* N.B. unload_module_byname() handles response, unless it fails early
* and returns -1.
*/
static void cmb_rmmod_cb (flux_t *h, flux_msg_handler_t *mh,
                          const flux_msg_t *msg, void *arg)
{
    broker_ctx_t *ctx = arg;
    const char *name;

    /* On success the response is deferred (sent when module exits) */
    if (flux_request_unpack (msg, NULL, "{s:s}", "name", &name) == 0
        && unload_module_byname (ctx, name, msg) == 0)
        return;
    if (flux_respond_error (h, msg, errno, NULL) < 0)
        flux_log_error (h, "%s: flux_respond_error", __FUNCTION__);
}
/* Load a comms module by name, asynchronously.
* Message format is defined by RFC 5.
* N.B. load_module_bypath() handles response, unless it returns -1.
*/
static void cmb_insmod_cb (flux_t *h, flux_msg_handler_t *mh,
                           const flux_msg_t *msg, void *arg)
{
    broker_ctx_t *ctx = arg;
    const char *path;
    json_t *args;
    size_t index;
    json_t *value;
    char *argz = NULL;      /* argz vector built from the 'args' array */
    size_t argz_len = 0;
    error_t e;
    if (flux_request_unpack (msg, NULL, "{s:s s:o}", "path", &path,
                             "args", &args) < 0)
        goto error;
    if (!json_is_array (args))
        goto proto;
    /* Every args element must be a string; accumulate into argz */
    json_array_foreach (args, index, value) {
        if (!json_is_string (value))
            goto proto;
        if ((e = argz_add (&argz, &argz_len, json_string_value (value)))) {
            errno = e;      /* argz_add returns an errno value, not -1 */
            goto error;
        }
    }
    /* On success the response is deferred to module state change */
    if (load_module_bypath (ctx, path, argz, argz_len, msg) < 0)
        goto error;
    free (argz);
    return;
proto:
    errno = EPROTO;
error:
    if (flux_respond_error (h, msg, errno, NULL) < 0)
        flux_log_error (h, "%s: flux_respond_error", __FUNCTION__);
    free (argz);
}
/* Load a comms module by name.
* Message format is defined by RFC 5.
*/
static void cmb_lsmod_cb (flux_t *h, flux_msg_handler_t *mh,
                          const flux_msg_t *msg, void *arg)
{
    broker_ctx_t *ctx = arg;
    json_t *mods = NULL;

    if (flux_request_decode (msg, NULL, NULL) == 0
        && (mods = module_get_modlist (ctx->modhash, ctx->services))) {
        if (flux_respond_pack (h, msg, "{s:O}", "mods", mods) < 0)
            flux_log_error (h, "%s: flux_respond_pack", __FUNCTION__);
        json_decref (mods);
        return;
    }
    if (flux_respond_error (h, msg, errno, NULL) < 0)
        flux_log_error (h, "%s: flux_respond_error", __FUNCTION__);
}
/* cmb.lspeer: respond with the encoded list of overlay peers. */
static void cmb_lspeer_cb (flux_t *h, flux_msg_handler_t *mh,
                           const flux_msg_t *msg, void *arg)
{
    broker_ctx_t *ctx = arg;
    char *out = overlay_lspeer_encode (ctx->overlay);

    if (out) {
        if (flux_respond (h, msg, out) < 0)
            flux_log_error (h, "%s: flux_respond", __FUNCTION__);
        free (out);
        return;
    }
    if (flux_respond_error (h, msg, errno, NULL) < 0)
        flux_log_error (h, "%s: flux_respond_error", __FUNCTION__);
}
#if CODE_COVERAGE_ENABLED
/* Declared here so coverage data can be flushed before _exit() below
 * bypasses normal (atexit) shutdown paths.
 */
void __gcov_flush (void);
#endif
/* cmb.panic: print the panic reason to stderr and terminate the broker
 * immediately via _exit(1), skipping all cleanup.  No response is sent.
 */
static void cmb_panic_cb (flux_t *h, flux_msg_handler_t *mh,
                          const flux_msg_t *msg, void *arg)
{
    const char *reason;
    int flags; // reserved
    if (flux_request_unpack (msg, NULL, "{s:s s:i}",
                             "reason", &reason,
                             "flags", &flags) < 0) {
        flux_log_error (h, "malformed cmb.panic request");
        return;
    }
    fprintf (stderr, "PANIC: %s\n", reason);
#if CODE_COVERAGE_ENABLED
    __gcov_flush ();
#endif
    _exit (1);
    /*NOTREACHED*/
}
/* cmb.disconnect: terminate any subprocesses owned by the disconnecting
 * client (identified by first route frame).  No response is sent.
 */
static void cmb_disconnect_cb (flux_t *h, flux_msg_handler_t *mh,
                               const flux_msg_t *msg, void *arg)
{
    char *sender;

    if (flux_msg_get_route_first (msg, &sender) < 0)
        return;     /* no response */
    exec_terminate_subprocesses_by_uuid (h, sender);
    free (sender);
    /* no response */
}
/* cmb.sub: subscribe the requesting module (by route uuid) to events
 * matching 'topic'.
 */
static void cmb_sub_cb (flux_t *h, flux_msg_handler_t *mh,
                        const flux_msg_t *msg, void *arg)
{
    broker_ctx_t *ctx = arg;
    const char *topic;
    char *uuid = NULL;
    int rc = -1;

    if (flux_request_unpack (msg, NULL, "{ s:s }", "topic", &topic) < 0)
        goto done;
    if (flux_msg_get_route_first (msg, &uuid) < 0)
        goto done;
    if (!uuid) {
        errno = EPROTO;
        goto done;
    }
    rc = module_subscribe (ctx->modhash, uuid, topic);
done:
    if (rc == 0) {
        if (flux_respond (h, msg, NULL) < 0)
            flux_log_error (h, "%s: flux_respond", __FUNCTION__);
    }
    else if (flux_respond_error (h, msg, errno, NULL) < 0)
        flux_log_error (h, "%s: flux_respond_error", __FUNCTION__);
    free (uuid);
}
/* cmb.unsub: drop the requesting module's event subscription for
 * 'topic' (mirror of cmb_sub_cb).
 */
static void cmb_unsub_cb (flux_t *h, flux_msg_handler_t *mh,
                          const flux_msg_t *msg, void *arg)
{
    broker_ctx_t *ctx = arg;
    const char *topic;
    char *uuid = NULL;
    int rc = -1;

    if (flux_request_unpack (msg, NULL, "{ s:s }", "topic", &topic) < 0)
        goto done;
    if (flux_msg_get_route_first (msg, &uuid) < 0)
        goto done;
    if (!uuid) {
        errno = EPROTO;
        goto done;
    }
    rc = module_unsubscribe (ctx->modhash, uuid, topic);
done:
    if (rc == 0) {
        if (flux_respond (h, msg, NULL) < 0)
            flux_log_error (h, "%s: flux_respond", __FUNCTION__);
    }
    else if (flux_respond_error (h, msg, errno, NULL) < 0)
        flux_log_error (h, "%s: flux_respond_error", __FUNCTION__);
    free (uuid);
}
/* Internal service callback: requeue the request on the broker's own
 * handle so a message handler registered there services it.
 * Always returns 0 (message considered consumed).
 * Fix: dropped the stray trailing '\n' from the log format string; no
 * other flux_log*() call site in this file embeds a newline.
 */
static int route_to_handle (const flux_msg_t *msg, void *arg)
{
    broker_ctx_t *ctx = arg;
    if (flux_requeue (ctx->h, msg, FLUX_RQ_TAIL) < 0)
        flux_log_error (ctx->h, "%s: flux_requeue", __FUNCTION__);
    return 0;
}
/* Dynamic service registration.
* These handlers need to appear in broker.c so that they have
* access to broker internals like modhash
*/
/* service.add: register a dynamic service route owned by the requesting
 * module (identified by first route frame); the sender must be a
 * currently loaded module.
 */
static void service_add_cb (flux_t *h, flux_msg_handler_t *w,
                            const flux_msg_t *msg, void *arg)
{
    broker_ctx_t *ctx = arg;
    const char *name = NULL;
    char *sender = NULL;
    module_t *p;
    int rc = -1;

    if (flux_request_unpack (msg, NULL, "{ s:s }", "service", &name) < 0
        || flux_msg_get_route_first (msg, &sender) < 0)
        goto out;
    if (!(p = module_lookup (ctx->modhash, sender))) {
        errno = ENOENT;
        goto out;
    }
    rc = service_add (ctx->services, name, sender, mod_svc_cb, p);
out:
    if (rc == 0) {
        if (flux_respond (h, msg, NULL) < 0)
            flux_log_error (h, "service_add: flux_respond");
    }
    else if (flux_respond_error (h, msg, errno, NULL) < 0)
        flux_log_error (h, "service_add: flux_respond_error");
    free (sender);
}
/* service.remove: drop a dynamic service route.  Only the module that
 * registered it (route uuid matches) may remove it.
 */
static void service_remove_cb (flux_t *h, flux_msg_handler_t *w,
                               const flux_msg_t *msg, void *arg)
{
    broker_ctx_t *ctx = arg;
    const char *name;
    const char *uuid;
    char *sender = NULL;
    bool removed = false;

    if (flux_request_unpack (msg, NULL, "{ s:s }", "service", &name) == 0
        && flux_msg_get_route_first (msg, &sender) == 0) {
        if (!(uuid = service_get_uuid (ctx->services, name)))
            errno = ENOENT;
        else if (strcmp (uuid, sender) != 0)
            errno = EINVAL;
        else {
            service_remove (ctx->services, name);
            removed = true;
        }
    }
    if (removed) {
        if (flux_respond (h, msg, NULL) < 0)
            flux_log_error (h, "service_remove: flux_respond");
    }
    else if (flux_respond_error (h, msg, errno, NULL) < 0)
        flux_log_error (h, "service_remove: flux_respond_error");
    free (sender);
}
/* Dispatch table for broker-internal request handlers, registered on
 * the broker's handle by broker_add_services().
 */
static const struct flux_msg_handler_spec htab[] = {
    { FLUX_MSGTYPE_REQUEST, "cmb.rmmod",      cmb_rmmod_cb, 0 },
    { FLUX_MSGTYPE_REQUEST, "cmb.insmod",     cmb_insmod_cb, 0 },
    { FLUX_MSGTYPE_REQUEST, "cmb.lsmod",      cmb_lsmod_cb, 0 },
    { FLUX_MSGTYPE_REQUEST, "cmb.lspeer",     cmb_lspeer_cb, 0 },
    { FLUX_MSGTYPE_REQUEST, "cmb.panic",      cmb_panic_cb, 0 },
    { FLUX_MSGTYPE_REQUEST, "cmb.disconnect", cmb_disconnect_cb, 0 },
    { FLUX_MSGTYPE_REQUEST, "cmb.sub",        cmb_sub_cb, 0 },
    { FLUX_MSGTYPE_REQUEST, "cmb.unsub",      cmb_unsub_cb, 0 },
    { FLUX_MSGTYPE_REQUEST, "service.add",    service_add_cb, 0 },
    { FLUX_MSGTYPE_REQUEST, "service.remove", service_remove_cb, 0 },
    FLUX_MSGHANDLER_TABLE_END,
};
/* A broker-internal service name plus the set of ranks (idset string;
 * NULL means all ranks) on which it is registered.
 */
struct internal_service {
    const char *name;
    const char *nodeset;
};
/* Services registered by broker_add_services(); requests addressed to
 * these names are requeued on the broker handle (route_to_handle)
 * rather than routed to a comms module.
 */
static struct internal_service services[] = {
    { "cmb",                NULL }, // kind of a catch-all, slowly deprecating
    { "log",                NULL },
    { "seq",                "[0]" },
    { "content",            NULL },
    { "hello",              NULL },
    { "attr",               NULL },
    { "heaptrace",          NULL },
    { "event",              "[0]" },
    { "service",            NULL },
    { NULL, NULL, },
};
/* Register builtin services (sharing ctx->h and broker thread).
* Register message handlers for some cmb services. Others are registered
* in their own initialization functions.
*/
/* Register the built-in services listed in services[] (subject to each
 * entry's nodeset) plus the htab[] message handlers.  Returns the
 * handler vector for later broker_remove_services(), or NULL on error.
 */
static flux_msg_handler_t **broker_add_services (broker_ctx_t *ctx)
{
    flux_msg_handler_t **handlers;
    uint32_t rank = overlay_get_rank (ctx->overlay);
    struct internal_service *svc;

    for (svc = services; svc->name; svc++) {
        if (!nodeset_member (svc->nodeset, rank))
            continue;
        if (service_add (ctx->services, svc->name, NULL,
                         route_to_handle, ctx) < 0) {
            log_err ("error registering service for %s", svc->name);
            return NULL;
        }
    }
    if (flux_msg_handler_addvec (ctx->h, htab, ctx, &handlers) < 0) {
        log_err ("error registering message handlers");
        return NULL;
    }
    return handlers;
}
/* Unregister the message handlers installed by broker_add_services().
 */
static void broker_remove_services (flux_msg_handler_t *handlers[])
{
    flux_msg_handler_delvec (handlers);
}
/**
** reactor callbacks
**/
/* Handle requests from overlay peers.
*/
/* Receive one message from a downstream (child) peer on the overlay
 * and dispatch it by type.  Any message, keepalives included, counts
 * as a checkin for the child's idle tracking.
 */
static void child_cb (overlay_t *ov, void *sock, void *arg)
{
    broker_ctx_t *ctx = arg;
    int type;
    char *uuid = NULL;
    flux_msg_t *msg = flux_msg_recvzsock (sock);
    if (!msg)
        goto done;
    if (flux_msg_get_type (msg, &type) < 0)
        goto done;
    if (flux_msg_get_route_last (msg, &uuid) < 0)
        goto done;
    overlay_checkin_child (ctx->overlay, uuid);
    switch (type) {
        case FLUX_MSGTYPE_KEEPALIVE:
            /* checkin above was the keepalive's only purpose */
            break;
        case FLUX_MSGTYPE_REQUEST:
            broker_request_sendmsg (ctx, msg);
            break;
        case FLUX_MSGTYPE_RESPONSE:
            /* TRICKY:  Fix up ROUTER socket used in reverse direction.
             * Request/response is designed for requests to travel
             * ROUTER->DEALER (up) and responses DEALER-ROUTER (down).
             * When used conventionally, the route stack is accumulated
             * automatically as a request is routed up, and unwound
             * automatically as a response is routed down.  When responses
             * are routed up, ROUTER socket behavior must be subverted on
             * the receiving end by popping two frames off of the stack and
             * discarding.
             */
            (void)flux_msg_pop_route (msg, NULL);
            (void)flux_msg_pop_route (msg, NULL);
            if (broker_response_sendmsg (ctx, msg) < 0)
                goto done;
            break;
        case FLUX_MSGTYPE_EVENT:
            (void)broker_event_sendmsg (ctx, msg);
            break;
    }
done:
    if (uuid)
        free (uuid);
    flux_msg_destroy (msg);
}
/* Handle events received by parent_cb.
* On rank 0, publisher is wired to send events here also.
*/
static int handle_event (broker_ctx_t *ctx, const flux_msg_t *msg)
{
    uint32_t seq;
    const char *topic, *s;
    if (flux_msg_get_seq (msg, &seq) < 0
            || flux_msg_get_topic (msg, &topic) < 0) {
        flux_log (ctx->h, LOG_ERR, "dropping malformed event");
        return -1;
    }
    /* De-duplicate using monotonically increasing sequence numbers */
    if (seq <= ctx->event_recv_seq) {
        //flux_log (ctx->h, LOG_DEBUG, "dropping duplicate event %d", seq);
        return -1;
    }
    if (ctx->event_recv_seq > 0) {   /* don't log initial missed events */
        int first = ctx->event_recv_seq + 1;
        int count = seq - first;
        if (count > 1)
            flux_log (ctx->h, LOG_ERR, "lost events %d-%d", first, seq - 1);
        else if (count == 1)
            flux_log (ctx->h, LOG_ERR, "lost event %d", first);
    }
    ctx->event_recv_seq = seq;
    /* Forward to this rank's children.
     */
    if (overlay_mcast_child (ctx->overlay, msg) < 0)
        flux_log_error (ctx->h, "%s: overlay_mcast_child", __FUNCTION__);
    /* Internal services may install message handlers for events.
     * Requeue on the broker handle if any subscription prefix matches.
     */
    s = zlist_first (ctx->subscriptions);
    while (s) {
        if (!strncmp (s, topic, strlen (s))) {
            if (flux_requeue (ctx->h, msg, FLUX_RQ_TAIL) < 0)
                flux_log_error (ctx->h, "%s: flux_requeue\n", __FUNCTION__);
            break;
        }
        s = zlist_next (ctx->subscriptions);
    }
    /* Finally, route to local module subscribers.
     */
    return module_event_mcast (ctx->modhash, msg);
}
/* Handle messages from one or more parents.
*/
/* Receive one message from the upstream (parent) peer on the overlay
 * and dispatch by type: responses route down toward the requester,
 * events are stripped of routing info and distributed via
 * handle_event(), and requests re-enter broker routing.
 */
static void parent_cb (overlay_t *ov, void *sock, void *arg)
{
    broker_ctx_t *ctx = arg;
    flux_msg_t *msg = flux_msg_recvzsock (sock);
    int type;
    if (!msg)
        goto done;
    if (flux_msg_get_type (msg, &type) < 0)
        goto done;
    switch (type) {
        case FLUX_MSGTYPE_RESPONSE:
            if (broker_response_sendmsg (ctx, msg) < 0)
                goto done;
            break;
        case FLUX_MSGTYPE_EVENT:
            if (flux_msg_clear_route (msg) < 0) {
                flux_log (ctx->h, LOG_ERR, "dropping malformed event");
                goto done;
            }
            if (handle_event (ctx, msg) < 0)
                goto done;
            break;
        case FLUX_MSGTYPE_REQUEST:
            broker_request_sendmsg (ctx, msg);
            break;
        default:
            flux_log (ctx->h, LOG_ERR, "%s: unexpected %s", __FUNCTION__,
                      flux_msg_typestr (type));
            break;
    }
done:
    flux_msg_destroy (msg);
}
/* Handle messages on the service socket of a comms module.
*/
/* Receive one message from a comms module's service socket and dispatch
 * by type.  Keepalives carry module state-machine updates (status and
 * errno) rather than traffic; see the FLUX_MODSTATE_* handling below.
 */
static void module_cb (module_t *p, void *arg)
{
    broker_ctx_t *ctx = arg;
    flux_msg_t *msg = module_recvmsg (p);
    int type;
    int ka_errnum, ka_status;
    if (!msg)
        goto done;
    if (flux_msg_get_type (msg, &type) < 0)
        goto done;
    switch (type) {
        case FLUX_MSGTYPE_RESPONSE:
            (void)broker_response_sendmsg (ctx, msg);
            break;
        case FLUX_MSGTYPE_REQUEST:
            broker_request_sendmsg (ctx, msg);
            break;
        case FLUX_MSGTYPE_EVENT:
            if (broker_event_sendmsg (ctx, msg) < 0) {
                flux_log_error (ctx->h, "%s(%s): broker_event_sendmsg %s",
                                __FUNCTION__, module_get_name (p),
                                flux_msg_typestr (type));
            }
            break;
        case FLUX_MSGTYPE_KEEPALIVE:
            if (flux_keepalive_decode (msg, &ka_errnum, &ka_status) < 0) {
                flux_log_error (ctx->h, "%s: flux_keepalive_decode",
                                module_get_name (p));
                break;
            }
            if (ka_status == FLUX_MODSTATE_FINALIZING) {
                /* Module is finalizing and doesn't want any more messages.
                 * mute the module and respond with the same keepalive
                 * message for synchronization (module waits to proceed)
                 */
                module_mute (p);
                if (module_sendmsg (p, msg) < 0)
                    flux_log_error (ctx->h,
                                    "%s: reply to finalizing: module_sendmsg",
                                    module_get_name (p));
            }
            if (ka_status == FLUX_MODSTATE_EXITED)
                module_set_errnum (p, ka_errnum);
            /* may trigger module_status_cb() for insmod/rmmod responses */
            module_set_status (p, ka_status);
            break;
        default:
            flux_log (ctx->h, LOG_ERR, "%s(%s): unexpected %s",
                      __FUNCTION__, module_get_name (p),
                      flux_msg_typestr (type));
            break;
    }
done:
    flux_msg_destroy (msg);
}
/* Answer a pending insmod request for module 'p', if one is queued.
 * A module that has EXITED with a nonzero errnum gets an error response;
 * otherwise a success response is sent.
 * Returns 0 (including when nothing is pending), or -1 on respond failure.
 */
static int module_insmod_respond (flux_t *h, module_t *p)
{
    flux_msg_t *req = module_pop_insmod (p);
    int errnum = 0;
    int rc;

    if (!req)
        return 0;
    /* If the module is EXITED, return error to insmod if mod_main() < 0 */
    if (module_get_status (p) == FLUX_MODSTATE_EXITED)
        errnum = module_get_errnum (p);
    rc = (errnum == 0) ? flux_respond (h, req, NULL)
                       : flux_respond_error (h, req, errnum, NULL);
    flux_msg_destroy (req);
    return rc;
}
/* Answer every pending rmmod request for module 'p'.
 * Returns 0 if all responses were sent, -1 if any flux_respond() failed.
 */
static int module_rmmod_respond (flux_t *h, module_t *p)
{
    int rc = 0;
    flux_msg_t *req;

    while ((req = module_pop_rmmod (p)) != NULL) {
        if (flux_respond (h, req, NULL) < 0)
            rc = -1;
        flux_msg_destroy (req);
    }
    return rc;
}
/* Module run-state transition callback.
 * 'prev_status' is the state being left; module_get_status() gives the
 * state just entered.
 */
static void module_status_cb (module_t *p, int prev_status, void *arg)
{
    broker_ctx_t *ctx = arg;
    int status = module_get_status (p);
    const char *name = module_get_name (p);

    /* Transition from INIT
     * If module started normally, i.e. INIT->SLEEPING/RUNNING, then
     * respond to insmod requests now. O/w, delay responses until
     * EXITED, when any errnum is available.
     */
    if (prev_status == FLUX_MODSTATE_INIT &&
        (status == FLUX_MODSTATE_RUNNING ||
         status == FLUX_MODSTATE_SLEEPING)) {
        if (module_insmod_respond (ctx->h, p) < 0)
            flux_log_error (ctx->h, "flux_respond to insmod %s", name);
    }

    /* Transition to EXITED
     * Remove service routes, respond to insmod & rmmod request(s), if any,
     * and remove the module (which calls pthread_join).
     */
    if (status == FLUX_MODSTATE_EXITED) {
        flux_log (ctx->h, LOG_DEBUG, "module %s exited", name);
        service_remove_byuuid (ctx->services, module_get_uuid (p));
        if (module_insmod_respond (ctx->h, p) < 0)
            flux_log_error (ctx->h, "flux_respond to insmod %s", name);
        if (module_rmmod_respond (ctx->h, p) < 0)
            flux_log_error (ctx->h, "flux_respond to rmmod %s", name);
        /* Special case for connector-local removal:
         * If shutdown is complete, stop the reactor.
         */
        if (!strcmp (name, "connector-local")) {
            if (shutdown_is_complete (ctx->shutdown))
                flux_reactor_stop (flux_get_reactor (ctx->h));
        }
        module_remove (ctx->modhash, p);
    }
}
/* Terminate the broker upon receipt of a watched signal.
 * _exit(2) is used rather than exit(3) so no atexit handlers or stdio
 * flushing run on this abnormal-termination path.
 */
static void signal_cb (flux_reactor_t *r, flux_watcher_t *w,
                       int revents, void *arg)
{
    broker_ctx_t *ctx = arg;
    int signum = flux_signal_watcher_get_signum (w);
    int rank = overlay_get_rank (ctx->overlay);

    log_msg ("signal %d (%s) on rank %u",
             signum,
             strsignal (signum),
             rank);
    _exit (1);
}
/* Send a request message down the TBON.
 * N.B. this message is going from ROUTER socket to DEALER socket.
 * Since ROUTER pops a route off the stack and uses it to select the peer,
 * we must push *two* routes on the stack: the identity of this broker,
 * then the identity the peer. The parent_cb() can then accept the request
 * from DEALER as though it were received on ROUTER.
 * Returns 0 on success, -1 with errno set on failure.
 */
static int sendmsg_child_request (broker_ctx_t *ctx,
                                  const flux_msg_t *msg,
                                  uint32_t nodeid)
{
    flux_msg_t *cpy = flux_msg_copy (msg, true);
    int saved_errno;
    char uuid[16];
    int rc = -1;

    /* Fix: bail out explicitly if the copy failed (e.g. ENOMEM) rather
     * than passing a NULL message to flux_msg_push_route().
     */
    if (!cpy)
        goto done;
    snprintf (uuid, sizeof (uuid), "%"PRIu32, overlay_get_rank (ctx->overlay));
    if (flux_msg_push_route (cpy, uuid) < 0)
        goto done;
    snprintf (uuid, sizeof (uuid), "%"PRIu32, nodeid);
    if (flux_msg_push_route (cpy, uuid) < 0)
        goto done;
    if (overlay_sendmsg_child (ctx->overlay, cpy) < 0)
        goto done;
    rc = 0;
done:
    /* Preserve errno across destroy; flux_msg_destroy(NULL) is a no-op. */
    saved_errno = errno;
    flux_msg_destroy (cpy);
    errno = saved_errno;
    return rc;
}
/* Route request.
 * On success, return 0. On failure, return -1 with errno set.
 * Routing is decided from the message's nodeid/flags and this broker's
 * position (rank) in a size-'size' k-ary TBON.
 */
static int broker_request_sendmsg_internal (broker_ctx_t *ctx,
                                            const flux_msg_t *msg)
{
    uint32_t rank = overlay_get_rank (ctx->overlay);
    uint32_t size = overlay_get_size (ctx->overlay);
    uint32_t nodeid;
    uint8_t flags;

    if (flux_msg_get_nodeid (msg, &nodeid) < 0)
        return -1;
    if (flux_msg_get_flags (msg, &flags) < 0)
        return -1;
    /* Route up TBON if destination if upstream of this broker.
     */
    if ((flags & FLUX_MSGFLAG_UPSTREAM) && nodeid == rank) {
        if (overlay_sendmsg_parent (ctx->overlay, msg) < 0)
            return -1;
    }
    /* Deliver to local service if destination *could* be this broker.
     * If there is no such service locally (ENOSYS), route up TBON.
     * EHOSTUNREACH from the parent send (e.g. at rank 0, which has no
     * parent) is mapped back to ENOSYS: "no such service anywhere".
     */
    else if (((flags & FLUX_MSGFLAG_UPSTREAM) && nodeid != rank)
            || nodeid == FLUX_NODEID_ANY) {
        if (service_send (ctx->services, msg) < 0) {
            if (errno != ENOSYS)
                return -1;
            if (overlay_sendmsg_parent (ctx->overlay, msg) < 0) {
                if (errno == EHOSTUNREACH)
                    errno = ENOSYS;
                return -1;
            }
        }
    }
    /* Deliver to local service if this broker is the addressed rank.
     */
    else if (nodeid == rank) {
        if (service_send (ctx->services, msg) < 0)
            return -1;
    }
    /* Send the request up or down TBON as addressed.
     * kary_child_route() picks the child subtree containing 'nodeid';
     * KARY_NONE means the destination is not below us, so go up.
     */
    else {
        uint32_t down_rank;
        down_rank = kary_child_route (ctx->tbon_k, size, rank, nodeid);
        if (down_rank == KARY_NONE) { // up
            if (overlay_sendmsg_parent (ctx->overlay, msg) < 0)
                return -1;
        }
        else { // down
            if (sendmsg_child_request (ctx, msg, down_rank) < 0)
                return -1;
        }
    }
    return 0;
}
/* Route a request; on routing failure send an error response back to
 * the requestor.  When the error is ENOSYS, include a human-readable
 * message naming the unmatched service topic.
 */
static void broker_request_sendmsg (broker_ctx_t *ctx, const flux_msg_t *msg)
{
    const char *topic;
    char errbuf[64];
    const char *errstr = NULL;

    if (broker_request_sendmsg_internal (ctx, msg) == 0)
        return;
    if (errno == ENOSYS && flux_msg_get_topic (msg, &topic) == 0) {
        snprintf (errbuf,
                  sizeof (errbuf),
                  "No service matching %s is registered", topic);
        errstr = errbuf;
    }
    if (flux_respond_error (ctx->h, msg, errno, errstr) < 0)
        flux_log_error (ctx->h, "flux_respond");
}
/* Broker's use their rank in place of a UUID for message routing purposes.
 * Try to convert a UUID from a message to a rank.
 * It must be entirely numerical, and be less than 'size'.
 * If it works, assign result to 'rank' and return true.
 * If it doesn't return false.
 */
static bool uuid_to_rank (const char *s, uint32_t size, uint32_t *rank)
{
    unsigned long num;
    char *endptr;

    /* Fix: cast to unsigned char -- passing a plain char that may be
     * negative to isdigit() is undefined behavior (C99 7.4).
     */
    if (!isdigit ((unsigned char)*s))
        return false;
    errno = 0;
    num = strtoul (s, &endptr, 10);
    if (errno != 0)        /* e.g. ERANGE on overflow */
        return false;
    if (*endptr != '\0')   /* trailing non-digit characters */
        return false;
    if (num >= size)       /* rank must lie within the instance */
        return false;
    *rank = num;
    return true;
}
/* Test whether the TBON parent of this broker is 'rank'. */
static bool is_my_parent (broker_ctx_t *ctx, uint32_t rank)
{
    uint32_t self = overlay_get_rank (ctx->overlay);
    return kary_parentof (ctx->tbon_k, self) == rank;
}
/* Route a response message, determining next hop from route stack.
 * If there is no next hop, routing is complete to broker-resident service.
 * If the next hop is a rank, route up or down the TBON.
 * If not a rank, look up a comms module by uuid.
 * Returns 0 on success, -1 with errno set on failure.
 */
static int broker_response_sendmsg (broker_ctx_t *ctx, const flux_msg_t *msg)
{
    int rc = -1;
    char *uuid = NULL;  /* owned; freed at done with errno preserved */
    uint32_t rank;

    if (flux_msg_get_route_last (msg, &uuid) < 0)
        goto done;
    if (uuid == NULL) { // broker resident service
        /* Empty route stack: destination is this broker itself, so
         * requeue the message on the broker's own handle.
         */
        if (flux_requeue (ctx->h, msg, FLUX_RQ_TAIL) < 0)
            goto done;
    }
    else if (uuid_to_rank (uuid, overlay_get_size (ctx->overlay), &rank)) {
        if (is_my_parent (ctx, rank)) {
            /* N.B. this message is going from DEALER socket to ROUTER socket.
             * Instead of popping a route off the stack, ROUTER pushes one
             * on, so the upstream broker must detect this case and pop
             * *two* off to maintain route stack integrity. See child_cb().
             */
            if (overlay_sendmsg_parent (ctx->overlay, msg) < 0)
                goto done;
        }
        else {
            if (overlay_sendmsg_child (ctx->overlay, msg) < 0) {
                /* Map "unknown peer" to a routing error. */
                if (errno == EINVAL)
                    errno = EHOSTUNREACH;
                goto done;
            }
        }
    }
    else {
        /* Non-numeric uuid: next hop is a comms module on this broker. */
        if (module_response_sendmsg (ctx->modhash, msg) < 0)
            goto done;
    }
    rc = 0;
done:
    ERRNO_SAFE_WRAP (free, uuid);
    return rc;
}
/* Events are forwarded up the TBON to rank 0, then published from there.
 * (This mechanism predates and is separate from the "event.pub" service).
 * Returns 0 on success, -1 with errno set on failure.
 */
static int broker_event_sendmsg (broker_ctx_t *ctx, const flux_msg_t *msg)
{
    flux_msg_t *cpy;
    int rc;

    if (overlay_get_rank (ctx->overlay) == 0)
        return publisher_send (ctx->publisher, msg) < 0 ? -1 : 0;

    /* Non-root rank: copy, enable routing, and forward to parent. */
    if (!(cpy = flux_msg_copy (msg, true)))
        return -1;
    rc = -1;
    if (flux_msg_enable_route (cpy) == 0
        && overlay_sendmsg_parent (ctx->overlay, cpy) == 0)
        rc = 0;
    flux_msg_destroy (cpy);
    return rc;
}
/**
 ** Broker's internal flux_t implementation
 ** N.B. recv() method is missing because messages are "received"
 ** when routing logic calls flux_requeue().
 **/

/* flux_t send() method: work on a copy of the message, fill in any
 * missing credentials with the broker's own, then route by type.
 * Returns 0 on success, -1 with errno set (EINVAL for unknown types).
 */
static int broker_send (void *impl, const flux_msg_t *msg, int flags)
{
    broker_ctx_t *ctx = impl;
    int type;
    struct flux_msg_cred cred;
    flux_msg_t *cpy = NULL;
    int rc = -1;

    if (!(cpy = flux_msg_copy (msg, true)))
        goto done;
    if (flux_msg_get_type (cpy, &type) < 0)
        goto done;
    if (flux_msg_get_cred (cpy, &cred) < 0)
        goto done;
    /* Stamp unset credentials with the broker's identity. */
    if (cred.userid == FLUX_USERID_UNKNOWN)
        cred.userid = ctx->cred.userid;
    if (cred.rolemask == FLUX_ROLE_NONE)
        cred.rolemask = ctx->cred.rolemask;
    if (flux_msg_set_cred (cpy, cred) < 0)
        goto done;
    switch (type) {
        case FLUX_MSGTYPE_REQUEST:
            rc = broker_request_sendmsg_internal (ctx, cpy);
            break;
        case FLUX_MSGTYPE_RESPONSE:
            rc = broker_response_sendmsg (ctx, cpy);
            break;
        case FLUX_MSGTYPE_EVENT:
            rc = broker_event_sendmsg (ctx, cpy);
            break;
        default:
            errno = EINVAL;
            break;
    }
done:
    flux_msg_destroy (cpy);
    return rc;
}
/* flux_t event_subscribe() method: remember 'topic' on the broker's
 * subscription list (list owns a copy).
 * Returns 0 on success, -1 with errno = ENOMEM on failure.
 */
static int broker_subscribe (void *impl, const char *topic)
{
    broker_ctx_t *ctx = impl;
    char *cpy = strdup (topic);

    if (cpy && zlist_append (ctx->subscriptions, cpy) == 0) {
        zlist_freefn (ctx->subscriptions, cpy, free, true);
        return 0;
    }
    free (cpy);  /* free(NULL) is a no-op */
    errno = ENOMEM;
    return -1;
}
static int broker_unsubscribe (void *impl, const char *topic)
{
broker_ctx_t *ctx = impl;
char *s = zlist_first (ctx->subscriptions);
while (s) {
if (!strcmp (s, topic)) {
zlist_remove (ctx->subscriptions, s);
break;
}
s = zlist_next (ctx->subscriptions);
}
return 0;
}
/* Method table for the broker's internal flux_t handle.  Only send and
 * event (un)subscribe are provided; there is no recv method because the
 * routing code delivers messages via flux_requeue().
 */
static const struct flux_handle_ops broker_handle_ops = {
    .send = broker_send,
    .event_subscribe = broker_subscribe,
    .event_unsubscribe = broker_unsubscribe,
};
#if HAVE_VALGRIND
/* Disable dlclose() during valgrind operation by interposing a no-op
 * wrapper (presumably so module DSO symbols remain resolvable in
 * valgrind's leak reports -- see valgrind function-wrapping docs).
 */
void I_WRAP_SONAME_FNNAME_ZZ(Za,dlclose)(void *dso) {}
#endif
/*
* vi:tabstop=4 shiftwidth=4 expandtab
*/
| 1 | 26,364 | minor nit - add "persist-filesystem" and "persist-directory" into commit message, as its something people may search on (maybe applies to a few other commit messages) | flux-framework-flux-core | c |
@@ -7662,8 +7662,6 @@ bool CoreChecks::ValidateEventStageMask(const ValidationStateTracker *state_data
return skip;
}
-static const VkQueueFlagBits kQueueTypeArray[] = {VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_TRANSFER_BIT};
-
bool CoreChecks::CheckStageMaskQueueCompatibility(VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
const char *error_code) const { | 1 | /* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
* Copyright (C) 2015-2021 Google Inc.
* Modifications Copyright (C) 2020-2021 Advanced Micro Devices, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Cody Northrop <[email protected]>
* Author: Michael Lentine <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Ian Elliott <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Dustin Graves <[email protected]>
* Author: Jeremy Hayes <[email protected]>
* Author: Jon Ashburn <[email protected]>
* Author: Karl Schultz <[email protected]>
* Author: Mark Young <[email protected]>
* Author: Mike Schuchardt <[email protected]>
* Author: Mike Weiblen <[email protected]>
* Author: Tony Barbour <[email protected]>
* Author: John Zulauf <[email protected]>
* Author: Shannon McPherson <[email protected]>
* Author: Jeremy Kniager <[email protected]>
* Author: Tobias Hector <[email protected]>
*/
#include <algorithm>
#include <array>
#include <assert.h>
#include <cmath>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <valarray>
#include "vk_loader_platform.h"
#include "vk_enum_string_helper.h"
#include "chassis.h"
#include "convert_to_renderpass2.h"
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_utils.h"
#include "command_counter.h"
#include "sync_utils.h"
// Map the designated "non-normal" layout onto its normalized equivalent;
// any other layout passes through unchanged.
static VkImageLayout NormalizeImageLayout(VkImageLayout layout, VkImageLayout non_normal, VkImageLayout normal) {
    if (layout == non_normal) {
        return normal;
    }
    return layout;
}
// Fold the combined depth/stencil read-only layout into the layout where
// only the depth aspect is read-only, for relaxed depth-aspect comparison.
static VkImageLayout NormalizeDepthImageLayout(VkImageLayout layout) {
    constexpr VkImageLayout kCombinedReadOnly = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
    constexpr VkImageLayout kDepthReadOnly = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL;
    return NormalizeImageLayout(layout, kCombinedReadOnly, kDepthReadOnly);
}
// Fold the combined depth/stencil read-only layout into the layout where
// only the stencil aspect is read-only, for relaxed stencil-aspect comparison.
static VkImageLayout NormalizeStencilImageLayout(VkImageLayout layout) {
    constexpr VkImageLayout kCombinedReadOnly = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
    constexpr VkImageLayout kStencilReadOnly = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL;
    return NormalizeImageLayout(layout, kCombinedReadOnly, kStencilReadOnly);
}
// Compare two image layouts for equality, applying relaxed (normalized)
// rules when the reference is to *only* the depth or only the stencil aspect.
bool ImageLayoutMatches(const VkImageAspectFlags aspect_mask, VkImageLayout a, VkImageLayout b) {
    if (a == b) {
        return true;
    }
    if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
        return NormalizeDepthImageLayout(a) == NormalizeDepthImageLayout(b);
    }
    if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
        return NormalizeStencilImageLayout(a) == NormalizeStencilImageLayout(b);
    }
    return false;
}
// These functions are defined *outside* the core_validation namespace as their type
// is also defined outside that namespace
// Hash over the state relevant for pipeline-layout set compatibility:
// the set number, the push-constant ranges, and the descriptor set
// layouts for sets [0, set] (matching the prefix rule in operator==).
size_t PipelineLayoutCompatDef::hash() const {
    hash_util::HashCombiner hc;
    // The set number is integral to the CompatDef's distinctiveness
    hc << set << push_constant_ranges.get();
    const auto &descriptor_set_layouts = *set_layouts_id.get();
    for (uint32_t i = 0; i <= set; i++) {
        hc << descriptor_set_layouts[i].get();
    }
    return hc.Value();
}
// Two CompatDefs are equal when they agree on set number, push-constant
// ranges, and the descriptor set layouts for sets [0, set].
bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
    if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
        return false;
    }

    if (set_layouts_id == other.set_layouts_id) {
        // if it's the same set_layouts_id, then *any* subset will match
        return true;
    }

    // They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
    const auto &descriptor_set_layouts = *set_layouts_id.get();
    assert(set < descriptor_set_layouts.size());
    const auto &other_ds_layouts = *other.set_layouts_id.get();
    assert(set < other_ds_layouts.size());
    for (uint32_t i = 0; i <= set; i++) {
        if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
            return false;
        }
    }
    return true;
}
using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;
// Get the global maps of pending releases
// Accessors for the global queue-family-ownership-transfer (QFO) release
// barrier maps.  The otherwise-unused Tag parameter selects the image vs.
// buffer overload at compile time (tag dispatch).
const GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
    const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) const {
    return qfo_release_image_barrier_map;
}

const GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
    const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) const {
    return qfo_release_buffer_barrier_map;
}

// Non-const variants of the accessors above.
GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
    const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
    return qfo_release_image_barrier_map;
}

GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
    const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
    return qfo_release_buffer_barrier_map;
}
// Construct a fresh, empty subresource layout map for 'image_state'.
static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactory(const IMAGE_STATE &image_state) {
    return std::unique_ptr<ImageSubresourceLayoutMap>(new ImageSubresourceLayoutMap(image_state));
}
// The const variant only need the image as it is the key for the map
const ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(const CMD_BUFFER_STATE *cb_state, VkImage image) {
    const auto found = cb_state->image_layout_map.find(image);
    return (found == cb_state->image_layout_map.cend()) ? nullptr : found->second.get();
}
// The non-const variant only needs the image state, as the factory requires it to construct a new entry
ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE &image_state) {
    auto &layout_map = cb_state->image_layout_map;
    auto found = layout_map.find(image_state.image);
    if (found != layout_map.end()) {
        return found->second.get();
    }
    // Not yet tracked for this command buffer -- create the entry now.
    auto inserted = layout_map.insert(std::make_pair(image_state.image, LayoutMapFactory(image_state)));
    assert(inserted.second);
    ImageSubresourceLayoutMap *new_map = inserted.first->second.get();
    assert(new_map);
    return new_map;
}
// Seed 'image_layout_map' with the image's createInfo.initialLayout over
// its entire subresource range.
void AddInitialLayoutintoImageLayoutMap(const IMAGE_STATE &image_state, GlobalImageLayoutMap &image_layout_map) {
    auto *range_map = GetLayoutRangeMap(&image_layout_map, image_state);
    auto range_gen = subresource_adapter::RangeGenerator(image_state.subresource_encoder, image_state.full_range);
    for (; range_gen->non_empty(); ++range_gen) {
        // Insert with an end() hint -- NOTE(review): assumes RangeGenerator
        // yields ranges in increasing order; confirm in subresource_adapter.
        range_map->insert(range_map->end(), std::make_pair(*range_gen, image_state.createInfo.initialLayout));
    }
}
// Override base class, we have some extra work to do here
void CoreChecks::InitDeviceValidationObject(bool add_obj, ValidationObject *inst_obj, ValidationObject *dev_obj) {
    if (add_obj) {
        // When VK_KHR_performance_query is enabled, add a CommandCounter
        // layer object to the dispatch list (list takes ownership).
        if (dev_obj->device_extensions.vk_khr_performance_query) {
            auto command_counter = new CommandCounter(this);
            dev_obj->object_dispatch.emplace_back(command_counter);
        }
        // N.B. the base-class initialization is also gated on add_obj.
        ValidationStateTracker::InitDeviceValidationObject(add_obj, inst_obj, dev_obj);
    }
}
// Tracks the number of commands recorded in a command buffer.
void CoreChecks::IncrementCommandCount(VkCommandBuffer commandBuffer) {
    GetCBState(commandBuffer)->commandCount++;
}
// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
// 'mem_state' may be null (never bound); a non-null state may be destroyed
// (bound then freed).  Both conditions produce a validation error.
template <typename T1>
bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object,
                                          const VulkanTypedHandle &typed_handle, const char *api_name,
                                          const char *error_code) const {
    bool result = false;
    // NOTE(review): "type_name + 2" skips a two-char prefix -- assumed to be
    // the "Vk" prefix of object_string entries; confirm against that table.
    auto type_name = object_string[typed_handle.type];
    if (!mem_state) {
        result |=
            LogError(object, error_code, "%s: %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
                     api_name, report_data->FormatHandle(typed_handle).c_str(), type_name + 2);
    } else if (mem_state->destroyed) {
        result |= LogError(object, error_code,
                           "%s: %s used with no memory bound and previously bound memory was freed. Memory must not be freed "
                           "prior to this operation.",
                           api_name, report_data->FormatHandle(typed_handle).c_str());
    }
    return result;
}
// Check to see if memory was ever bound to this image
// Swapchain-created images must instead be bound to their originating
// swapchain; sparse images and external Android hardware buffers are exempt.
bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) const {
    bool result = false;
    if (image_state->create_from_swapchain != VK_NULL_HANDLE) {
        if (image_state->bind_swapchain == VK_NULL_HANDLE) {
            // Created from a swapchain but never bound to one
            LogObjectList objlist(image_state->image);
            objlist.add(image_state->create_from_swapchain);
            result |= LogError(
                objlist, error_code,
                "%s: %s is created by %s, and the image should be bound by calling vkBindImageMemory2(), and the pNext chain "
                "includes VkBindImageMemorySwapchainInfoKHR.",
                api_name, report_data->FormatHandle(image_state->image).c_str(),
                report_data->FormatHandle(image_state->create_from_swapchain).c_str());
        } else if (image_state->create_from_swapchain != image_state->bind_swapchain) {
            // Bound to a different swapchain than the one that created it
            LogObjectList objlist(image_state->image);
            objlist.add(image_state->create_from_swapchain);
            objlist.add(image_state->bind_swapchain);
            result |=
                LogError(objlist, error_code,
                         "%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same "
                         "swapchain",
                         api_name, report_data->FormatHandle(image_state->image).c_str(),
                         report_data->FormatHandle(image_state->create_from_swapchain).c_str(),
                         report_data->FormatHandle(image_state->bind_swapchain).c_str());
        }
    } else if (image_state->external_ahb) {
        // TODO look into how to properly check for a valid bound memory for an external AHB
    } else if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        // Non-sparse image: must have a live memory binding
        result |= VerifyBoundMemoryIsValid(image_state->binding.mem_state.get(), image_state->image,
                                           VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage), api_name, error_code);
    }
    return result;
}
// Check to see if memory was bound to this buffer
bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name,
                                               const char *error_code) const {
    // Sparse-binding buffers are bound through vkQueueBindSparse and are exempt.
    const bool sparse =
        (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) != 0;
    if (sparse) {
        return false;
    }
    return VerifyBoundMemoryIsValid(buffer_state->binding.mem_state.get(), buffer_state->buffer,
                                    VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer), api_name, error_code);
}
// Check to see if memory was bound to this acceleration structure (NV variant)
bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE *as_state, const char *api_name,
                                                              const char *error_code) const {
    return VerifyBoundMemoryIsValid(as_state->binding.mem_state.get(), as_state->acceleration_structure,
                                    VulkanTypedHandle(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureNV),
                                    api_name, error_code);
}

// Check to see if memory was bound to this acceleration structure (KHR variant)
bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE_KHR *as_state,
                                                              const char *api_name, const char *error_code) const {
    return VerifyBoundMemoryIsValid(as_state->binding.mem_state.get(), as_state->acceleration_structure,
                                    VulkanTypedHandle(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureKHR),
                                    api_name, error_code);
}
// Valid usage checks for a call to SetMemBinding().
// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *apiName) const {
    bool skip = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        const BINDABLE *mem_binding = ValidationStateTracker::GetObjectMemBinding(typed_handle);
        assert(mem_binding);
        if (mem_binding->sparse) {
            // Sparse resources are bound via vkQueueBindSparse, not vkBind*Memory.
            // The VUID differs between the single-object entry point and the
            // batched (2/KHR) entry point, selected by apiName.
            const char *error_code = nullptr;
            const char *handle_type = nullptr;
            if (typed_handle.type == kVulkanObjectTypeBuffer) {
                handle_type = "BUFFER";
                if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                    error_code = "VUID-vkBindBufferMemory-buffer-01030";
                } else {
                    error_code = "VUID-VkBindBufferMemoryInfo-buffer-01030";
                }
            } else if (typed_handle.type == kVulkanObjectTypeImage) {
                handle_type = "IMAGE";
                if (strcmp(apiName, "vkBindImageMemory()") == 0) {
                    error_code = "VUID-vkBindImageMemory-image-01045";
                } else {
                    error_code = "VUID-VkBindImageMemoryInfo-image-01045";
                }
            } else {
                // Unsupported object type
                assert(false);
            }

            LogObjectList objlist(mem);
            objlist.add(typed_handle);
            skip |= LogError(objlist, error_code,
                             "In %s, attempting to bind %s to %s which was created with sparse memory flags "
                             "(VK_%s_CREATE_SPARSE_*_BIT).",
                             apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
                             handle_type);
        }
        const DEVICE_MEMORY_STATE *mem_info = ValidationStateTracker::GetDevMemState(mem);
        if (mem_info) {
            const DEVICE_MEMORY_STATE *prev_binding = mem_binding->binding.mem_state.get();
            if (prev_binding) {
                if (!prev_binding->destroyed) {
                    // Rebinding while a live binding exists: bindings are immutable.
                    const char *error_code = nullptr;
                    if (typed_handle.type == kVulkanObjectTypeBuffer) {
                        if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
                            error_code = "VUID-vkBindBufferMemory-buffer-01029";
                        } else {
                            error_code = "VUID-VkBindBufferMemoryInfo-buffer-01029";
                        }
                    } else if (typed_handle.type == kVulkanObjectTypeImage) {
                        if (strcmp(apiName, "vkBindImageMemory()") == 0) {
                            error_code = "VUID-vkBindImageMemory-image-01044";
                        } else {
                            error_code = "VUID-VkBindImageMemoryInfo-image-01044";
                        }
                    } else {
                        // Unsupported object type
                        assert(false);
                    }

                    LogObjectList objlist(mem);
                    objlist.add(typed_handle);
                    objlist.add(prev_binding->mem);
                    skip |=
                        LogError(objlist, error_code, "In %s, attempting to bind %s to %s which has already been bound to %s.",
                                 apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
                                 report_data->FormatHandle(prev_binding->mem).c_str());
                } else {
                    // Previous binding's memory was freed: still an error, since
                    // memory bindings are immutable in Vulkan.
                    LogObjectList objlist(mem);
                    objlist.add(typed_handle);
                    skip |=
                        LogError(objlist, kVUID_Core_MemTrack_RebindObject,
                                 "In %s, attempting to bind %s to %s which was previous bound to memory that has "
                                 "since been freed. Memory bindings are immutable in "
                                 "Vulkan so this attempt to bind to new memory is not allowed.",
                                 apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str());
                }
            }
        }
    }
    return skip;
}
// Verify that 'queue_family' is one of the queue families this device was
// created with.  When 'optional' is true, VK_QUEUE_FAMILY_IGNORED is accepted.
bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name,
                                           const char *error_code, bool optional = false) const {
    bool skip = false;
    if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
        skip |= LogError(device, error_code,
                         "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
                         cmd_name, parameter_name);
    } else if (queue_family_index_map.find(queue_family) == queue_family_index_map.end()) {
        // Not among the families named in the VkDeviceQueueCreateInfo array
        skip |=
            LogError(device, error_code,
                     "%s: %s (= %" PRIu32
                     ") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.",
                     cmd_name, parameter_name, queue_family);
    }

    return skip;
}
// Validate the specified queue families against the families supported by the physical device that owns this device
// Checks each entry for uniqueness, for VK_QUEUE_FAMILY_IGNORED, and for
// being within the physical device's known family count.
bool CoreChecks::ValidatePhysicalDeviceQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families,
                                                     const char *cmd_name, const char *array_parameter_name,
                                                     const char *vuid) const {
    bool skip = false;
    if (queue_families) {
        std::unordered_set<uint32_t> set;  // families seen so far, for duplicate detection
        for (uint32_t i = 0; i < queue_family_count; ++i) {
            std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]";

            if (set.count(queue_families[i])) {
                skip |= LogError(device, vuid, "%s: %s (=%" PRIu32 ") is not unique within %s array.", cmd_name,
                                 parameter_name.c_str(), queue_families[i], array_parameter_name);
            } else {
                set.insert(queue_families[i]);
                if (queue_families[i] == VK_QUEUE_FAMILY_IGNORED) {
                    skip |= LogError(
                        device, vuid,
                        "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
                        cmd_name, parameter_name.c_str());
                } else if (queue_families[i] >= physical_device_state->queue_family_known_count) {
                    LogObjectList obj_list(physical_device);
                    obj_list.add(device);
                    skip |=
                        LogError(obj_list, vuid,
                                 "%s: %s (= %" PRIu32
                                 ") is not one of the queue families supported by the parent PhysicalDevice %s of this device %s.",
                                 cmd_name, parameter_name.c_str(), queue_families[i],
                                 report_data->FormatHandle(physical_device).c_str(), report_data->FormatHandle(device).c_str());
                }
            }
        }
    }
    return skip;
}
// Check object status for selected flag state
bool CoreChecks::ValidateStatus(const CMD_BUFFER_STATE *pNode, CBStatusFlags status_mask, const char *fail_msg,
                                const char *msg_code) const {
    if (pNode->status & status_mask) {
        return false;  // required state bit(s) present -- nothing to report
    }
    return LogError(pNode->commandBuffer, msg_code, "%s: %s..", report_data->FormatHandle(pNode->commandBuffer).c_str(),
                    fail_msg);
}
// Return true if for a given PSO, the given state enum is dynamic, else return false
bool CoreChecks::IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) const {
if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
}
}
return false;
}
// Validate state stored as flags at time of draw call
// Checks that every piece of dynamic state the bound pipeline requires was
// actually set on the command buffer before the draw.
bool CoreChecks::ValidateDrawStateFlags(const CMD_BUFFER_STATE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                        const char *msg_code) const {
    bool result = false;
    if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
        pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
        result |=
            ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, "Dynamic line width state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |=
            ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET, "Dynamic blend constants state not set for this command buffer",
                                 msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |=
            ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET,
                                 "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET,
                                 "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET,
                                 "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND,
                                 "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }
    if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
        pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
        // NOTE(review): unlike the depthBias check above, pRasterizationState
        // is dereferenced here without a null check -- relies on it being a
        // valid pointer for line-topology pipelines; confirm.
        const auto *line_state =
            LvlFindInChain<VkPipelineRasterizationLineStateCreateInfoEXT>(pPipe->graphicsPipelineCI.pRasterizationState->pNext);
        if (line_state && line_state->stippledLineEnable) {
            result |= ValidateStatus(pCB, CBSTATUS_LINE_STIPPLE_SET, "Dynamic line stipple state not set for this command buffer",
                                     msg_code);
        }
    }

    return result;
}
// Emit the standard "renderpasses incompatible" error for a mismatched
// attachment pair; 'msg' describes the specific incompatibility.
bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
                                             const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
                                             const char *msg, const char *caller, const char *error_code) const {
    LogObjectList objlist(rp1_state->renderPass);
    objlist.add(rp2_state->renderPass);
    return LogError(objlist, error_code,
                    "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s Attachment %u is not "
                    "compatible with %u: %s.",
                    caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
                    report_data->FormatHandle(rp2_state->renderPass).c_str(), primary_attach, secondary_attach, msg);
}
// Check that a pair of attachment references (one per render pass) are compatible:
// out-of-range references are treated as UNUSED; both-unused is trivially compatible;
// otherwise the referenced attachment descriptions must match in format, samples, and flags.
// Returns true if any incompatibility was logged.
bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
                                                 const char *type2_string, const RENDER_PASS_STATE *rp2_state,
                                                 uint32_t primary_attach, uint32_t secondary_attach, const char *caller,
                                                 const char *error_code) const {
    bool skip = false;
    const auto &ci1 = rp1_state->createInfo;
    const auto &ci2 = rp2_state->createInfo;
    // An attachment index beyond the createInfo's attachment array is treated as unused
    if (primary_attach >= ci1.attachmentCount) {
        primary_attach = VK_ATTACHMENT_UNUSED;
    }
    if (secondary_attach >= ci2.attachmentCount) {
        secondary_attach = VK_ATTACHMENT_UNUSED;
    }
    const bool primary_unused = (primary_attach == VK_ATTACHMENT_UNUSED);
    const bool secondary_unused = (secondary_attach == VK_ATTACHMENT_UNUSED);
    if (primary_unused && secondary_unused) {
        return skip;
    }
    if (primary_unused) {
        return LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
                                           "The first is unused while the second is not.", caller, error_code);
    }
    if (secondary_unused) {
        return LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
                                           "The second is unused while the first is not.", caller, error_code);
    }
    // Both attachments used: the descriptions must agree on format, sample count, and flags
    const auto &primary_attachment_desc = ci1.pAttachments[primary_attach];
    const auto &secondary_attachment_desc = ci2.pAttachments[secondary_attach];
    if (primary_attachment_desc.format != secondary_attachment_desc.format) {
        skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
                                            "They have different formats.", caller, error_code);
    }
    if (primary_attachment_desc.samples != secondary_attachment_desc.samples) {
        skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
                                            "They have different samples.", caller, error_code);
    }
    if (primary_attachment_desc.flags != secondary_attachment_desc.flags) {
        skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
                                            "They have different flags.", caller, error_code);
    }
    return skip;
}
// Verify that the subpass at index 'subpass' is compatible between two renderpasses per the
// "Render Pass Compatibility" rules: corresponding input, color, resolve, and depth/stencil
// attachment references must be pairwise compatible, and both subpasses must agree on
// multiview (viewMask) usage. Returns true if any incompatibility was logged.
bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
                                              const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
                                              const char *caller, const char *error_code) const {
    bool skip = false;
    const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
    const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
    // Compare input attachments pairwise; a slot missing on either side is treated as UNUSED.
    uint32_t max_input_attachment_count = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < max_input_attachment_count; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
                                                secondary_input_attach, caller, error_code);
    }
    // Compare color attachments (and their resolve attachments) pairwise.
    uint32_t max_color_attachment_count = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < max_color_attachment_count; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
                                                secondary_color_attach, caller, error_code);
        // NOTE(review): resolve attachments are only compared when the first render pass has more
        // than one subpass -- confirm this condition against the spec's compatibility rules.
        if (rp1_state->createInfo.subpassCount > 1) {
            uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
            if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
                primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
            }
            if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
                secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
            }
            skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
                                                    secondary_resolve_attach, caller, error_code);
        }
    }
    // Compare depth/stencil attachments; a null pointer on either side is treated as UNUSED.
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
                                            secondary_depthstencil_attach, caller, error_code);
    // Both renderpasses must agree on Multiview usage
    if (primary_desc.viewMask && secondary_desc.viewMask) {
        if (primary_desc.viewMask != secondary_desc.viewMask) {
            std::stringstream ss;
            ss << "For subpass " << subpass << ", they have a different viewMask. The first has view mask " << primary_desc.viewMask
               << " while the second has view mask " << secondary_desc.viewMask << ".";
            skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, ss.str().c_str(), caller, error_code);
        }
    } else if (primary_desc.viewMask) {
        skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
                                       "The first uses Multiview (has non-zero viewMasks) while the second one does not.", caller,
                                       error_code);
    } else if (secondary_desc.viewMask) {
        skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
                                       "The second uses Multiview (has non-zero viewMasks) while the first one does not.", caller,
                                       error_code);
    }
    return skip;
}
bool CoreChecks::LogInvalidPnextMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, const char *msg, const char *caller,
const char *error_code) const {
LogObjectList objlist(rp1_state->renderPass);
objlist.add(rp2_state->renderPass);
return LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s: %s", caller, type1_string,
report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
report_data->FormatHandle(rp2_state->renderPass).c_str(), msg);
}
// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
// will then feed into this function
// Checks: createInfo flags, subpass count, per-subpass attachment compatibility, and agreement
// on Fragment Density Map usage in the pNext chains. Returns true if any error was logged.
bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
                                                 const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller,
                                                 const char *error_code) const {
    bool skip = false;
    // createInfo flags must be identical for the renderpasses to be compatible.
    if (rp1_state->createInfo.flags != rp2_state->createInfo.flags) {
        LogObjectList objlist(rp1_state->renderPass);
        objlist.add(rp2_state->renderPass);
        skip |=
            LogError(objlist, error_code,
                     "%s: RenderPasses incompatible between %s w/ %s with flags of %u and %s w/ "
                     "%s with a flags of %u.",
                     caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), rp1_state->createInfo.flags,
                     type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(), rp2_state->createInfo.flags);
    }
    // Subpass counts must match; only when they do is per-subpass compatibility checked.
    if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
        LogObjectList objlist(rp1_state->renderPass);
        objlist.add(rp2_state->renderPass);
        skip |= LogError(objlist, error_code,
                         "%s: RenderPasses incompatible between %s w/ %s with a subpassCount of %u and %s w/ "
                         "%s with a subpassCount of %u.",
                         caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(),
                         rp1_state->createInfo.subpassCount, type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(),
                         rp2_state->createInfo.subpassCount);
    } else {
        for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
            skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
        }
    }
    // Find an entry of the Fragment Density Map type in the pNext chain, if it exists
    const auto fdm1 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp1_state->createInfo.pNext);
    const auto fdm2 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp2_state->createInfo.pNext);
    // Both renderpasses must agree on usage of a Fragment Density Map type
    if (fdm1 && fdm2) {
        uint32_t primary_input_attach = fdm1->fragmentDensityMapAttachment.attachment;
        uint32_t secondary_input_attach = fdm2->fragmentDensityMapAttachment.attachment;
        skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
                                                secondary_input_attach, caller, error_code);
    } else if (fdm1) {
        skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
                                       "The first uses a Fragment Density Map while the second one does not.", caller, error_code);
    } else if (fdm2) {
        skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
                                       "The second uses a Fragment Density Map while the first one does not.", caller, error_code);
    }
    return skip;
}
// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) {
    const auto *ms_state = pipe->graphicsPipelineCI.pMultisampleState;
    // Treat a missing or mistyped multisample struct as "no MSAA"
    if ((ms_state == NULL) || (ms_state->sType != VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO)) {
        return VK_SAMPLE_COUNT_1_BIT;
    }
    return ms_state->rasterizationSamples;
}
// Append to stream 's' a comma-separated list of the indices of the set bits in
// 'bits', lowest index first (e.g. 0x55 -> "0,2,4,6"). Writes nothing for bits == 0.
static void ListBits(std::ostream &s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        // Use an unsigned literal for the mask: '1 << 31' shifts into the sign bit of a
        // signed int, which is undefined/implementation-defined behavior before C++20.
        const uint32_t mask = 1u << i;
        if (bits & mask) {
            s << i;
            bits &= ~mask;  // clear the bit so the loop can exit early once none remain
            if (bits) {
                s << ",";  // separator only between entries, never trailing
            }
        }
    }
}
// Build a '|'-separated list of the dynamic-state names encoded in 'input_value'.
// If no bits are set, returns the name corresponding to a zero flag value.
std::string DynamicStateString(CBStatusFlags input_value) {
    std::string result;
    // NOTE(review): '1 << bit' assumes all status bits fit below bit 31 of a signed int --
    // confirm the width of CBStatusFlags if new flag bits are added.
    for (int bit = 0; input_value != 0; ++bit, input_value >>= 1) {
        if ((input_value & 1) == 0) continue;
        if (!result.empty()) result.append("|");
        result.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(1 << bit))));
    }
    if (result.empty()) result.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(0))));
    return result;
}
// Validate draw-time state related to the PSO
bool CoreChecks::ValidatePipelineDrawtimeState(const LAST_BOUND_STATE &state, const CMD_BUFFER_STATE *pCB, CMD_TYPE cmd_type,
const PIPELINE_STATE *pPipeline, const char *caller) const {
bool skip = false;
const auto ¤t_vtx_bfr_binding_info = pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings;
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
// Verify vertex & index buffer for unprotected command buffer.
// Because vertex & index buffer is read only, it doesn't need to care protected command buffer case.
if (enabled_features.core11.protectedMemory == VK_TRUE) {
for (const auto &buffer_binding : current_vtx_bfr_binding_info) {
if (buffer_binding.buffer_state && !buffer_binding.buffer_state->destroyed) {
skip |= ValidateProtectedBuffer(pCB, buffer_binding.buffer_state.get(), caller, vuid.unprotected_command_buffer,
"Buffer is vertex buffer");
}
}
if (pCB->index_buffer_binding.buffer_state && !pCB->index_buffer_binding.buffer_state->destroyed) {
skip |= ValidateProtectedBuffer(pCB, pCB->index_buffer_binding.buffer_state.get(), caller,
vuid.unprotected_command_buffer, "Buffer is index buffer");
}
}
// Verify if using dynamic state setting commands that it doesn't set up in pipeline
CBStatusFlags invalid_status = CBSTATUS_ALL_STATE_SET & ~(pCB->dynamic_status | pCB->static_status);
if (invalid_status) {
std::string dynamic_states = DynamicStateString(invalid_status);
LogObjectList objlist(pCB->commandBuffer);
objlist.add(pPipeline->pipeline);
skip |= LogError(objlist, vuid.dynamic_state_setting_commands,
"%s: %s doesn't set up %s, but it calls the related dynamic state setting commands", caller,
report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), dynamic_states.c_str());
}
// Verify vertex binding
if (pPipeline->vertex_binding_descriptions_.size() > 0) {
for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) {
const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding;
if (current_vtx_bfr_binding_info.size() < (vertex_binding + 1)) {
skip |= LogError(pCB->commandBuffer, vuid.vertex_binding,
"%s: %s expects that this Command Buffer's vertex binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
caller, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), vertex_binding, i,
vertex_binding);
} else if ((current_vtx_bfr_binding_info[vertex_binding].buffer_state == nullptr) &&
!enabled_features.robustness2_features.nullDescriptor) {
skip |= LogError(pCB->commandBuffer, vuid.vertex_binding_null,
"%s: Vertex binding %d must not be VK_NULL_HANDLE %s expects that this Command Buffer's vertex "
"binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
vertex_binding, i, vertex_binding);
}
}
// Verify vertex attribute address alignment
for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) {
const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i];
const auto vertex_binding = attribute_description.binding;
const auto attribute_offset = attribute_description.offset;
const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding);
if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) &&
(vertex_binding < current_vtx_bfr_binding_info.size()) &&
((current_vtx_bfr_binding_info[vertex_binding].buffer_state) ||
enabled_features.robustness2_features.nullDescriptor)) {
auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride;
if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT)) {
vertex_buffer_stride = static_cast<uint32_t>(current_vtx_bfr_binding_info[vertex_binding].stride);
uint32_t attribute_binding_extent =
attribute_description.offset + FormatElementSize(attribute_description.format);
if (vertex_buffer_stride < attribute_binding_extent) {
skip |=
LogError(pCB->commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03363",
"The pStrides[%u] (%u) parameter in the last call to vkCmdBindVertexBuffers2EXT is less than "
"the extent of the binding for attribute %u (%u).",
vertex_binding, vertex_buffer_stride, i, attribute_binding_extent);
}
}
const auto vertex_buffer_offset = current_vtx_bfr_binding_info[vertex_binding].offset;
// Use 1 as vertex/instance index to use buffer stride as well
const auto attrib_address = vertex_buffer_offset + vertex_buffer_stride + attribute_offset;
VkDeviceSize vtx_attrib_req_alignment = pPipeline->vertex_attribute_alignments_[i];
if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) {
LogObjectList objlist(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer);
objlist.add(state.pipeline_state->pipeline);
skip |= LogError(
objlist, vuid.vertex_binding_attribute,
"%s: Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER
", %s,from of %s and vertex %s.",
caller, i, string_VkFormat(attribute_description.format),
report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
report_data->FormatHandle(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer).c_str());
}
} else {
LogObjectList objlist(pCB->commandBuffer);
objlist.add(state.pipeline_state->pipeline);
skip |= LogError(objlist, vuid.vertex_binding_attribute,
"%s: binding #%" PRIu32
" in pVertexAttributeDescriptions of %s is invalid in vkCmdBindVertexBuffers of %s.",
caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
report_data->FormatHandle(pCB->commandBuffer).c_str());
}
}
}
// If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
// Skip check if rasterization is disabled or there is no viewport.
if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
pPipeline->graphicsPipelineCI.pViewportState) {
bool dyn_viewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
bool dyn_scissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
if (dyn_viewport) {
const auto required_viewports_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
const auto missing_viewport_mask = ~pCB->viewportMask & required_viewports_mask;
if (missing_viewport_mask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport(s) ";
ListBits(ss, missing_viewport_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str());
}
}
if (dyn_scissor) {
const auto required_scissor_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
const auto missing_scissor_mask = ~pCB->scissorMask & required_scissor_mask;
if (missing_scissor_mask) {
std::stringstream ss;
ss << caller << ": Dynamic scissor(s) ";
ListBits(ss, missing_scissor_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str());
}
}
bool dyn_viewport_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT);
bool dyn_scissor_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT);
// VUID {refpage}-viewportCount-03417
if (dyn_viewport_count && !dyn_scissor_count) {
const auto required_viewport_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
const auto missing_viewport_mask = ~pCB->viewportWithCountMask & required_viewport_mask;
if (missing_viewport_mask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport with count ";
ListBits(ss, missing_viewport_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewportWithCountEXT().";
skip |= LogError(device, vuid.viewport_count, "%s", ss.str().c_str());
}
}
// VUID {refpage}-scissorCount-03418
if (dyn_scissor_count && !dyn_viewport_count) {
const auto required_scissor_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
const auto missing_scissor_mask = ~pCB->scissorWithCountMask & required_scissor_mask;
if (missing_scissor_mask) {
std::stringstream ss;
ss << caller << ": Dynamic scissor with count ";
ListBits(ss, missing_scissor_mask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissorWithCountEXT().";
skip |= LogError(device, vuid.scissor_count, "%s", ss.str().c_str());
}
}
// VUID {refpage}-viewportCount-03419
if (dyn_scissor_count && dyn_viewport_count) {
if (pCB->viewportWithCountMask != pCB->scissorWithCountMask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport and scissor with count ";
ListBits(ss, pCB->viewportWithCountMask ^ pCB->scissorWithCountMask);
ss << " are used by pipeline state object, but were not provided via matching calls to "
"vkCmdSetViewportWithCountEXT and vkCmdSetScissorWithCountEXT().";
skip |= LogError(device, vuid.viewport_scissor_count, "%s", ss.str().c_str());
}
}
}
// Verify that any MSAA request in PSO matches sample# in bound FB
// Skip the check if rasterization is disabled.
if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline);
if (pCB->activeRenderPass) {
const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2 *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
uint32_t i;
unsigned subpass_num_samples = 0;
for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples);
}
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples);
}
if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples) &&
((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
LogObjectList objlist(pPipeline->pipeline);
objlist.add(pCB->activeRenderPass->renderPass);
skip |= LogError(objlist, kVUID_Core_DrawState_NumSamplesMismatch,
"%s: Num samples mismatch! At draw-time in %s with %u samples while current %s w/ "
"%u samples!",
caller, report_data->FormatHandle(pPipeline->pipeline).c_str(), pso_num_samples,
report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), subpass_num_samples);
}
} else {
skip |= LogError(pPipeline->pipeline, kVUID_Core_DrawState_NoActiveRenderpass,
"%s: No active render pass found at draw-time in %s!", caller,
report_data->FormatHandle(pPipeline->pipeline).c_str());
}
}
// Verify that PSO creation renderPass is compatible with active renderPass
if (pCB->activeRenderPass) {
// TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
// renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass.get(), "pipeline state object",
pPipeline->rp_state.get(), caller, vuid.render_pass_compatible);
}
if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
skip |=
LogError(pPipeline->pipeline, vuid.subpass_index, "%s: Pipeline was built for subpass %u but used in subpass %u.",
caller, pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
}
// Check if depth stencil attachment was created with sample location compatible bit
if (pPipeline->sample_location_enabled == VK_TRUE) {
const safe_VkAttachmentReference2 *ds_attachment =
pCB->activeRenderPass->createInfo.pSubpasses[pCB->activeSubpass].pDepthStencilAttachment;
const FRAMEBUFFER_STATE *fb_state = pCB->activeFramebuffer.get();
if ((ds_attachment != nullptr) && (fb_state != nullptr)) {
const uint32_t attachment = ds_attachment->attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
const auto *imageview_state = GetActiveAttachmentImageViewState(pCB, attachment);
if (imageview_state != nullptr) {
const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image);
if (image_state != nullptr) {
if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT) == 0) {
skip |= LogError(pPipeline->pipeline, vuid.sample_location,
"%s: sampleLocationsEnable is true for the pipeline, but the subpass (%u) depth "
"stencil attachment's VkImage was not created with "
"VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT.",
caller, pCB->activeSubpass);
}
}
}
}
}
}
}
// VUID {refpage}-primitiveTopology-03420
skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_TOPOLOGY_SET, "Dynamic primitive topology state not set for this command buffer",
vuid.primitive_topology);
if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT)) {
bool compatible_topology = false;
switch (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology) {
case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
compatible_topology = true;
break;
default:
break;
}
break;
default:
break;
}
if (!compatible_topology) {
skip |= LogError(pPipeline->pipeline, vuid.primitive_topology,
"%s: the last primitive topology %s state set by vkCmdSetPrimitiveTopologyEXT is "
"not compatible with the pipeline topology %s.",
caller, string_VkPrimitiveTopology(pCB->primitiveTopology),
string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
}
if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) {
skip |= ValidateGraphicsPipelineShaderDynamicState(pPipeline, pCB, caller, vuid);
}
return skip;
}
// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]. On failure, returns false and writes a description into errorMsg.
static bool VerifySetLayoutCompatibility(const debug_report_data *report_data, const cvdescriptorset::DescriptorSet *descriptor_set,
                                         PIPELINE_LAYOUT_STATE const *pipeline_layout, const uint32_t layoutIndex,
                                         string &errorMsg) {
    const auto set_count = pipeline_layout->set_layouts.size();
    if (layoutIndex < set_count) {
        // Push descriptor sets are always considered compatible
        if (descriptor_set->IsPushDescriptor()) return true;
        const auto *layout_node = pipeline_layout->set_layouts[layoutIndex].get();
        return cvdescriptorset::VerifySetLayoutCompatibility(report_data, layout_node, descriptor_set->GetLayout().get(),
                                                             &errorMsg);
    }
    // Requested set index is beyond the pipeline layout's set count
    stringstream error_str;
    error_str << report_data->FormatHandle(pipeline_layout->layout) << ") only contains " << set_count
              << " setLayouts corresponding to sets 0-" << set_count - 1 << ", but you're attempting to bind set to index "
              << layoutIndex;
    errorMsg = error_str.str();
    return false;
}
// Validate overall state at the time of a draw call
bool CoreChecks::ValidateCmdBufDrawState(const CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const bool indexed,
const VkPipelineBindPoint bind_point, const char *function) const {
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
const auto lv_bind_point = ConvertToLvlBindPoint(bind_point);
const auto &state = cb_node->lastBound[lv_bind_point];
const auto *pipe = state.pipeline_state;
if (nullptr == pipe) {
return LogError(cb_node->commandBuffer, vuid.pipeline_bound,
"Must not call %s on this command buffer while there is no %s pipeline bound.", function,
bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR
? "RayTracing"
: bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute");
}
bool result = false;
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) {
// First check flag states
result |= ValidateDrawStateFlags(cb_node, pipe, indexed, vuid.dynamic_state);
if (cb_node->activeRenderPass && cb_node->activeFramebuffer) {
// Verify attachments for unprotected/protected command buffer.
if (enabled_features.core11.protectedMemory == VK_TRUE && cb_node->active_attachments) {
uint32_t i = 0;
for (const auto &view_state : *cb_node->active_attachments.get()) {
const auto &subpass = cb_node->active_subpasses->at(i);
if (subpass.used && view_state && !view_state->destroyed) {
std::string image_desc = "Image is ";
image_desc.append(string_VkImageUsageFlagBits(subpass.usage));
// Because inputAttachment is read only, it doesn't need to care protected command buffer case.
// Some CMD_TYPE could not be protected. See VUID 02711.
if (subpass.usage != VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT &&
vuid.protected_command_buffer != kVUIDUndefined) {
result |= ValidateUnprotectedImage(cb_node, view_state->image_state.get(), function,
vuid.protected_command_buffer, image_desc.c_str());
}
result |= ValidateProtectedImage(cb_node, view_state->image_state.get(), function,
vuid.unprotected_command_buffer, image_desc.c_str());
}
++i;
}
}
}
}
// Now complete other state checks
string error_string;
auto const &pipeline_layout = pipe->pipeline_layout.get();
// Check if the current pipeline is compatible for the maximum used set with the bound sets.
if (pipe->active_slots.size() > 0 && !CompatForSet(pipe->max_active_slot, state, pipeline_layout->compat_for_set)) {
LogObjectList objlist(pipe->pipeline);
objlist.add(pipeline_layout->layout);
objlist.add(state.pipeline_layout);
result |= LogError(objlist, vuid.compatible_pipeline,
"%s(): %s defined with %s is not compatible for maximum set statically used %" PRIu32
" with bound descriptor sets, last bound with %s",
CommandTypeString(cmd_type), report_data->FormatHandle(pipe->pipeline).c_str(),
report_data->FormatHandle(pipeline_layout->layout).c_str(), pipe->max_active_slot,
report_data->FormatHandle(state.pipeline_layout).c_str());
}
for (const auto &set_binding_pair : pipe->active_slots) {
uint32_t set_index = set_binding_pair.first;
// If valid set is not bound throw an error
if ((state.per_set.size() <= set_index) || (!state.per_set[set_index].bound_descriptor_set)) {
result |= LogError(cb_node->commandBuffer, kVUID_Core_DrawState_DescriptorSetNotBound,
"%s uses set #%u but that set is not bound.", report_data->FormatHandle(pipe->pipeline).c_str(),
set_index);
} else if (!VerifySetLayoutCompatibility(report_data, state.per_set[set_index].bound_descriptor_set, pipeline_layout,
set_index, error_string)) {
// Set is bound but not compatible w/ overlapping pipeline_layout from PSO
VkDescriptorSet set_handle = state.per_set[set_index].bound_descriptor_set->GetSet();
LogObjectList objlist(set_handle);
objlist.add(pipeline_layout->layout);
result |= LogError(objlist, kVUID_Core_DrawState_PipelineLayoutsIncompatible,
"%s bound as set #%u is not compatible with overlapping %s due to: %s",
report_data->FormatHandle(set_handle).c_str(), set_index,
report_data->FormatHandle(pipeline_layout->layout).c_str(), error_string.c_str());
} else { // Valid set is bound and layout compatible, validate that it's updated
// Pull the set node
const cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[set_index].bound_descriptor_set;
// Validate the draw-time state for this descriptor set
std::string err_str;
// For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
// binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
// Here, the currently bound pipeline determines whether an image validation check is redundant...
// for images are the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second);
const auto &binding_req_map = reduced_map.FilteredMap(*cb_node, *pipe);
// We can skip validating the descriptor set if "nothing" has changed since the last validation.
// Same set, no image layout changes, and same "pipeline state" (binding_req_map). If there are
// any dynamic descriptors, always revalidate rather than caching the values. We currently only
// apply this optimization if IsManyDescriptors is true, to avoid the overhead of copying the
// binding_req_map which could potentially be expensive.
bool descriptor_set_changed =
!reduced_map.IsManyDescriptors() ||
// Revalidate each time if the set has dynamic offsets
state.per_set[set_index].dynamicOffsets.size() > 0 ||
// Revalidate if descriptor set (or contents) has changed
state.per_set[set_index].validated_set != descriptor_set ||
state.per_set[set_index].validated_set_change_count != descriptor_set->GetChangeCount() ||
(!disabled[image_layout_validation] &&
state.per_set[set_index].validated_set_image_layout_change_count != cb_node->image_layout_change_count);
bool need_validate = descriptor_set_changed ||
// Revalidate if previous bindingReqMap doesn't include new bindingReqMap
!std::includes(state.per_set[set_index].validated_set_binding_req_map.begin(),
state.per_set[set_index].validated_set_binding_req_map.end(),
binding_req_map.begin(), binding_req_map.end());
if (need_validate) {
if (!descriptor_set_changed && reduced_map.IsManyDescriptors()) {
// Only validate the bindings that haven't already been validated
BindingReqMap delta_reqs;
std::set_difference(binding_req_map.begin(), binding_req_map.end(),
state.per_set[set_index].validated_set_binding_req_map.begin(),
state.per_set[set_index].validated_set_binding_req_map.end(),
std::inserter(delta_reqs, delta_reqs.begin()));
result |=
ValidateDrawState(descriptor_set, delta_reqs, state.per_set[set_index].dynamicOffsets, cb_node,
cb_node->active_attachments.get(), *cb_node->active_subpasses.get(), function, vuid);
} else {
result |=
ValidateDrawState(descriptor_set, binding_req_map, state.per_set[set_index].dynamicOffsets, cb_node,
cb_node->active_attachments.get(), *cb_node->active_subpasses.get(), function, vuid);
}
}
}
}
// Check general pipeline state that needs to be validated at drawtime
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) {
result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pipe, function);
}
// Verify if push constants have been set
if (cb_node->push_constant_data_ranges) {
if (pipeline_layout->push_constant_ranges != cb_node->push_constant_data_ranges) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(cb_node->push_constant_pipeline_layout_set);
objlist.add(pipeline_layout->layout);
objlist.add(pipe->pipeline);
result |= LogError(
objlist, vuid.push_constants_set, "The active push constants of %s isn't compatible with %s of active %s.",
report_data->FormatHandle(cb_node->push_constant_pipeline_layout_set).c_str(),
report_data->FormatHandle(pipeline_layout->layout).c_str(), report_data->FormatHandle(pipe->pipeline).c_str());
} else {
for (const auto &stage : pipe->stage_state) {
const auto *entrypoint =
FindEntrypointStruct(stage.shader_state.get(), stage.entry_point_name.c_str(), stage.stage_flag);
if (!entrypoint || !entrypoint->push_constant_used_in_shader.IsUsed()) {
continue;
}
const auto it = cb_node->push_constant_data_update.find(stage.stage_flag);
if (it == cb_node->push_constant_data_update.end()) {
// This error has been printed in ValidatePushConstantUsage.
break;
}
uint32_t issue_index = 0;
const auto ret = ValidatePushConstantSetUpdate(it->second, entrypoint->push_constant_used_in_shader, issue_index);
// "not set" error has been printed in ValidatePushConstantUsage.
if (ret == PC_Byte_Not_Updated) {
const auto loc_descr = entrypoint->push_constant_used_in_shader.GetLocationDesc(issue_index);
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(pipeline_layout->layout);
result |= LogError(objlist, vuid.push_constants_set, "Push-constant buffer:%s in %s of %s is not updated.",
loc_descr.c_str(), string_VkShaderStageFlags(stage.stage_flag).c_str(),
report_data->FormatHandle(pipeline_layout->layout).c_str());
break;
}
}
}
}
return result;
}
bool CoreChecks::ValidatePipelineLocked(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) const {
bool skip = false;
const PIPELINE_STATE *pipeline = pPipelines[pipelineIndex].get();
// If create derivative bit is set, check that we've specified a base
// pipeline correctly, and that the base pipeline was created to allow
// derivatives.
if (pipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *base_pipeline = nullptr;
if (!((pipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
(pipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
// TODO: This check is a superset of VUID-VkGraphicsPipelineCreateInfo-flags-00724 and
// TODO: VUID-VkGraphicsPipelineCreateInfo-flags-00725
skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo[%d]: exactly one of base pipeline index and handle must be specified",
pipelineIndex);
} else if (pipeline->graphicsPipelineCI.basePipelineIndex != -1) {
if (pipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
skip |=
LogError(device, "VUID-vkCreateGraphicsPipelines-flags-00720",
"Invalid Pipeline CreateInfo[%d]: base pipeline must occur earlier in array than derivative pipeline.",
pipelineIndex);
} else {
base_pipeline = pPipelines[pipeline->graphicsPipelineCI.basePipelineIndex].get();
}
} else if (pipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
base_pipeline = GetPipelineState(pipeline->graphicsPipelineCI.basePipelineHandle);
}
if (base_pipeline && !(base_pipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo[%d]: base pipeline does not allow derivatives.", pipelineIndex);
}
}
// Check for portability errors
if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
if ((VK_FALSE == enabled_features.portability_subset_features.triangleFans) &&
(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN == pipeline->topology_at_rasterizer)) {
skip |=
LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-triangleFans-04452",
"Invalid Pipeline CreateInfo[%d] (portability error): VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN is not supported",
pipelineIndex);
}
// Validate vertex inputs
for (const auto desc : pipeline->vertex_binding_descriptions_) {
if ((desc.stride < phys_dev_ext_props.portability_props.minVertexInputBindingStrideAlignment) ||
((desc.stride % phys_dev_ext_props.portability_props.minVertexInputBindingStrideAlignment) != 0)) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDescription-stride-04456",
"Invalid Pipeline CreateInfo[%d] (portability error): Vertex input stride must be at least as large as and a "
"multiple of VkPhysicalDevicePortabilitySubsetPropertiesKHR::minVertexInputBindingStrideAlignment.",
pipelineIndex);
}
}
// Validate vertex attributes
if (VK_FALSE == enabled_features.portability_subset_features.vertexAttributeAccessBeyondStride) {
for (const auto attrib : pipeline->vertex_attribute_descriptions_) {
const auto vertex_binding_map_it = pipeline->vertex_binding_to_index_map_.find(attrib.binding);
if (vertex_binding_map_it != pipeline->vertex_binding_to_index_map_.cend()) {
const auto desc = pipeline->vertex_binding_descriptions_[vertex_binding_map_it->second];
if ((attrib.offset + FormatElementSize(attrib.format)) > desc.stride) {
skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-vertexAttributeAccessBeyondStride-04457",
"Invalid Pipeline CreateInfo[%d] (portability error): (attribute.offset + "
"sizeof(vertex_description.format)) is larger than the vertex stride",
pipelineIndex);
}
}
}
}
// Validate polygon mode
auto raster_state_ci = pipeline->graphicsPipelineCI.pRasterizationState;
if ((VK_FALSE == enabled_features.portability_subset_features.pointPolygons) && raster_state_ci &&
(VK_FALSE == raster_state_ci->rasterizerDiscardEnable) && (VK_POLYGON_MODE_POINT == raster_state_ci->polygonMode)) {
skip |=
LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-pointPolygons-04458",
"Invalid Pipeline CreateInfo[%d] (portability error): point polygons are not supported", pipelineIndex);
}
}
return skip;
}
// UNLOCKED pipeline validation. DO NOT lookup objects in the CoreChecks->* maps in this function.
bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint32_t pipelineIndex) const {
bool skip = false;
// Ensure the subpass index is valid. If not, then ValidateGraphicsPipelineShaderState
// produces nonsense errors that confuse users. Other layers should already
// emit errors for renderpass being invalid.
auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass];
if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00759",
"Invalid Pipeline CreateInfo[%u] State: Subpass index %u is out of range for this renderpass (0..%u).",
pipelineIndex, pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
subpass_desc = nullptr;
}
if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
if (subpass_desc && color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: %s subpass %u has colorAttachmentCount of %u which doesn't "
"match the pColorBlendState->attachmentCount of %u.",
pipelineIndex, report_data->FormatHandle(pPipeline->rp_state->renderPass).c_str(),
pPipeline->graphicsPipelineCI.subpass, subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
}
if (!enabled_features.core.independentBlend) {
if (pPipeline->attachments.size() > 1) {
const VkPipelineColorBlendAttachmentState *const attachments = &pPipeline->attachments[0];
for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
// Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
// settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
// only attachment state, so memcmp is best suited for the comparison
if (memcmp(static_cast<const void *>(attachments), static_cast<const void *>(&attachments[i]),
sizeof(attachments[0]))) {
skip |=
LogError(device, "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605",
"Invalid Pipeline CreateInfo[%u]: If independent blend feature not enabled, all elements of "
"pAttachments must be identical.",
pipelineIndex);
break;
}
}
}
}
if (!enabled_features.core.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606",
"Invalid Pipeline CreateInfo[%u]: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.",
pipelineIndex);
}
for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
}
}
if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
}
}
if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
}
}
if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
}
}
}
}
if (ValidateGraphicsPipelineShaderState(pPipeline)) {
skip = true;
}
// Each shader's stage must be unique
if (pPipeline->duplicate_shaders) {
for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
if (pPipeline->duplicate_shaders & stage) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00726",
"Invalid Pipeline CreateInfo[%u] State: Multiple shaders provided for stage %s", pipelineIndex,
string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
}
}
}
if (device_extensions.vk_nv_mesh_shader) {
// VS or mesh is required
if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-02096",
"Invalid Pipeline CreateInfo[%u] State: Vertex Shader or Mesh Shader required.", pipelineIndex);
}
// Can't mix mesh and VTG
if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) &&
(pPipeline->active_shaders &
(VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02095",
"Invalid Pipeline CreateInfo[%u] State: Geometric shader stages must either be all mesh (mesh | task) "
"or all VTG (vertex, tess control, tess eval, geom).",
pipelineIndex);
}
} else {
// VS is required
if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00727",
"Invalid Pipeline CreateInfo[%u] State: Vertex Shader required.", pipelineIndex);
}
}
if (!enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02091",
"Invalid Pipeline CreateInfo[%u] State: Mesh Shader not supported.", pipelineIndex);
}
if (!enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02092",
"Invalid Pipeline CreateInfo[%u] State: Task Shader not supported.", pipelineIndex);
}
// Either both or neither TC/TE shaders should be defined
bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
if (has_control && !has_eval) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729",
"Invalid Pipeline CreateInfo[%u] State: TE and TC shaders must be included or excluded as a pair.",
pipelineIndex);
}
if (!has_control && has_eval) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730",
"Invalid Pipeline CreateInfo[%u] State: TE and TC shaders must be included or excluded as a pair.",
pipelineIndex);
}
// Compute shaders should be specified independent of Gfx shaders
if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00728",
"Invalid Pipeline CreateInfo[%u] State: Do not specify Compute Shader for Gfx Pipeline.", pipelineIndex);
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02098",
"Invalid Pipeline CreateInfo[%u] State: Missing pInputAssemblyState.", pipelineIndex);
}
// VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
// Mismatching primitive topology and tessellation fails graphics pipeline creation.
if (has_control && has_eval &&
(!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736",
"Invalid Pipeline CreateInfo[%u] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
"tessellation pipelines.",
pipelineIndex);
}
if (pPipeline->graphicsPipelineCI.pInputAssemblyState) {
if (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
if (!has_control || !has_eval) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-topology-00737",
"Invalid Pipeline CreateInfo[%u] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
"for tessellation pipelines.",
pipelineIndex);
}
}
if ((pPipeline->graphicsPipelineCI.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= LogError(
device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
if ((enabled_features.core.geometryShader == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) {
skip |=
LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and geometry shaders feature is not enabled. "
"It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
if ((enabled_features.core.tessellationShader == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |=
LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and tessellation shaders feature is not "
"enabled. It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
}
// If a rasterization state is provided...
if (pPipeline->graphicsPipelineCI.pRasterizationState) {
if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
(!enabled_features.core.depthClamp)) {
skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthClamp device feature is disabled: the "
"depthClampEnable member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.",
pipelineIndex);
}
if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
(pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00754",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthBiasClamp device feature is disabled: the "
"depthBiasClamp member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
"VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled",
pipelineIndex);
}
// If rasterization is enabled...
if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
(!enabled_features.core.alphaToOne)) {
skip |= LogError(
device, "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the alphaToOne device feature is disabled: the alphaToOneEnable "
"member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.",
pipelineIndex);
}
// If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752",
"Invalid Pipeline CreateInfo[%u] State: pDepthStencilState is NULL when rasterization is enabled "
"and subpass uses a depth/stencil attachment.",
pipelineIndex);
} else if (pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) {
if (!enabled_features.core.depthBounds) {
skip |=
LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthBounds device feature is disabled: the "
"depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
"set to VK_FALSE.",
pipelineIndex);
}
// The extension was not created with a feature bit whichs prevents displaying the 2 variations of the VUIDs
if (!device_extensions.vk_ext_depth_range_unrestricted &&
!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
const float minDepthBounds = pPipeline->graphicsPipelineCI.pDepthStencilState->minDepthBounds;
const float maxDepthBounds = pPipeline->graphicsPipelineCI.pDepthStencilState->maxDepthBounds;
// Also VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00755
if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: VK_EXT_depth_range_unrestricted extension "
"is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is "
"true, and pDepthStencilState::minDepthBounds (=%f) is not within the [0.0, 1.0] range.",
minDepthBounds);
}
if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: VK_EXT_depth_range_unrestricted extension "
"is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is "
"true, and pDepthStencilState::maxDepthBounds (=%f) is not within the [0.0, 1.0] range.",
maxDepthBounds);
}
}
}
}
// If subpass uses color attachments, pColorBlendState must be valid pointer
if (subpass_desc) {
uint32_t color_attachment_count = 0;
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
++color_attachment_count;
}
}
if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
"Invalid Pipeline CreateInfo[%u] State: pColorBlendState is NULL when rasterization is enabled and "
"subpass uses color attachments.",
pipelineIndex);
}
}
}
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02097",
"Invalid Pipeline CreateInfo[%u] State: Missing pVertexInputState.", pipelineIndex);
}
auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
if (vi != NULL) {
for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
VkFormat format = vi->pVertexAttributeDescriptions[j].format;
// Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
VkFormatProperties properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties);
if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
skip |=
LogError(device, "VUID-VkVertexInputAttributeDescription-format-00623",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
"(%s) is not a supported vertex buffer format.",
pipelineIndex, j, string_VkFormat(format));
}
}
}
if (subpass_desc && pPipeline->graphicsPipelineCI.pMultisampleState) {
const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pPipeline->graphicsPipelineCI.pMultisampleState;
auto accum_color_samples = [subpass_desc, pPipeline](uint32_t &samples) {
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
}
};
if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples)) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_num_samples = 0;
accum_color_samples(subpass_num_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
// subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED.
// Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED.
if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass color and/or depth attachment.",
pipelineIndex, raster_samples);
}
}
if (device_extensions.vk_amd_mixed_attachment_samples) {
VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
}
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
}
if ((pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) &&
(max_sample_count != static_cast<VkSampleCountFlagBits>(0)) &&
(multisample_state->rasterizationSamples != max_sample_count)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
"attachment samples (%s) used in subpass %u.",
pipelineIndex, string_VkSampleCountFlagBits(multisample_state->rasterizationSamples),
string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
}
}
if (device_extensions.vk_nv_framebuffer_mixed_samples) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
accum_color_samples(subpass_color_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
const uint32_t subpass_depth_samples =
static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
if (pPipeline->graphicsPipelineCI.pDepthStencilState) {
const bool ds_test_enabled =
(pPipeline->graphicsPipelineCI.pDepthStencilState->depthTestEnable == VK_TRUE) ||
(pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) ||
(pPipeline->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE);
if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass depth attachment (%u).",
pipelineIndex, raster_samples, subpass_depth_samples);
}
}
}
if (IsPowerOfTwo(subpass_color_samples)) {
if (raster_samples < subpass_color_samples) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"is not greater or equal to the number of samples of the RenderPass color attachment (%u).",
pipelineIndex, raster_samples, subpass_color_samples);
}
if (multisample_state) {
if ((raster_samples > subpass_color_samples) && (multisample_state->sampleShadingEnable == VK_TRUE)) {
skip |=
LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->sampleShadingEnable must be "
"VK_FALSE when "
"pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) is greater than the number of "
"samples of the "
"subpass color attachment (%u).",
pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples);
}
const auto *coverage_modulation_state =
LvlFindInChain<VkPipelineCoverageModulationStateCreateInfoNV>(multisample_state->pNext);
if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) {
if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) {
skip |= LogError(
device, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405",
"vkCreateGraphicsPipelines: pCreateInfos[%d] VkPipelineCoverageModulationStateCreateInfoNV "
"coverageModulationTableCount of %u is invalid.",
pipelineIndex, coverage_modulation_state->coverageModulationTableCount);
}
}
}
}
}
if (device_extensions.vk_nv_coverage_reduction_mode) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
uint32_t subpass_depth_samples = 0;
accum_color_samples(subpass_color_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_depth_samples = static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
if (multisample_state && IsPowerOfTwo(subpass_color_samples) &&
(subpass_depth_samples == 0 || IsPowerOfTwo(subpass_depth_samples))) {
const auto *coverage_reduction_state =
LvlFindInChain<VkPipelineCoverageReductionStateCreateInfoNV>(multisample_state->pNext);
if (coverage_reduction_state) {
const VkCoverageReductionModeNV coverage_reduction_mode = coverage_reduction_state->coverageReductionMode;
uint32_t combination_count = 0;
std::vector<VkFramebufferMixedSamplesCombinationNV> combinations;
DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count,
nullptr);
combinations.resize(combination_count);
DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count,
&combinations[0]);
bool combination_found = false;
for (const auto &combination : combinations) {
if (coverage_reduction_mode == combination.coverageReductionMode &&
raster_samples == combination.rasterizationSamples &&
subpass_depth_samples == combination.depthStencilSamples &&
subpass_color_samples == combination.colorSamples) {
combination_found = true;
break;
}
}
if (!combination_found) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-coverageReductionMode-02722",
"vkCreateGraphicsPipelines: pCreateInfos[%d] the specified combination of coverage "
"reduction mode (%s), pMultisampleState->rasterizationSamples (%u), sample counts for "
"the subpass color and depth/stencil attachments is not a valid combination returned by "
"vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV.",
pipelineIndex, string_VkCoverageReductionModeNV(coverage_reduction_mode));
}
}
}
}
if (device_extensions.vk_nv_fragment_coverage_to_color) {
const auto coverage_to_color_state = LvlFindInChain<VkPipelineCoverageToColorStateCreateInfoNV>(multisample_state);
if (coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
bool attachment_is_valid = false;
std::string error_detail;
if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
const auto color_attachment_ref =
subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
const auto color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];
switch (color_attachment.format) {
case VK_FORMAT_R8_UINT:
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16_SINT:
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SINT:
attachment_is_valid = true;
break;
default:
std::ostringstream str;
str << "references an attachment with an invalid format ("
<< string_VkFormat(color_attachment.format) << ").";
error_detail = str.str();
break;
}
} else {
std::ostringstream str;
str << "references an invalid attachment. The subpass pColorAttachments["
<< coverage_to_color_state->coverageToColorLocation
<< "].attachment has the value VK_ATTACHMENT_UNUSED.";
error_detail = str.str();
}
} else {
std::ostringstream str;
str << "references an non-existing attachment since the subpass colorAttachmentCount is "
<< subpass_desc->colorAttachmentCount << ".";
error_detail = str.str();
}
if (!attachment_is_valid) {
skip |= LogError(device, "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRId32
"].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV "
"coverageToColorLocation = %" PRIu32 " %s",
pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
}
}
}
if (device_extensions.vk_ext_sample_locations) {
const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_state =
LvlFindInChain<VkPipelineSampleLocationsStateCreateInfoEXT>(multisample_state->pNext);
if (sample_location_state != nullptr) {
if ((sample_location_state->sampleLocationsEnable == VK_TRUE) &&
(IsDynamic(pPipeline, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT) == false)) {
const VkSampleLocationsInfoEXT sample_location_info = sample_location_state->sampleLocationsInfo;
skip |= ValidateSampleLocationsInfo(&sample_location_info, "vkCreateGraphicsPipelines");
const VkExtent2D grid_size = sample_location_info.sampleLocationGridSize;
VkMultisamplePropertiesEXT multisample_prop;
DispatchGetPhysicalDeviceMultisamplePropertiesEXT(physical_device, multisample_state->rasterizationSamples,
&multisample_prop);
const VkExtent2D max_grid_size = multisample_prop.maxSampleLocationGridSize;
// Note order or "divide" in "sampleLocationsInfo must evenly divide VkMultisamplePropertiesEXT"
if (SafeModulo(max_grid_size.width, grid_size.width) != 0) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.width (%u) "
"must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.width (%u).",
pipelineIndex, grid_size.width, max_grid_size.width);
}
if (SafeModulo(max_grid_size.height, grid_size.height) != 0) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.height (%u) "
"must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.height (%u).",
pipelineIndex, grid_size.height, max_grid_size.height);
}
if (sample_location_info.sampleLocationsPerPixel != multisample_state->rasterizationSamples) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01523",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationsPerPixel (%s) must "
"be the same as the VkPipelineMultisampleStateCreateInfo::rasterizationSamples (%s).",
pipelineIndex, string_VkSampleCountFlagBits(sample_location_info.sampleLocationsPerPixel),
string_VkSampleCountFlagBits(multisample_state->rasterizationSamples));
}
}
}
}
}
skip |= ValidatePipelineCacheControlFlags(pPipeline->graphicsPipelineCI.flags, pipelineIndex, "vkCreateGraphicsPipelines",
"VUID-VkGraphicsPipelineCreateInfo-pipelineCreationCacheControl-02878");
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState &&
(IsDynamic(pPipeline, VK_DYNAMIC_STATE_CULL_MODE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRONT_FACE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_OP_EXT))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378",
"Extended dynamic state used by the extendedDynamicState feature is not enabled");
}
const VkPipelineFragmentShadingRateStateCreateInfoKHR *fragment_shading_rate_state =
LvlFindInChain<VkPipelineFragmentShadingRateStateCreateInfoKHR>(pPipeline->graphicsPipelineCI.pNext);
if (fragment_shading_rate_state && !IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR)) {
const char *struct_name = "VkPipelineFragmentShadingRateStateCreateInfoKHR";
if (fragment_shading_rate_state->fragmentSize.width == 0) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04494",
"vkCreateGraphicsPipelines: Fragment width of %u has been specified in %s.",
fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.height == 0) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04495",
"vkCreateGraphicsPipelines: Fragment height of %u has been specified in %s.",
fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.width != 0 &&
!IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.width)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04496",
"vkCreateGraphicsPipelines: Non-power-of-two fragment width of %u has been specified in %s.",
fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.height != 0 &&
!IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.height)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04497",
"vkCreateGraphicsPipelines: Non-power-of-two fragment height of %u has been specified in %s.",
fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.width > 4) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04498",
"vkCreateGraphicsPipelines: Fragment width of %u specified in %s is too large.",
fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (fragment_shading_rate_state->fragmentSize.height > 4) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04499",
"vkCreateGraphicsPipelines: Fragment height of %u specified in %s is too large",
fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate &&
fragment_shading_rate_state->fragmentSize.width != 1) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500",
"vkCreateGraphicsPipelines: Pipeline fragment width of %u has been specified in %s, but "
"pipelineFragmentShadingRate is not enabled",
fragment_shading_rate_state->fragmentSize.width, struct_name);
}
if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate &&
fragment_shading_rate_state->fragmentSize.height != 1) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500",
"vkCreateGraphicsPipelines: Pipeline fragment height of %u has been specified in %s, but "
"pipelineFragmentShadingRate is not enabled",
fragment_shading_rate_state->fragmentSize.height, struct_name);
}
if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate &&
fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04501",
"vkCreateGraphicsPipelines: First combiner operation of %s has been specified in %s, but "
"primitiveFragmentShadingRate is not enabled",
string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]), struct_name);
}
if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04502",
"vkCreateGraphicsPipelines: Second combiner operation of %s has been specified in %s, but "
"attachmentFragmentShadingRate is not enabled",
string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]), struct_name);
}
if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
(fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506",
"vkCreateGraphicsPipelines: First combiner operation of %s has been specified in %s, but "
"fragmentShadingRateNonTrivialCombinerOps is not supported",
string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]), struct_name);
}
if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
(fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506",
"vkCreateGraphicsPipelines: Second combiner operation of %s has been specified in %s, but "
"fragmentShadingRateNonTrivialCombinerOps is not supported",
string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]), struct_name);
}
}
return skip;
}
// Block of code at start here specifically for managing/tracking DSs
// Validate that the given descriptor set is valid and not currently consumed by an in-flight command buffer.
// func_str names the calling API entry point, used in the error message.
// Returns false when no error occurs; returns true when a validation error is reported (and the
// callback requests that the upcoming API call be skipped).
bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) const {
    if (disabled[idle_descriptor_set]) return false;

    const auto set_node = setMap.find(set);
    if (set_node == setMap.end()) {
        return false;  // Unknown sets are reported by other checks.
    }
    // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
    if (!set_node->second->in_use.load()) {
        return false;
    }
    return LogError(set, "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
                    "Cannot call %s() on %s that is in use by a command buffer.", func_str,
                    report_data->FormatHandle(set).c_str());
}
// If a renderpass is active, verify that the given command type is appropriate for the current subpass state.
bool CoreChecks::ValidateCmdSubpassState(const CMD_BUFFER_STATE *pCB, const CMD_TYPE cmd_type) const {
    if (!pCB->activeRenderPass) return false;

    bool skip = false;
    // Commands that legitimately transition subpass / render pass state from the primary command buffer.
    const bool is_transition_cmd = (cmd_type == CMD_EXECUTECOMMANDS) || (cmd_type == CMD_NEXTSUBPASS) ||
                                   (cmd_type == CMD_ENDRENDERPASS) || (cmd_type == CMD_NEXTSUBPASS2) ||
                                   (cmd_type == CMD_ENDRENDERPASS2);
    if ((pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS) && !is_transition_cmd) {
        skip |= LogError(pCB->commandBuffer, kVUID_Core_DrawState_InvalidCommandBuffer,
                         "Commands cannot be called in a subpass using secondary command buffers.");
    } else if ((pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE) && (cmd_type == CMD_EXECUTECOMMANDS)) {
        skip |= LogError(pCB->commandBuffer, kVUID_Core_DrawState_InvalidCommandBuffer,
                         "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip;
}
// Verify that the queue family the command buffer's pool was created for supports at least one of
// required_flags; otherwise report error_code with a readable list of the missing capabilities.
bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const char *caller_name, VkQueueFlags required_flags,
                                       const char *error_code) const {
    const auto *pool = cb_node->command_pool.get();
    if (!pool) {
        return false;  // No pool to check against.
    }
    const uint32_t queue_family_index = pool->queueFamilyIndex;
    const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags;
    if (required_flags & queue_flags) {
        return false;  // At least one required capability is present.
    }
    // Build a human-readable "A or B" list of the required capability flags.
    string required_flags_string;
    for (const auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_SPARSE_BINDING_BIT,
                            VK_QUEUE_PROTECTED_BIT}) {
        if ((flag & required_flags) == 0) continue;
        if (!required_flags_string.empty()) {
            required_flags_string += " or ";
        }
        required_flags_string += string_VkQueueFlagBits(flag);
    }
    return LogError(cb_node->commandBuffer, error_code,
                    "%s(): Called in command buffer %s which was allocated from the command pool %s which was created with "
                    "queueFamilyIndex %u which doesn't contain the required %s capability flags.",
                    caller_name, report_data->FormatHandle(cb_node->commandBuffer).c_str(),
                    report_data->FormatHandle(pool->commandPool).c_str(), queue_family_index, required_flags_string.c_str());
}
bool CoreChecks::ValidateSampleLocationsInfo(const VkSampleLocationsInfoEXT *pSampleLocationsInfo, const char *apiName) const {
bool skip = false;
const VkSampleCountFlagBits sample_count = pSampleLocationsInfo->sampleLocationsPerPixel;
const uint32_t sample_total_size = pSampleLocationsInfo->sampleLocationGridSize.width *
pSampleLocationsInfo->sampleLocationGridSize.height * SampleCountSize(sample_count);
if (pSampleLocationsInfo->sampleLocationsCount != sample_total_size) {
skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsCount-01527",
"%s: VkSampleLocationsInfoEXT::sampleLocationsCount (%u) must equal grid width * grid height * pixel "
"sample rate which currently is (%u * %u * %u).",
apiName, pSampleLocationsInfo->sampleLocationsCount, pSampleLocationsInfo->sampleLocationGridSize.width,
pSampleLocationsInfo->sampleLocationGridSize.height, SampleCountSize(sample_count));
}
if ((phys_dev_ext_props.sample_locations_props.sampleLocationSampleCounts & sample_count) == 0) {
skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsPerPixel-01526",
"%s: VkSampleLocationsInfoEXT::sampleLocationsPerPixel of %s is not supported by the device, please check "
"VkPhysicalDeviceSampleLocationsPropertiesEXT::sampleLocationSampleCounts for valid sample counts.",
apiName, string_VkSampleCountFlagBits(sample_count));
}
return skip;
}
// Describe how an object bound to a command buffer may have been invalidated, for use in error messages.
static char const *GetCauseStr(VulkanTypedHandle obj) {
    switch (obj.type) {
        case kVulkanObjectTypeDescriptorSet:
            return "destroyed or updated";
        case kVulkanObjectTypeCommandBuffer:
            return "destroyed or rerecorded";
        default:
            return "destroyed";
    }
}
bool CoreChecks::ReportInvalidCommandBuffer(const CMD_BUFFER_STATE *cb_state, const char *call_source) const {
bool skip = false;
for (auto obj : cb_state->broken_bindings) {
const char *cause_str = GetCauseStr(obj);
string vuid;
std::ostringstream str;
str << kVUID_Core_DrawState_InvalidCommandBuffer << "-" << object_string[obj.type];
vuid = str.str();
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(obj);
skip |=
LogError(objlist, vuid.c_str(), "You are adding %s to %s that is invalid because bound %s was %s.", call_source,
report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(obj).c_str(), cause_str);
}
return skip;
}
// 'commandBuffer must be in the recording state' valid usage error code for each command
// Autogenerated as part of the vk_validation_error_message.h codegen
// This accounts for the following VUIDs, enumerated here for search and tracking purposes:
/* "VUID-vkCmdBeginConditionalRenderingEXT-commandBuffer-recording",
"VUID-vkCmdBeginDebugUtilsLabelEXT-commandBuffer-recording",
"VUID-vkCmdBeginQuery-commandBuffer-recording",
"VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-recording",
"VUID-vkCmdBeginRenderPass-commandBuffer-recording",
"VUID-vkCmdBeginRenderPass2-commandBuffer-recording",
"VUID-vkCmdBeginTransformFeedbackEXT-commandBuffer-recording",
"VUID-vkCmdBindDescriptorSets-commandBuffer-recording",
"VUID-vkCmdBindIndexBuffer-commandBuffer-recording",
"VUID-vkCmdBindPipeline-commandBuffer-recording",
"VUID-vkCmdBindPipelineShaderGroupNV-commandBuffer-recording",
"VUID-vkCmdBindShadingRateImageNV-commandBuffer-recording",
"VUID-vkCmdBindTransformFeedbackBuffersEXT-commandBuffer-recording",
"VUID-vkCmdBindVertexBuffers-commandBuffer-recording",
"VUID-vkCmdBindVertexBuffers2EXT-commandBuffer-recording",
"VUID-vkCmdBlitImage-commandBuffer-recording",
"VUID-vkCmdBlitImage2KHR-commandBuffer-recording",
"VUID-vkCmdBuildAccelerationStructuresIndirectKHR-commandBuffer-recording",
"VUID-vkCmdBuildAccelerationStructuresKHR-commandBuffer-recording",
"VUID-vkCmdBuildAccelerationStructureNV-commandBuffer-recording",
"VUID-vkCmdClearAttachments-commandBuffer-recording",
"VUID-vkCmdClearColorImage-commandBuffer-recording",
"VUID-vkCmdClearDepthStencilImage-commandBuffer-recording",
"VUID-vkCmdCopyAccelerationStructureKHR-commandBuffer-recording",
"VUID-vkCmdCopyAccelerationStructureNV-commandBuffer-recording",
"VUID-vkCmdCopyAccelerationStructureToMemoryKHR-commandBuffer-recording",
"VUID-vkCmdCopyBuffer-commandBuffer-recording",
"VUID-vkCmdCopyBuffer2KHR-commandBuffer-recording",
"VUID-vkCmdCopyBufferToImage-commandBuffer-recording",
"VUID-vkCmdCopyBufferToImage2KHR-commandBuffer-recording",
"VUID-vkCmdCopyImage-commandBuffer-recording",
"VUID-vkCmdCopyImage2KHR-commandBuffer-recording",
"VUID-vkCmdCopyImageToBuffer-commandBuffer-recording",
"VUID-vkCmdCopyImageToBuffer2KHR-commandBuffer-recording",
"VUID-vkCmdCopyMemoryToAccelerationStructureKHR-commandBuffer-recording",
"VUID-vkCmdCopyQueryPoolResults-commandBuffer-recording",
"VUID-vkCmdDebugMarkerBeginEXT-commandBuffer-recording",
"VUID-vkCmdDebugMarkerEndEXT-commandBuffer-recording",
"VUID-vkCmdDebugMarkerInsertEXT-commandBuffer-recording",
"VUID-vkCmdDispatch-commandBuffer-recording",
"VUID-vkCmdDispatchBase-commandBuffer-recording",
"VUID-vkCmdDispatchIndirect-commandBuffer-recording",
"VUID-vkCmdDraw-commandBuffer-recording",
"VUID-vkCmdDrawIndexed-commandBuffer-recording",
"VUID-vkCmdDrawIndexedIndirect-commandBuffer-recording",
"VUID-vkCmdDrawIndexedIndirectCount-commandBuffer-recording",
"VUID-vkCmdDrawIndirect-commandBuffer-recording",
"VUID-vkCmdDrawIndirectByteCountEXT-commandBuffer-recording",
"VUID-vkCmdDrawIndirectCount-commandBuffer-recording",
"VUID-vkCmdDrawMeshTasksIndirectCountNV-commandBuffer-recording",
"VUID-vkCmdDrawMeshTasksIndirectNV-commandBuffer-recording",
"VUID-vkCmdDrawMeshTasksNV-commandBuffer-recording",
"VUID-vkCmdEndConditionalRenderingEXT-commandBuffer-recording",
"VUID-vkCmdEndDebugUtilsLabelEXT-commandBuffer-recording",
"VUID-vkCmdEndQuery-commandBuffer-recording",
"VUID-vkCmdEndQueryIndexedEXT-commandBuffer-recording",
"VUID-vkCmdEndRenderPass-commandBuffer-recording",
"VUID-vkCmdEndRenderPass2-commandBuffer-recording",
"VUID-vkCmdEndTransformFeedbackEXT-commandBuffer-recording",
"VUID-vkCmdExecuteCommands-commandBuffer-recording",
"VUID-vkCmdExecuteGeneratedCommandsNV-commandBuffer-recording",
"VUID-vkCmdFillBuffer-commandBuffer-recording",
"VUID-vkCmdInsertDebugUtilsLabelEXT-commandBuffer-recording",
"VUID-vkCmdNextSubpass-commandBuffer-recording",
"VUID-vkCmdNextSubpass2-commandBuffer-recording",
"VUID-vkCmdPipelineBarrier-commandBuffer-recording",
"VUID-vkCmdPreprocessGeneratedCommandsNV-commandBuffer-recording",
"VUID-vkCmdPushConstants-commandBuffer-recording",
"VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording",
"VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-recording",
"VUID-vkCmdResetEvent-commandBuffer-recording",
"VUID-vkCmdResetQueryPool-commandBuffer-recording",
"VUID-vkCmdResolveImage-commandBuffer-recording",
"VUID-vkCmdResolveImage2KHR-commandBuffer-recording",
"VUID-vkCmdSetBlendConstants-commandBuffer-recording",
"VUID-vkCmdSetCheckpointNV-commandBuffer-recording",
"VUID-vkCmdSetCoarseSampleOrderNV-commandBuffer-recording",
"VUID-vkCmdSetCullModeEXT-commandBuffer-recording",
"VUID-vkCmdSetDepthBias-commandBuffer-recording",
"VUID-vkCmdSetDepthBounds-commandBuffer-recording",
"VUID-vkCmdSetDepthBoundsTestEnableEXT-commandBuffer-recording",
"VUID-vkCmdSetDepthCompareOpEXT-commandBuffer-recording",
"VUID-vkCmdSetDepthTestEnableEXT-commandBuffer-recording",
"VUID-vkCmdSetDepthWriteEnableEXT-commandBuffer-recording",
"VUID-vkCmdSetDeviceMask-commandBuffer-recording",
"VUID-vkCmdSetDiscardRectangleEXT-commandBuffer-recording",
"VUID-vkCmdSetEvent-commandBuffer-recording",
"VUID-vkCmdSetExclusiveScissorNV-commandBuffer-recording",
"VUID-vkCmdSetFragmentShadingRateKHR-commandBuffer-recording",
"VUID-vkCmdSetFrontFaceEXT-commandBuffer-recording",
"VUID-vkCmdSetLineStippleEXT-commandBuffer-recording",
"VUID-vkCmdSetLineWidth-commandBuffer-recording",
"VUID-vkCmdSetPerformanceMarkerINTEL-commandBuffer-recording",
"VUID-vkCmdSetPerformanceOverrideINTEL-commandBuffer-recording",
"VUID-vkCmdSetPerformanceStreamMarkerINTEL-commandBuffer-recording",
"VUID-vkCmdSetPrimitiveTopologyEXT-commandBuffer-recording",
"VUID-vkCmdSetSampleLocationsEXT-commandBuffer-recording",
"VUID-vkCmdSetScissor-commandBuffer-recording",
"VUID-vkCmdSetScissorWithCountEXT-commandBuffer-recording",
"VUID-vkCmdSetStencilCompareMask-commandBuffer-recording",
"VUID-vkCmdSetStencilOpEXT-commandBuffer-recording",
"VUID-vkCmdSetStencilReference-commandBuffer-recording",
"VUID-vkCmdSetStencilTestEnableEXT-commandBuffer-recording",
"VUID-vkCmdSetStencilWriteMask-commandBuffer-recording",
"VUID-vkCmdSetViewport-commandBuffer-recording",
"VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-recording",
"VUID-vkCmdSetViewportWScalingNV-commandBuffer-recording",
"VUID-vkCmdSetViewportWithCountEXT-commandBuffer-recording",
"VUID-vkCmdTraceRaysIndirectKHR-commandBuffer-recording",
"VUID-vkCmdTraceRaysKHR-commandBuffer-recording",
"VUID-vkCmdTraceRaysNV-commandBuffer-recording",
"VUID-vkCmdUpdateBuffer-commandBuffer-recording",
"VUID-vkCmdWaitEvents-commandBuffer-recording",
"VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-commandBuffer-recording",
"VUID-vkCmdWriteBufferMarkerAMD-commandBuffer-recording",
"VUID-vkCmdWriteTimestamp-commandBuffer-recording",
"VUID-vkEndCommandBuffer-commandBuffer-00059"
*/
// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
bool CoreChecks::ValidateCmd(const CMD_BUFFER_STATE *cb_state, const CMD_TYPE cmd, const char *caller_name) const {
switch (cb_state->state) {
case CB_RECORDING:
return ValidateCmdSubpassState(cb_state, cmd);
case CB_INVALID_COMPLETE:
case CB_INVALID_INCOMPLETE:
return ReportInvalidCommandBuffer(cb_state, caller_name);
default:
assert(cmd != CMD_NONE);
const auto error = KGeneratedMustBeRecordingList[cmd];
return LogError(cb_state->commandBuffer, error, "You must call vkBeginCommandBuffer() before this call to %s.",
caller_name);
}
}
// Common checks for the indirect-parameter buffer of vkCmd*Indirect* commands: bound memory,
// INDIRECT_BUFFER usage, and not being recorded into a protected command buffer.
bool CoreChecks::ValidateIndirectCmd(VkCommandBuffer command_buffer, VkBuffer buffer, CMD_TYPE cmd_type,
                                     const char *caller_name) const {
    const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
    const CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
    const BUFFER_STATE *buffer_state = GetBufferState(buffer);
    if ((cb_state == nullptr) || (buffer_state == nullptr)) {
        return false;  // Unknown handles are reported by other checks.
    }
    bool skip = ValidateMemoryIsBoundToBuffer(buffer_state, caller_name, vuid.indirect_contiguous_memory);
    skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, true, vuid.indirect_buffer_bit,
                                     caller_name, "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT");
    if (!cb_state->unprotected) {
        skip |= LogError(cb_state->commandBuffer, vuid.indirect_protected_cb,
                         "%s: Indirect commands can't be used in protected command buffers.", caller_name);
    }
    return skip;
}
// Verify that deviceMask only references physical devices that exist, i.e. that every set bit is
// below physical_device_count; otherwise report VUID against `object`.
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, const T1 object, const char *VUID) const {
    bool skip = false;
    // Shift in 64 bits: the previous `1 << physical_device_count` shifted a signed 32-bit int, which is
    // undefined/implementation-defined behavior once physical_device_count reaches 31/32.
    const uint64_t count = uint64_t(1) << physical_device_count;
    if (count <= deviceMask) {
        skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is invalid. Physical device count is %" PRIu32 ".", deviceMask,
                         physical_device_count);
    }
    return skip;
}
// Verify that deviceMask is non-zero; otherwise report VUID against `object`.
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, const T1 object, const char *VUID) const {
    if (deviceMask != 0) {
        return false;
    }
    return LogError(object, VUID, "deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask);
}
// Verify that deviceMask is a subset of the command buffer's initial device mask; otherwise report
// VUID against `object`.
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToCommandBuffer(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const T1 object,
                                                   const char *VUID) const {
    const bool is_subset = ((deviceMask & pCB->initial_device_mask) == deviceMask);
    if (is_subset) {
        return false;
    }
    return LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s initial device mask(0x%" PRIx32 ").",
                    deviceMask, report_data->FormatHandle(pCB->commandBuffer).c_str(), pCB->initial_device_mask);
}
// Verify that deviceMask is a subset of the device mask active for the current render pass instance.
bool CoreChecks::ValidateDeviceMaskToRenderPass(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const char *VUID) const {
    if ((deviceMask & pCB->active_render_pass_device_mask) == deviceMask) {
        return false;  // Subset: nothing to report.
    }
    return LogError(pCB->commandBuffer, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s device mask(0x%" PRIx32 ").",
                    deviceMask, report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(),
                    pCB->active_render_pass_device_mask);
}
// Flags a validation error if the associated call is made inside an active render pass.
// The apiName routine should ONLY be called outside a render pass.
bool CoreChecks::InsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
    if (!pCB->activeRenderPass) {
        return false;  // Correctly called outside a render pass.
    }
    return LogError(pCB->commandBuffer, msgCode, "%s: It is invalid to issue this call inside an active %s.", apiName,
                    report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str());
}
// Flags a validation error if the associated call is made outside a render pass.
// The apiName routine should ONLY be called inside a render pass.
bool CoreChecks::OutsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
    if (pCB->activeRenderPass) {
        return false;  // A render pass is active, so the call is correctly placed.
    }
    // Secondary command buffers begun with RENDER_PASS_CONTINUE_BIT are implicitly inside a render pass.
    const bool is_primary = (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
    const bool is_secondary_without_continue =
        (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
    bool outside = false;
    if (is_primary || is_secondary_without_continue) {
        outside = LogError(pCB->commandBuffer, msgCode, "%s: This call must be issued inside an active render pass.", apiName);
    }
    return outside;
}
// Verify that requested_queue_family is below the number of queue families the application previously
// queried for this physical device; otherwise report err_code naming cmd_name / queue_family_var_name.
bool CoreChecks::ValidateQueueFamilyIndex(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family,
                                          const char *err_code, const char *cmd_name, const char *queue_family_var_name) const {
    if (requested_queue_family < pd_state->queue_family_known_count) {
        return false;  // Index is within the previously obtained family count.
    }
    const char *conditional_ext_cmd =
        instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : "";
    return LogError(pd_state->phys_device, err_code,
                    "%s: %s (= %" PRIu32
                    ") is not less than any previously obtained pQueueFamilyPropertyCount from "
                    "vkGetPhysicalDeviceQueueFamilyProperties%s (i.e. is not less than %s).",
                    cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd,
                    std::to_string(pd_state->queue_family_known_count).c_str());
}
// Verify VkDeviceQueueCreateInfos: each queueFamilyIndex must have been previously
// enumerated, must be unique within pQueueCreateInfos, and the requested queueCount
// must not exceed the queue count advertised for that family.
bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count,
                                                const VkDeviceQueueCreateInfo *infos) const {
    bool skip = false;
    std::unordered_set<uint32_t> queue_family_set;
    for (uint32_t i = 0; i < info_count; ++i) {
        const auto requested_queue_family = infos[i].queueFamilyIndex;
        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
        skip |= ValidateQueueFamilyIndex(pd_state, requested_queue_family, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381",
                                         "vkCreateDevice", queue_family_var_name.c_str());
        // Each queue family may appear at most once across all queue create infos.
        if (queue_family_set.insert(requested_queue_family).second == false) {
            skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
                             "CreateDevice(): %s (=%" PRIu32 ") is not unique within pQueueCreateInfos.",
                             queue_family_var_name.c_str(), requested_queue_family);
        }
        // Verify that requested queue count of queue family is known to be valid at this point in time
        if (requested_queue_family < pd_state->queue_family_known_count) {
            const auto requested_queue_count = infos[i].queueCount;
            const bool queue_family_has_props = requested_queue_family < pd_state->queue_family_properties.size();
            // spec guarantees at least one queue for each queue family
            const uint32_t available_queue_count =
                queue_family_has_props ? pd_state->queue_family_properties[requested_queue_family].queueCount : 1;
            const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2
                                                  ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
                                                  : "";
            if (requested_queue_count > available_queue_count) {
                const std::string count_note =
                    queue_family_has_props
                        ? "i.e. is not less than or equal to " +
                              std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount)
                        : "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained";
                // Fixed: the message used to contain a stray '}' after "queueFamilyIndex".
                skip |= LogError(
                    pd_state->phys_device, "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
                    "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
                    ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
                    "].queueFamilyIndex (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                    i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
            }
        }
    }
    return skip;
}
// Validate vkCreateDevice(): queue create infos must be consistent with the enumerated
// queue families, and mutually-exclusive feature combinations in the pNext chain
// (shadingRateImage / fragmentDensityMap vs. the KHR fragment shading rate features)
// must not be enabled together.
bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) const {
    bool skip = false;
    auto pd_state = GetPhysicalDeviceState(gpu);
    // TODO: object_tracker should perhaps do this instead
    // and it does not seem to currently work anyway -- the loader just crashes before this point
    if (!pd_state) {
        skip |= LogError(device, kVUID_Core_DevLimit_MustQueryCount,
                         "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else {
        skip |= ValidateDeviceQueueCreateInfos(pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
        // The feature-exclusion checks below only apply when the app chains in the
        // KHR fragment shading rate features struct.
        const VkPhysicalDeviceFragmentShadingRateFeaturesKHR *fragment_shading_rate_features =
            LvlFindInChain<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>(pCreateInfo->pNext);
        if (fragment_shading_rate_features) {
            // shadingRateImage (NV) is incompatible with every KHR fragment shading rate feature.
            const VkPhysicalDeviceShadingRateImageFeaturesNV *shading_rate_image_features =
                LvlFindInChain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext);
            if (shading_rate_image_features && shading_rate_image_features->shadingRateImage) {
                if (fragment_shading_rate_features->pipelineFragmentShadingRate) {
                    skip |= LogError(
                        pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04478",
                        "vkCreateDevice: Cannot enable shadingRateImage and pipelineFragmentShadingRate features simultaneously.");
                }
                if (fragment_shading_rate_features->primitiveFragmentShadingRate) {
                    skip |= LogError(
                        pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04479",
                        "vkCreateDevice: Cannot enable shadingRateImage and primitiveFragmentShadingRate features simultaneously.");
                }
                if (fragment_shading_rate_features->attachmentFragmentShadingRate) {
                    skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04480",
                                     "vkCreateDevice: Cannot enable shadingRateImage and attachmentFragmentShadingRate features "
                                     "simultaneously.");
                }
            }
            // fragmentDensityMap (EXT) is likewise incompatible with every KHR fragment shading rate feature.
            const VkPhysicalDeviceFragmentDensityMapFeaturesEXT *fragment_density_map_features =
                LvlFindInChain<VkPhysicalDeviceFragmentDensityMapFeaturesEXT>(pCreateInfo->pNext);
            if (fragment_density_map_features && fragment_density_map_features->fragmentDensityMap) {
                if (fragment_shading_rate_features->pipelineFragmentShadingRate) {
                    skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04481",
                                     "vkCreateDevice: Cannot enable fragmentDensityMap and pipelineFragmentShadingRate features "
                                     "simultaneously.");
                }
                if (fragment_shading_rate_features->primitiveFragmentShadingRate) {
                    skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04482",
                                     "vkCreateDevice: Cannot enable fragmentDensityMap and primitiveFragmentShadingRate features "
                                     "simultaneously.");
                }
                if (fragment_shading_rate_features->attachmentFragmentShadingRate) {
                    skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04483",
                                     "vkCreateDevice: Cannot enable fragmentDensityMap and attachmentFragmentShadingRate features "
                                     "simultaneously.");
                }
            }
        }
    }
    return skip;
}
// Record vkCreateDevice(): delegate device-state construction to the state tracker,
// then register the core-checks callback the tracker uses to seed image-view initial
// layouts while command buffers are recorded.
void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
    // The state tracker sets up the device state
    StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
    // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker refactor
    // would be messier without.
    // TODO: Find a good way to do this hooklessly.
    // Look up the CoreChecks instance attached to the newly created device's dispatch key;
    // `this` belongs to the instance-level object, not the device-level one.
    ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
    CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);
    core_checks->SetSetImageViewInitialLayoutCallback(
        [core_checks](CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &iv_state, VkImageLayout layout) -> void {
            core_checks->SetImageViewInitialLayout(cb_node, iv_state, layout);
        });
}
// On device destruction, drop all cached global image layout state before the
// state tracker tears down the rest of the device state.
void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    if (device) {
        imageLayoutMap.clear();
        StateTracker::PreCallRecordDestroyDevice(device, pAllocator);
    }
}
// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
// and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id.
// Similarly for mesh and task shaders.
bool CoreChecks::ValidateStageMaskGsTsEnables(VkPipelineStageFlags stageMask, const char *caller, const char *geo_error_id,
                                              const char *tess_error_id, const char *mesh_error_id,
                                              const char *task_error_id) const {
    bool skip = false;
    // Shared check: if the feature is disabled but any of the given stage bits are set, log the error.
    // Each message contains a single %s which receives |caller|.
    auto check_stage = [&](bool feature_enabled, VkPipelineStageFlags stage_bits, const char *error_id, const char *message) {
        if (!feature_enabled && (stageMask & stage_bits)) {
            skip |= LogError(device, error_id, message, caller);
        }
    };
    check_stage(enabled_features.core.geometryShader, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, geo_error_id,
                "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
                "geometryShader feature enabled.");
    check_stage(enabled_features.core.tessellationShader,
                VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
                tess_error_id,
                "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
                "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
                "tessellationShader feature enabled.");
    check_stage(enabled_features.mesh_shader.meshShader, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, mesh_error_id,
                "%s call includes a stageMask with VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV bit set when device does not have "
                "VkPhysicalDeviceMeshShaderFeaturesNV::meshShader feature enabled.");
    check_stage(enabled_features.mesh_shader.taskShader, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, task_error_id,
                "%s call includes a stageMask with VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV bit set when device does not have "
                "VkPhysicalDeviceMeshShaderFeaturesNV::taskShader feature enabled.");
    return skip;
}
// Reject stage masks containing VK_PIPELINE_STAGE_HOST_BIT: host work cannot be
// expressed as a pipeline stage inside a command buffer.
bool CoreChecks::ValidateStageMaskHost(VkPipelineStageFlags stageMask, const char *caller, const char *vuid) const {
    if ((stageMask & VK_PIPELINE_STAGE_HOST_BIT) == 0) {
        return false;
    }
    return LogError(
        device, vuid,
        "%s: stage mask must not include VK_PIPELINE_STAGE_HOST_BIT as the stage can't be invoked inside a command buffer.",
        caller);
}
// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
// a QueryReset, make sure that all such events have been signalled.
bool CoreChecks::VerifyQueueStateToSeq(const QUEUE_STATE *initial_queue, uint64_t initial_seq) const {
    bool skip = false;
    // sequence number we want to validate up to, per queue
    std::unordered_map<const QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
    // sequence number we've completed validation for, per queue
    std::unordered_map<const QUEUE_STATE *, uint64_t> done_seqs;
    std::vector<const QUEUE_STATE *> worklist{initial_queue};
    while (worklist.size()) {
        auto queue = worklist.back();
        worklist.pop_back();
        // Walk the still-unvalidated window [done, target) of this queue's pending
        // submissions; queue->seq is the seq of the oldest submission still retained.
        auto target_seq = target_seqs[queue];
        auto seq = std::max(done_seqs[queue], queue->seq);
        auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq
        for (; seq < target_seq; ++sub_it, ++seq) {
            for (auto &wait : sub_it->waitSemaphores) {
                auto other_queue = GetQueueState(wait.queue);
                if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here.
                auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
                auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
                // if this wait is for another queue, and covers new sequence
                // numbers beyond what we've already validated, mark the new
                // target seq and (possibly-re)add the queue to the worklist.
                if (other_done_seq < other_target_seq) {
                    target_seqs[other_queue] = other_target_seq;
                    worklist.push_back(other_queue);
                }
            }
        }
        // finally mark the point we've now validated this queue to.
        done_seqs[queue] = seq;
    }
    // NOTE(review): 'skip' is never set anywhere in this body — as written, the walk only
    // propagates target sequence numbers across queues and performs no actual event check,
    // so the function always returns false. Confirm whether the check described in the
    // header comment was removed intentionally.
    return skip;
}
// When the given fence is retired, verify outstanding queue operations through the point of the fence
bool CoreChecks::VerifyQueueStateToFence(VkFence fence) const {
    auto fence_state = GetFenceState(fence);
    // Only internally-scoped fences that were actually handed to a submission
    // (signaler recorded) have queue state to verify.
    if (!fence_state || fence_state->scope != kSyncScopeInternal || VK_NULL_HANDLE == fence_state->signaler.first) {
        return false;
    }
    return VerifyQueueStateToSeq(GetQueueState(fence_state->signaler.first), fence_state->signaler.second);
}
// A command buffer may only be pending execution (or appear more than once in the
// current submission batch) if it was begun with SIMULTANEOUS_USE.
bool CoreChecks::ValidateCommandBufferSimultaneousUse(const CMD_BUFFER_STATE *pCB, int current_submit_count) const {
    const bool allows_simultaneous_use = (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) != 0;
    const bool already_in_flight = pCB->in_use.load() || (current_submit_count > 1);
    if (already_in_flight && !allows_simultaneous_use) {
        return LogError(device, "VUID-vkQueueSubmit-pCommandBuffers-00071",
                        "%s is already in use and is not marked for simultaneous use.",
                        report_data->FormatHandle(pCB->commandBuffer).c_str());
    }
    return false;
}
// Validate that a command buffer is in a submittable state: a ONE_TIME_SUBMIT buffer is
// not being re-submitted, and recording has been completed (vkEndCommandBuffer called,
// buffer not invalidated).
bool CoreChecks::ValidateCommandBufferState(const CMD_BUFFER_STATE *cb_state, const char *call_source, int current_submit_count,
                                            const char *vu_id) const {
    bool skip = false;
    if (disabled[command_buffer_state]) return skip;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
        (cb_state->submitCount + current_submit_count > 1)) {
        // Fixed: message previously read "0x%" PRIxLEAST64 "times." which rendered as
        // e.g. "0x2times." — a space was missing before "times".
        skip |= LogError(cb_state->commandBuffer, kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
                         "%s was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
                         " times.",
                         report_data->FormatHandle(cb_state->commandBuffer).c_str(), cb_state->submitCount + current_submit_count);
    }
    // Validate that cmd buffers have been updated
    switch (cb_state->state) {
        case CB_INVALID_INCOMPLETE:
        case CB_INVALID_COMPLETE:
            // Buffer was invalidated (e.g. a bound object was destroyed) after recording.
            skip |= ReportInvalidCommandBuffer(cb_state, call_source);
            break;
        case CB_NEW:
            // Allocated (or reset) but never recorded.
            skip |= LogError(cb_state->commandBuffer, vu_id, "%s used in the call to %s is unrecorded and contains no commands.",
                             report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
            break;
        case CB_RECORDING:
            // Begun but vkEndCommandBuffer was never called.
            skip |= LogError(cb_state->commandBuffer, kVUID_Core_DrawState_NoEndCommandBuffer,
                             "You must call vkEndCommandBuffer() on %s before this call to %s!",
                             report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
            break;
        default: /* recorded */
            break;
    }
    return skip;
}
// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool CoreChecks::ValidImageBufferQueue(const CMD_BUFFER_STATE *cb_node, const VulkanTypedHandle &object, uint32_t queueFamilyIndex,
                                       uint32_t count, const uint32_t *indices) const {
    // Scan the resource's concurrent-sharing queue family list for the submitting queue's family.
    bool found = false;
    for (uint32_t idx = 0; idx < count && !found; ++idx) {
        found = (indices[idx] == queueFamilyIndex);
    }
    if (found) {
        return false;
    }
    LogObjectList objlist(cb_node->commandBuffer);
    objlist.add(object);
    return LogError(objlist, "VUID-vkQueueSubmit-pSubmits-04626",
                    "vkQueueSubmit: %s contains %s which was not created allowing concurrent access to "
                    "this queue family %d.",
                    report_data->FormatHandle(cb_node->commandBuffer).c_str(), report_data->FormatHandle(object).c_str(),
                    queueFamilyIndex);
}
// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
bool CoreChecks::ValidateQueueFamilyIndices(const CMD_BUFFER_STATE *pCB, VkQueue queue) const {
    bool skip = false;
    auto pool = pCB->command_pool.get();
    auto queue_state = GetQueueState(queue);
    if (pool && queue_state) {
        // A command buffer must be submitted on a queue from the same family its pool was created for.
        if (pool->queueFamilyIndex != queue_state->queueFamilyIndex) {
            LogObjectList objlist(pCB->commandBuffer);
            objlist.add(queue);
            skip |= LogError(objlist, "VUID-vkQueueSubmit-pCommandBuffers-00074",
                             "vkQueueSubmit: Primary %s created in queue family %d is being submitted on %s "
                             "from queue family %d.",
                             report_data->FormatHandle(pCB->commandBuffer).c_str(), pool->queueFamilyIndex,
                             report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex);
        }
        // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
        for (const auto &object : pCB->object_bindings) {
            if (object.type == kVulkanObjectTypeImage) {
                // Prefer the state pointer cached on the binding; fall back to a handle lookup.
                auto image_state = object.node ? (IMAGE_STATE *)object.node : GetImageState(object.Cast<VkImage>());
                if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                    skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex,
                                                  image_state->createInfo.queueFamilyIndexCount,
                                                  image_state->createInfo.pQueueFamilyIndices);
                }
            } else if (object.type == kVulkanObjectTypeBuffer) {
                auto buffer_state = object.node ? (BUFFER_STATE *)object.node : GetBufferState(object.Cast<VkBuffer>());
                if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                    skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex,
                                                  buffer_state->createInfo.queueFamilyIndexCount,
                                                  buffer_state->createInfo.pQueueFamilyIndices);
                }
            }
        }
    }
    return skip;
}
// Validate one primary command buffer passed to vkQueueSubmit: it must actually be a
// primary, each linked secondary must still be legal to execute from it, its QFO
// (queue family ownership) transfers must be consistent, and it must be in a
// recorded, submittable state.
bool CoreChecks::ValidatePrimaryCommandBufferState(const CMD_BUFFER_STATE *pCB, int current_submit_count,
                                                   QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
                                                   QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) const {
    // Track in-use for resources off of primary and any secondary CBs
    bool skip = false;
    if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
        skip |= LogError(pCB->commandBuffer, "VUID-VkSubmitInfo-pCommandBuffers-00075",
                         "Command buffer %s was included in the pCommandBuffers array of QueueSubmit but was allocated with "
                         "VK_COMMAND_BUFFER_LEVEL_SECONDARY.",
                         report_data->FormatHandle(pCB->commandBuffer).c_str());
    } else {
        for (auto sub_cb : pCB->linkedCommandBuffers) {
            skip |= ValidateQueuedQFOTransfers(sub_cb, qfo_image_scoreboards, qfo_buffer_scoreboards);
            // TODO: replace with InvalidateCommandBuffers() at recording.
            // A secondary without SIMULTANEOUS_USE may be bound to only one primary at a time;
            // if it has since been recorded into a different primary, this submit is invalid.
            if ((sub_cb->primaryCommandBuffer != pCB->commandBuffer) &&
                !(sub_cb->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                LogObjectList objlist(device);
                objlist.add(pCB->commandBuffer);
                objlist.add(sub_cb->commandBuffer);
                objlist.add(sub_cb->primaryCommandBuffer);
                skip |= LogError(objlist, "VUID-vkQueueSubmit-pCommandBuffers-00073",
                                 "%s was submitted with secondary %s but that buffer has subsequently been bound to "
                                 "primary %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                                 report_data->FormatHandle(pCB->commandBuffer).c_str(),
                                 report_data->FormatHandle(sub_cb->commandBuffer).c_str(),
                                 report_data->FormatHandle(sub_cb->primaryCommandBuffer).c_str());
            }
        }
    }
    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
    skip |= ValidateCommandBufferSimultaneousUse(pCB, current_submit_count);
    skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
    skip |= ValidateCommandBufferState(pCB, "vkQueueSubmit()", current_submit_count, "VUID-vkQueueSubmit-pCommandBuffers-00072");
    return skip;
}
// A fence handed to a queue submission must be unsignaled and not already owned by
// another pending submission. Only enforceable for fences with internal sync scope.
bool CoreChecks::ValidateFenceForSubmit(const FENCE_STATE *pFence, const char *inflight_vuid, const char *retired_vuid,
                                        const char *func_name) const {
    bool skip = false;
    if (pFence && pFence->scope == kSyncScopeInternal) {
        switch (pFence->state) {
            case FENCE_INFLIGHT:
                skip |= LogError(pFence->fence, inflight_vuid, "%s: %s is already in use by another submission.", func_name,
                                 report_data->FormatHandle(pFence->fence).c_str());
                break;
            case FENCE_RETIRED:
                skip |= LogError(pFence->fence, retired_vuid,
                                 "%s: %s submitted in SIGNALED state. Fences must be reset before being submitted", func_name,
                                 report_data->FormatHandle(pFence->fence).c_str());
                break;
            default:
                // FENCE_UNSIGNALED: valid for submission.
                break;
        }
    }
    return skip;
}
// Record a successful vkQueueSubmit: propagate recorded image layouts and QFO transfer
// bookkeeping for every submitted primary and its linked secondaries.
void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
                                           VkResult result) {
    StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);
    if (result != VK_SUCCESS) return;
    // The triply nested for duplicates that in the StateTracker, but avoids the need for two additional callbacks.
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo &submit = pSubmits[submit_idx];
        for (uint32_t cb_idx = 0; cb_idx < submit.commandBufferCount; cb_idx++) {
            auto primary_cb = GetCBState(submit.pCommandBuffers[cb_idx]);
            if (!primary_cb) continue;
            // Linked secondaries first, then the primary itself.
            for (auto secondary_cb : primary_cb->linkedCommandBuffers) {
                UpdateCmdBufImageLayouts(secondary_cb);
                RecordQueuedQFOTransfers(secondary_cb);
            }
            UpdateCmdBufImageLayouts(primary_cb);
            RecordQueuedQFOTransfers(primary_cb);
        }
    }
}
// Returns true if any not-yet-retired submission on any queue signals the given semaphore.
bool CoreChecks::SemaphoreWasSignaled(VkSemaphore semaphore) const {
    for (const auto &queue_entry : queueMap) {
        for (const auto &submission : queue_entry.second.submissions) {
            for (const auto &signal : submission.signalSemaphores) {
                if (signal.semaphore == semaphore) {
                    return true;
                }
            }
        }
    }
    return false;
}
// Validate the wait/signal semaphores of one VkSubmitInfo for vkQueueSubmit.
// Binary-semaphore signal/unsignal state is threaded across the batch's submits through
// the three caller-owned sets, so each submit sees the effects of the earlier ones.
bool CoreChecks::ValidateSemaphoresForSubmit(VkQueue queue, const VkSubmitInfo *submit, uint32_t submit_index,
                                             unordered_set<VkSemaphore> *unsignaled_sema_arg,
                                             unordered_set<VkSemaphore> *signaled_sema_arg,
                                             unordered_set<VkSemaphore> *internal_sema_arg) const {
    bool skip = false;
    auto &signaled_semaphores = *signaled_sema_arg;
    auto &unsignaled_semaphores = *unsignaled_sema_arg;
    auto &internal_semaphores = *internal_sema_arg;
    auto *timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext);
    const char *vuid_error = device_extensions.vk_khr_timeline_semaphore ? "VUID-vkQueueSubmit-pWaitSemaphores-03238"
                                                                         : "VUID-vkQueueSubmit-pWaitSemaphores-00069";
    for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
        skip |=
            ValidateStageMaskGsTsEnables(submit->pWaitDstStageMask[i], "vkQueueSubmit()",
                                         "VUID-VkSubmitInfo-pWaitDstStageMask-00076", "VUID-VkSubmitInfo-pWaitDstStageMask-00077",
                                         "VUID-VkSubmitInfo-pWaitDstStageMask-02089", "VUID-VkSubmitInfo-pWaitDstStageMask-02090");
        skip |= ValidateStageMaskHost(submit->pWaitDstStageMask[i], "vkQueueSubmit()", "VUID-VkSubmitInfo-pWaitDstStageMask-00078");
        VkSemaphore semaphore = submit->pWaitSemaphores[i];
        const auto *semaphore_state = GetSemaphoreState(semaphore);
        // Timeline semaphores require a chained VkTimelineSemaphoreSubmitInfo with matching value counts.
        if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) {
            skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
                             "VkQueueSubmit: pSubmits[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, but pSubmits[%u] does "
                             "not include an instance of VkTimelineSemaphoreSubmitInfo",
                             submit_index, i, report_data->FormatHandle(semaphore).c_str(), submit_index);
        }
        if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
            submit->waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
            skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03240",
                             "VkQueueSubmit: pSubmits[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, it contains an "
                             "instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different than "
                             "pSubmits[%u].waitSemaphoreCount (%u)",
                             submit_index, i, report_data->FormatHandle(semaphore).c_str(),
                             timeline_semaphore_submit_info->waitSemaphoreValueCount, submit_index, submit->waitSemaphoreCount);
        }
        // Binary semaphores: a wait consumes a signal; flag waits that have no possible signaler.
        if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
            (semaphore_state->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
            if (unsignaled_semaphores.count(semaphore) ||
                (!(signaled_semaphores.count(semaphore)) && !(semaphore_state->signaled) && !SemaphoreWasSignaled(semaphore))) {
                LogObjectList objlist(semaphore);
                objlist.add(queue);
                skip |= LogError(
                    objlist, semaphore_state->scope == kSyncScopeInternal ? vuid_error : kVUID_Core_DrawState_QueueForwardProgress,
                    "vkQueueSubmit: Queue %s is waiting on pSubmits[%u].pWaitSemaphores[%u] (%s) that has no way to be signaled.",
                    report_data->FormatHandle(queue).c_str(), submit_index, i, report_data->FormatHandle(semaphore).c_str());
            } else {
                signaled_semaphores.erase(semaphore);
                unsignaled_semaphores.insert(semaphore);
            }
        }
        if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
            semaphore_state->scope == kSyncScopeExternalTemporary) {
            internal_semaphores.insert(semaphore);
        }
    }
    for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
        VkSemaphore semaphore = submit->pSignalSemaphores[i];
        const auto *semaphore_state = GetSemaphoreState(semaphore);
        if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) {
            skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
                             "VkQueueSubmit: pSubmits[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, but pSubmits[%u] "
                             "does not include an instance of VkTimelineSemaphoreSubmitInfo",
                             submit_index, i, report_data->FormatHandle(semaphore).c_str(), submit_index);
        }
        if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
            submit->signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
            skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03241",
                             "VkQueueSubmit: pSubmits[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, it contains an "
                             "instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different than "
                             "pSubmits[%u].signalSemaphoreCount (%u)",
                             submit_index, i, report_data->FormatHandle(semaphore).c_str(),
                             timeline_semaphore_submit_info->signalSemaphoreValueCount, submit_index, submit->signalSemaphoreCount);
        }
        // Timeline signal values must strictly increase.
        // NOTE(review): pSignalSemaphoreValues is indexed by i without verifying
        // i < signalSemaphoreValueCount; the 03241 check above reports a count mismatch
        // but does not prevent this read — confirm whether a guard is wanted here.
        if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
            timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= semaphore_state->payload) {
            // Fixed: the first and last format arguments were swapped — the value labeled
            // "signal value" was the current payload and vice versa.
            skip |= LogError(semaphore, "VUID-VkSubmitInfo-pSignalSemaphores-03242",
                             "VkQueueSubmit: signal value (0x%" PRIx64
                             ") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64
                             ") in pSubmits[%u].pSignalSemaphores[%u]",
                             timeline_semaphore_submit_info->pSignalSemaphoreValues[i],
                             report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(),
                             semaphore_state->payload, submit_index, i);
        }
        // Binary semaphores: signaling an already-signaled semaphore stalls forward progress.
        if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
            (semaphore_state->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
            if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && semaphore_state->signaled)) {
                LogObjectList objlist(semaphore);
                objlist.add(queue);
                objlist.add(semaphore_state->signaler.first);
                skip |= LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
                                 "vkQueueSubmit: %s is signaling pSubmits[%u].pSignalSemaphores[%u] (%s) that was previously "
                                 "signaled by %s but has not since been waited on by any queue.",
                                 report_data->FormatHandle(queue).c_str(), submit_index, i,
                                 report_data->FormatHandle(semaphore).c_str(),
                                 report_data->FormatHandle(semaphore_state->signaler.first).c_str());
            } else {
                unsignaled_semaphores.erase(semaphore);
                signaled_semaphores.insert(semaphore);
            }
        }
    }
    return skip;
}
// Verify that |value| stays within maxTimelineSemaphoreValueDifference of the timeline
// semaphore's current payload and of every pending signal/wait value for that semaphore
// across all queues.
bool CoreChecks::ValidateMaxTimelineSemaphoreValueDifference(VkSemaphore semaphore, uint64_t value, const char *func_name,
                                                             const char *vuid) const {
    bool skip = false;
    const auto semaphore_state = GetSemaphoreState(semaphore);
    // Fixed: guard against an unknown handle before dereferencing; only timeline
    // semaphores carry a payload to compare against.
    if (!semaphore_state || semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) return false;
    uint64_t diff = value > semaphore_state->payload ? value - semaphore_state->payload : semaphore_state->payload - value;
    if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
        skip |= LogError(semaphore, vuid, "%s: value exceeds limit regarding current semaphore %s payload", func_name,
                         report_data->FormatHandle(semaphore).c_str());
    }
    for (auto &pair : queueMap) {
        const QUEUE_STATE &queue_state = pair.second;
        for (const auto &submission : queue_state.submissions) {
            // Pending signal operations will advance the payload; |value| must stay in range of each.
            for (const auto &signal_semaphore : submission.signalSemaphores) {
                if (signal_semaphore.semaphore == semaphore) {
                    diff = value > signal_semaphore.payload ? value - signal_semaphore.payload : signal_semaphore.payload - value;
                    if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
                        skip |= LogError(semaphore, vuid, "%s: value exceeds limit regarding pending semaphore %s signal value",
                                         func_name, report_data->FormatHandle(semaphore).c_str());
                    }
                }
            }
            // Same range requirement against every pending wait value.
            for (const auto &wait_semaphore : submission.waitSemaphores) {
                if (wait_semaphore.semaphore == semaphore) {
                    diff = value > wait_semaphore.payload ? value - wait_semaphore.payload : wait_semaphore.payload - value;
                    if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
                        skip |= LogError(semaphore, vuid, "%s: value exceeds limit regarding pending semaphore %s wait value",
                                         func_name, report_data->FormatHandle(semaphore).c_str());
                    }
                }
            }
        }
    }
    return skip;
}
// Validate the command buffers of one VkSubmitInfo for vkQueueSubmit: image layouts,
// primary-buffer state, queue family compatibility, descriptor sets bound for
// submit-time validation, and all deferred (queue-submit-time) checks recorded on the
// command buffer. The caller-owned maps/vector accumulate state across submits so later
// submits in the batch are validated against the effects of earlier ones.
bool CoreChecks::ValidateCommandBuffersForSubmit(VkQueue queue, const VkSubmitInfo *submit,
                                                 GlobalImageLayoutMap *overlayImageLayoutMap_arg,
                                                 QueryMap *local_query_to_state_map,
                                                 vector<VkCommandBuffer> *current_cmds_arg) const {
    bool skip = false;
    auto queue_state = GetQueueState(queue);

    GlobalImageLayoutMap &overlay_layout_map = *overlayImageLayoutMap_arg;
    vector<VkCommandBuffer> &current_cmds = *current_cmds_arg;

    QFOTransferCBScoreboards<VkImageMemoryBarrier> qfo_image_scoreboards;
    QFOTransferCBScoreboards<VkBufferMemoryBarrier> qfo_buffer_scoreboards;
    EventToStageMap local_event_to_stage_map;
    const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
    uint32_t perf_pass = perf_submit ? perf_submit->counterPassIndex : 0;

    for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
        const auto *cb_node = GetCBState(submit->pCommandBuffers[i]);
        if (cb_node) {
            skip |= ValidateCmdBufImageLayouts(cb_node, imageLayoutMap, &overlay_layout_map);
            // current_cmds tracks every command buffer seen so far in this batch; the count
            // below detects the same buffer submitted multiple times in one vkQueueSubmit.
            current_cmds.push_back(submit->pCommandBuffers[i]);
            skip |= ValidatePrimaryCommandBufferState(
                cb_node, static_cast<int>(std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i])),
                &qfo_image_scoreboards, &qfo_buffer_scoreboards);
            skip |= ValidateQueueFamilyIndices(cb_node, queue);

            // Re-validate UPDATE_AFTER_BIND descriptor sets now, at submit time.
            for (auto descriptor_set : cb_node->validate_descriptorsets_in_queuesubmit) {
                const cvdescriptorset::DescriptorSet *set_node = GetSetNode(descriptor_set.first);
                if (set_node) {
                    for (auto cmd_info : descriptor_set.second) {
                        std::string function = "vkQueueSubmit(), ";
                        function += cmd_info.function;
                        for (auto binding_info : cmd_info.binding_infos) {
                            std::string error;
                            std::vector<uint32_t> dynamic_offsets;
                            // dynamic data isn't allowed in UPDATE_AFTER_BIND, so dynamicOffsets is always empty.
                            // This submit time not record time...
                            const bool record_time_validate = false;
                            skip |= ValidateDescriptorSetBindingData(cb_node, set_node, dynamic_offsets, binding_info,
                                                                     cmd_info.framebuffer, cmd_info.attachments.get(),
                                                                     *cmd_info.subpasses.get(), record_time_validate,
                                                                     function.c_str(), GetDrawDispatchVuid(cmd_info.cmd_type));
                        }
                    }
                }
            }

            // Potential early exit here as bad object state may crash in delayed function calls
            if (skip) {
                return true;
            }

            // Call submit-time functions to validate or update local mirrors of state (to preserve const-ness at validate time)
            for (auto &function : cb_node->queue_submit_functions) {
                skip |= function(this, queue_state);
            }
            for (auto &function : cb_node->eventUpdates) {
                skip |= function(this, /*do_validate*/ true, &local_event_to_stage_map);
            }
            VkQueryPool first_perf_query_pool = VK_NULL_HANDLE;
            for (auto &function : cb_node->queryUpdates) {
                skip |= function(this, /*do_validate*/ true, first_perf_query_pool, perf_pass, local_query_to_state_map);
            }
        }
    }
    return skip;
}
// Validates vkQueueSubmit(): first the fence, then per-submit semaphore
// ordering, command-buffer state, device-group masks and protected-memory
// consistency; finally (only if nothing failed so far) the
// maxTimelineSemaphoreValueDifference limit for timeline semaphores.
// Returns true if any validation error was logged.
bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
                                            VkFence fence) const {
    const auto *fence_state = GetFenceState(fence);
    bool skip =
        ValidateFenceForSubmit(fence_state, "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueSubmit-fence-00063", "vkQueueSubmit()");
    if (skip) {
        return true;
    }
    // Semaphore and command-buffer tracking is accumulated across ALL submits of
    // this call, so a signal in an earlier submit can satisfy a wait in a later one.
    unordered_set<VkSemaphore> signaled_semaphores;
    unordered_set<VkSemaphore> unsignaled_semaphores;
    unordered_set<VkSemaphore> internal_semaphores;
    vector<VkCommandBuffer> current_cmds;
    GlobalImageLayoutMap overlay_image_layout_map;
    QueryMap local_query_to_state_map;
    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        skip |= ValidateSemaphoresForSubmit(queue, submit, submit_idx, &unsignaled_semaphores, &signaled_semaphores,
                                            &internal_semaphores);
        skip |= ValidateCommandBuffersForSubmit(queue, submit, &overlay_image_layout_map, &local_query_to_state_map, &current_cmds);
        auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupSubmitInfo>(submit->pNext);
        if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) {
            // Each device mask must only reference physical devices that exist in the group.
            for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) {
                skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i], queue,
                                                                "VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
            }
        }
        auto protected_submit_info = LvlFindInChain<VkProtectedSubmitInfo>(submit->pNext);
        if (protected_submit_info) {
            const bool protected_submit = protected_submit_info->protectedSubmit == VK_TRUE;
            // Only check feature once for submit
            if ((protected_submit == true) && (enabled_features.core11.protectedMemory == VK_FALSE)) {
                skip |= LogError(queue, "VUID-VkProtectedSubmitInfo-protectedSubmit-01816",
                                 "vkQueueSubmit(): The protectedMemory device feature is disabled, can't submit a protected queue "
                                 "to %s pSubmits[%u]",
                                 report_data->FormatHandle(queue).c_str(), submit_idx);
            }
            // Make sure command buffers are all protected or unprotected
            for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
                const CMD_BUFFER_STATE *cb_state = GetCBState(submit->pCommandBuffers[i]);
                if (cb_state != nullptr) {
                    if ((cb_state->unprotected == true) && (protected_submit == true)) {
                        LogObjectList objlist(cb_state->commandBuffer);
                        objlist.add(queue);
                        skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04148",
                                         "vkQueueSubmit(): command buffer %s is unprotected while queue %s pSubmits[%u] has "
                                         "VkProtectedSubmitInfo:protectedSubmit set to VK_TRUE",
                                         report_data->FormatHandle(cb_state->commandBuffer).c_str(),
                                         report_data->FormatHandle(queue).c_str(), submit_idx);
                    }
                    if ((cb_state->unprotected == false) && (protected_submit == false)) {
                        LogObjectList objlist(cb_state->commandBuffer);
                        objlist.add(queue);
                        skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04120",
                                         "vkQueueSubmit(): command buffer %s is protected while queue %s pSubmits[%u] has "
                                         "VkProtectedSubmitInfo:protectedSubmit set to VK_FALSE",
                                         report_data->FormatHandle(cb_state->commandBuffer).c_str(),
                                         report_data->FormatHandle(queue).c_str(), submit_idx);
                    }
                }
            }
        }
    }
    if (skip) return skip;
    // Now verify maxTimelineSemaphoreValueDifference
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        auto *info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext);
        if (info) {
            // If there are any timeline semaphores, this condition gets checked before the early return above
            if (info->waitSemaphoreValueCount) {
                for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
                    VkSemaphore semaphore = submit->pWaitSemaphores[i];
                    skip |= ValidateMaxTimelineSemaphoreValueDifference(semaphore, info->pWaitSemaphoreValues[i], "VkQueueSubmit",
                                                                       "VUID-VkSubmitInfo-pWaitSemaphores-03243");
                }
            }
            // If there are any timeline semaphores, this condition gets checked before the early return above
            if (info->signalSemaphoreValueCount) {
                for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
                    VkSemaphore semaphore = submit->pSignalSemaphores[i];
                    skip |= ValidateMaxTimelineSemaphoreValueDifference(semaphore, info->pSignalSemaphoreValues[i], "VkQueueSubmit",
                                                                       "VUID-VkSubmitInfo-pSignalSemaphores-03244");
                }
            }
        }
    }
    return skip;
}
#ifdef AHB_VALIDATION_SUPPORT
// Android-specific validation that uses types defined only on Android and only for NDK versions
// that support the VK_ANDROID_external_memory_android_hardware_buffer extension.
// This chunk could move into a separate core_validation_android.cpp file... ?
// clang-format off
// Map external format and usage flags to/from equivalent Vulkan flags
// (Tables as of v1.1.92)
// AHardwareBuffer Format Vulkan Format
// ====================== =============
// AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM VK_FORMAT_R8G8B8_UNORM
// AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM VK_FORMAT_R5G6B5_UNORM_PACK16
// AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT VK_FORMAT_R16G16B16A16_SFLOAT
// AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM VK_FORMAT_A2B10G10R10_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D16_UNORM VK_FORMAT_D16_UNORM
// AHARDWAREBUFFER_FORMAT_D24_UNORM VK_FORMAT_X8_D24_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT VK_FORMAT_D24_UNORM_S8_UINT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT VK_FORMAT_D32_SFLOAT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT VK_FORMAT_D32_SFLOAT_S8_UINT
// AHARDWAREBUFFER_FORMAT_S8_UINT VK_FORMAT_S8_UINT
// The AHARDWAREBUFFER_FORMAT_* are an enum in the NDK headers, but get passed in to Vulkan
// as uint32_t. Casting the enums here avoids scattering casts around in the code.
// AHardwareBuffer format -> equivalent Vulkan format (used on AHB import).
std::map<uint32_t, VkFormat> ahb_format_map_a2v = {
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,        VK_FORMAT_R8G8B8A8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM,        VK_FORMAT_R8G8B8A8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM,          VK_FORMAT_R8G8B8_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM,          VK_FORMAT_R5G6B5_UNORM_PACK16 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT,    VK_FORMAT_R16G16B16A16_SFLOAT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM,     VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM,             VK_FORMAT_D16_UNORM },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM,             VK_FORMAT_X8_D24_UNORM_PACK32 },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT,     VK_FORMAT_D24_UNORM_S8_UINT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT,             VK_FORMAT_D32_SFLOAT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT,     VK_FORMAT_D32_SFLOAT_S8_UINT },
    { (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT,               VK_FORMAT_S8_UINT }
};

// AHardwareBuffer usage -> equivalent VkImageUsageFlags (AHB -> Vulkan direction).
std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = {
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE,    (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER,      (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE,  0 },   // No equivalent
};

// AHardwareBuffer usage -> equivalent VkImageCreateFlags (AHB -> Vulkan direction).
std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = {
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP,         VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT },
    { (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT,    VK_IMAGE_CREATE_PROTECTED_BIT },
    { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE,  0 },   // No equivalent
};

// VkImageUsageFlags -> required AHardwareBuffer usage (Vulkan -> AHB direction).
std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = {
    { VK_IMAGE_USAGE_SAMPLED_BIT,           (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
    { VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,  (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
    { VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,  (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER  },
    { VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER  },
};

// VkImageCreateFlags -> required AHardwareBuffer usage (Vulkan -> AHB direction).
std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = {
    { VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT,  (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP },
    { VK_IMAGE_CREATE_PROTECTED_BIT,        (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT },
};
// clang-format on
//
// AHB-extension new APIs
//
// Validates vkGetAndroidHardwareBufferPropertiesANDROID: the AHardwareBuffer must
// have been allocated with at least one AHARDWAREBUFFER_USAGE_GPU_* usage flag.
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
    VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
    bool skip = false;
    AHardwareBuffer_Desc desc;
    AHardwareBuffer_describe(buffer, &desc);
    const uint32_t gpu_usage_mask = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
                                    AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
                                    AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
    if ((desc.usage & gpu_usage_mask) == 0) {
        skip |= LogError(device, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884",
                         "vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64
                         ") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.",
                         desc.usage);
    }
    return skip;
}
// Validates vkGetMemoryAndroidHardwareBufferANDROID:
//  * the memory must have been allocated for export with the AHB handle type, and
//  * if it was a dedicated-image allocation, that image must already be bound.
// Fix: guard against GetDevMemState() returning null (untracked/destroyed memory
// handle) before dereferencing mem_info; previously this could null-deref.
bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
                                                                      const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
                                                                      struct AHardwareBuffer **pBuffer) const {
    bool skip = false;
    const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
    if (nullptr == mem_info) {
        // Invalid memory handles are reported by object-lifetime validation; nothing more to check here.
        return skip;
    }
    // VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in
    // VkExportMemoryAllocateInfo::handleTypes when memory was created.
    if (!mem_info->is_export ||
        (0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
        skip |= LogError(device, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882",
                         "vkGetMemoryAndroidHardwareBufferANDROID: %s was not allocated for export, or the "
                         "export handleTypes (0x%" PRIx32
                         ") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
                         report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags);
    }
    // If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo
    // with non-NULL image member, then that image must already be bound to memory.
    if (mem_info->is_dedicated && (VK_NULL_HANDLE != mem_info->dedicated_image)) {
        const auto image_state = GetImageState(mem_info->dedicated_image);
        // count() requires DEVICE_MEMORY_STATE* const & or DEVICE_MEMORY_STATE*, not const DEVICE_MEMORY_STATE*.
        // But here is in a const function. It could get const DEVICE_MEMORY_STATE* only, so cast it.
        if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count((DEVICE_MEMORY_STATE *)mem_info)))) {
            LogObjectList objlist(device);
            objlist.add(pInfo->memory);
            objlist.add(mem_info->dedicated_image);
            skip |= LogError(objlist, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883",
                             "vkGetMemoryAndroidHardwareBufferANDROID: %s was allocated using a dedicated "
                             "%s, but that image is not bound to the VkDeviceMemory object.",
                             report_data->FormatHandle(pInfo->memory).c_str(),
                             report_data->FormatHandle(mem_info->dedicated_image).c_str());
        }
    }
    return skip;
}
//
// AHB-specific validation within non-AHB APIs
//
// AHB-specific checks for vkAllocateMemory, run when the
// VK_ANDROID_external_memory_android_hardware_buffer extension is enabled.
// Handles two paths:
//  * import  — a chained VkImportAndroidHardwareBufferInfoANDROID with non-NULL
//    buffer: validates the AHB's format/usage, importability, allocationSize,
//    memoryTypeIndex, and (for dedicated allocations) image compatibility;
//  * export  — otherwise: validates the allocationSize rules for a dedicated
//    AHB export versus a normal allocation.
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const {
    bool skip = false;
    auto import_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext);
    auto exp_mem_alloc_info = LvlFindInChain<VkExportMemoryAllocateInfo>(alloc_info->pNext);
    auto mem_ded_alloc_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext);
    if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) {
        // This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID
        AHardwareBuffer_Desc ahb_desc = {};
        AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc);
        // Validate AHardwareBuffer_Desc::usage is a valid usage for imported AHB
        //
        // BLOB & GPU_DATA_BUFFER combo specifically allowed
        if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
            // Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables
            // Usage must have at least one bit from the table. It may have additional bits not in the table
            uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
                                            AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
                                            AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
            if (0 == (ahb_desc.usage & ahb_equiv_usage_bits)) {
                skip |=
                    LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881",
                             "vkAllocateMemory: The AHardwareBuffer_Desc's usage (0x%" PRIx64 ") is not compatible with Vulkan.",
                             ahb_desc.usage);
            }
        }
        // Collect external buffer info
        VkPhysicalDeviceExternalBufferInfo pdebi = {};
        pdebi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO;
        pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
        if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
            pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
        }
        if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) {
            pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER];
        }
        VkExternalBufferProperties ext_buf_props = {};
        ext_buf_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES;
        DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props);
        // If buffer is not NULL, Android hardware buffers must be supported for import, as reported by
        // VkExternalImageFormatProperties or VkExternalBufferProperties.
        if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) {
            // Buffer import is not supported; fall back to querying image import support.
            // Collect external format info
            VkPhysicalDeviceExternalImageFormatInfo pdeifi = {};
            pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
            pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
            VkPhysicalDeviceImageFormatInfo2 pdifi2 = {};
            pdifi2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
            pdifi2.pNext = &pdeifi;
            if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format];
            pdifi2.type = VK_IMAGE_TYPE_2D;           // Seems likely
            pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL;  // Ditto
            if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
                pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
            }
            if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) {
                pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER];
            }
            if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) {
                pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP];
            }
            if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) {
                pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT];
            }
            VkExternalImageFormatProperties ext_img_fmt_props = {};
            ext_img_fmt_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
            VkImageFormatProperties2 ifp2 = {};
            ifp2.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
            ifp2.pNext = &ext_img_fmt_props;
            VkResult fmt_lookup_result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &pdifi2, &ifp2);
            if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures &
                                                            VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) {
                skip |= LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880",
                                 "vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties "
                                 "structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag.");
            }
        }
        // Retrieve buffer and format properties of the provided AHardwareBuffer
        VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {};
        ahb_format_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
        VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
        ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
        ahb_props.pNext = &ahb_format_props;
        DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props);
        // allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer
        if (alloc_info->allocationSize != ahb_props.allocationSize) {
            skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-02383",
                             "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
                             "struct, allocationSize (%" PRId64
                             ") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").",
                             alloc_info->allocationSize, ahb_props.allocationSize);
        }
        // memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer
        // Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask
        uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex;
        if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) {
            skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385",
                             "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
                             "struct, memoryTypeIndex (%" PRId32
                             ") does not correspond to a bit set in AHardwareBuffer's reported "
                             "memoryTypeBits bitmask (0x%" PRIx32 ").",
                             alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits);
        }
        // Checks for allocations without a dedicated allocation requirement
        if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) {
            // the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes
            // AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER
            if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) ||
                (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
                skip |= LogError(
                    device, "VUID-VkMemoryAllocateInfo-pNext-02384",
                    "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
                    "struct without a dedicated allocation requirement, while the AHardwareBuffer_Desc's format ( %u ) is not "
                    "AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.",
                    ahb_desc.format, ahb_desc.usage);
            }
        } else {  // Checks specific to import with a dedicated allocation requirement
            const VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo);
            // The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER or
            // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE
            if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) {
                skip |= LogError(
                    device, "VUID-VkMemoryAllocateInfo-pNext-02386",
                    "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a "
                    "dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64
                    ") contains neither AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.",
                    ahb_desc.usage);
            }
            // the format of image must be VK_FORMAT_UNDEFINED or the format returned by
            // vkGetAndroidHardwareBufferPropertiesANDROID
            if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) {
                skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02387",
                                 "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                 "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
                                 "format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).",
                                 string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format));
            }
            // The width, height, and array layer dimensions of image and the Android hardware buffer must be identical
            if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) ||
                (ici->arrayLayers != ahb_desc.layers)) {
                skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02388",
                                 "vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
                                 "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
                                 "width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32
                                 ") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").",
                                 ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height,
                                 ahb_desc.layers);
            }
            // If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must
            // have either a full mipmap chain or exactly 1 mip level.
            //
            // NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The
            // AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag. Instead,
            // its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence indicates
            // that the Android hardware buffer contains only a single mip level."
            //
            // TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct.
            // Clarification requested.
            if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) &&
                (ici->mipLevels != FullMipChainLevels(ici->extent))) {
                skip |=
                    LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02389",
                             "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
                             "usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32
                             ") is neither 1 nor full mip "
                             "chain levels (%" PRId32 ").",
                             ici->mipLevels, FullMipChainLevels(ici->extent));
            }
            // each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a
            // corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's
            // AHardwareBuffer_Desc::usage
            if (ici->usage & ~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
                               VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
                               VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                skip |=
                    LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02390",
                             "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
                             "dedicated image usage bits (0x%" PRIx64
                             ") include an issue not listed in the AHardwareBuffer Usage Equivalence table.",
                             ici->usage);
            }
            std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
                                                     VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                                     VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT};
            for (VkImageUsageFlags ubit : usages) {
                if (ici->usage & ubit) {
                    uint64_t ahb_usage = ahb_usage_map_v2a[ubit];
                    if (0 == (ahb_usage & ahb_desc.usage)) {
                        skip |= LogError(
                            device, "VUID-VkMemoryAllocateInfo-pNext-02390",
                            "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
                            "The dedicated image usage bit %s equivalent is not in AHardwareBuffer_Desc.usage (0x%" PRIx64 ") ",
                            string_VkImageUsageFlags(ubit).c_str(), ahb_desc.usage);
                    }
                }
            }
        }
    } else {  // Not an import
        if ((exp_mem_alloc_info) && (mem_ded_alloc_info) &&
            (0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) &&
            (VK_NULL_HANDLE != mem_ded_alloc_info->image)) {
            // This is an Android HW Buffer export
            if (0 != alloc_info->allocationSize) {
                skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-01874",
                                 "vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, "
                                 "but allocationSize is non-zero.");
            }
        } else {
            if (0 == alloc_info->allocationSize) {
                skip |= LogError(
                    device, "VUID-VkMemoryAllocateInfo-pNext-01874",
                    "vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0.");
            };
        }
    }
    return skip;
}
// Validates that an AHB-external image has already been bound to memory before
// its memory requirements are queried. func_name selects between the
// vkGetImageMemoryRequirements and vkGetImageMemoryRequirements2 VUIDs.
bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const {
    bool skip = false;
    const IMAGE_STATE *image_state = GetImageState(image);
    if (image_state == nullptr) {
        return skip;
    }
    const bool unbound_external_ahb = image_state->external_ahb && (0 == image_state->GetBoundMemory().size());
    if (unbound_external_ahb) {
        const bool is_v1_entry_point = (strcmp(func_name, "vkGetImageMemoryRequirements()") == 0);
        const char *vuid = is_v1_entry_point ? "VUID-vkGetImageMemoryRequirements-image-04004"
                                             : "VUID-VkImageMemoryRequirementsInfo2-image-01897";
        skip |=
            LogError(image, vuid,
                     "%s: Attempt get image memory requirements for an image created with a "
                     "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been "
                     "bound to memory.",
                     func_name);
    }
    return skip;
}
bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
bool skip = false;
const VkAndroidHardwareBufferUsageANDROID *ahb_usage =
LvlFindInChain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext);
if (nullptr != ahb_usage) {
const VkPhysicalDeviceExternalImageFormatInfo *pdeifi =
LvlFindInChain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext);
if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) {
skip |= LogError(device, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868",
"vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained "
"VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained "
"VkPhysicalDeviceExternalImageFormatInfo struct with handleType "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.");
}
}
return skip;
}
// Validates at bind time that memory created via an AHB import carries the
// VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handle type
// declared by the VkBuffer. func_name selects between the vkBindBufferMemory and
// vkBindBufferMemory2 VUIDs.
// Fix: error message previously misspelled the struct name as
// "VkExternalMemoryBufferreateInfo"; corrected to "VkExternalMemoryBufferCreateInfo".
bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
                                                     VkDeviceMemory memory, VkBuffer buffer) const {
    bool skip = false;
    if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) {
        const char *vuid = (strcmp(func_name, "vkBindBufferMemory()") == 0) ? "VUID-vkBindBufferMemory-memory-02986"
                                                                            : "VUID-VkBindBufferMemoryInfo-memory-02986";
        LogObjectList objlist(buffer);
        objlist.add(memory);
        skip |= LogError(objlist, vuid,
                         "%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set "
                         "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkBuffer (%s) "
                         "VkExternalMemoryBufferCreateInfo::handleType (%s)",
                         func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(buffer).c_str(),
                         string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
    }
    return skip;
}
// Validates at bind time that memory created via an AHB import carries the
// VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handle type
// declared by the VkImage. func_name selects between the vkBindImageMemory and
// vkBindImageMemory2 VUIDs.
bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
                                                    VkDeviceMemory memory, VkImage image) const {
    bool skip = false;
    const bool has_ahb_handle_type = (handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) != 0;
    if (has_ahb_handle_type) {
        return skip;
    }
    const bool is_v1_entry_point = (strcmp(func_name, "vkBindImageMemory()") == 0);
    const char *vuid = is_v1_entry_point ? "VUID-vkBindImageMemory-memory-02990" : "VUID-VkBindImageMemoryInfo-memory-02990";
    LogObjectList objlist(image);
    objlist.add(memory);
    skip |= LogError(objlist, vuid,
                     "%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set "
                     "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkImage (%s) "
                     "VkExternalMemoryImageCreateInfo::handleType (%s)",
                     func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(image).c_str(),
                     string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
    return skip;
}
#else  // !AHB_VALIDATION_SUPPORT
// Case building for Android without AHB Validation
// No-op stubs used when the NDK headers are too old to support the
// VK_ANDROID_external_memory_android_hardware_buffer extension types.
// Each returns false (no validation error).
#ifdef VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
    VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
    return false;
}
bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
                                                                      const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
                                                                      struct AHardwareBuffer **pBuffer) const {
    return false;
}
#endif  // VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { return false; }
bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
    const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
    return false;
}
bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const { return false; }
bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
                                                     VkDeviceMemory memory, VkBuffer buffer) const {
    return false;
}
bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
                                                    VkDeviceMemory memory, VkImage image) const {
    return false;
}
#endif  // AHB_VALIDATION_SUPPORT
bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) const {
bool skip = false;
if (memObjMap.size() >= phys_dev_props.limits.maxMemoryAllocationCount) {
skip |= LogError(device, "VUID-vkAllocateMemory-maxMemoryAllocationCount-04101",
"vkAllocateMemory: Number of currently valid memory objects is not less than the maximum allowed (%u).",
phys_dev_props.limits.maxMemoryAllocationCount);
}
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateAllocateMemoryANDROID(pAllocateInfo);
} else {
if (0 == pAllocateInfo->allocationSize) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-00638", "vkAllocateMemory: allocationSize is 0.");
};
}
auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext);
if (chained_flags_struct && chained_flags_struct->flags == VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, device,
"VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675");
skip |=
ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, device, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676");
}
if (pAllocateInfo->memoryTypeIndex >= phys_dev_mem_props.memoryTypeCount) {
skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01714",
"vkAllocateMemory: attempting to allocate memory type %u, which is not a valid index. Device only "
"advertises %u memory types.",
pAllocateInfo->memoryTypeIndex, phys_dev_mem_props.memoryTypeCount);
} else {
const VkMemoryType memory_type = phys_dev_mem_props.memoryTypes[pAllocateInfo->memoryTypeIndex];
if (pAllocateInfo->allocationSize > phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size) {
skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01713",
"vkAllocateMemory: attempting to allocate %" PRIu64
" bytes from heap %u,"
"but size of that heap is only %" PRIu64 " bytes.",
pAllocateInfo->allocationSize, memory_type.heapIndex,
phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size);
}
if (!enabled_features.device_coherent_memory_features.deviceCoherentMemory &&
((memory_type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) != 0)) {
skip |= LogError(device, "VUID-vkAllocateMemory-deviceCoherentMemory-02790",
"vkAllocateMemory: attempting to allocate memory type %u, which includes the "
"VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD memory property, but the deviceCoherentMemory feature "
"is not enabled.",
pAllocateInfo->memoryTypeIndex);
}
if ((enabled_features.core11.protectedMemory == VK_FALSE) &&
((memory_type.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-01872",
"vkAllocateMemory(): attempting to allocate memory type %u, which includes the "
"VK_MEMORY_PROPERTY_PROTECTED_BIT memory property, but the protectedMemory feature "
"is not enabled.",
pAllocateInfo->memoryTypeIndex);
}
}
bool imported_ahb = false;
#ifdef AHB_VALIDATION_SUPPORT
// "memory is not an imported Android Hardware Buffer" refers to VkImportAndroidHardwareBufferInfoANDROID with a non-NULL
// buffer value. Memory imported has another VUID to check size and allocationSize match up
auto imported_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo->pNext);
if (imported_ahb_info != nullptr) {
imported_ahb = imported_ahb_info->buffer != nullptr;
}
#endif // AHB_VALIDATION_SUPPORT
auto dedicated_allocate_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(pAllocateInfo->pNext);
if (dedicated_allocate_info) {
if ((dedicated_allocate_info->buffer != VK_NULL_HANDLE) && (dedicated_allocate_info->image != VK_NULL_HANDLE)) {
skip |= LogError(device, "VUID-VkMemoryDedicatedAllocateInfo-image-01432",
"vkAllocateMemory: Either buffer or image has to be VK_NULL_HANDLE in VkMemoryDedicatedAllocateInfo");
} else if (dedicated_allocate_info->image != VK_NULL_HANDLE) {
// Dedicated VkImage
const IMAGE_STATE *image_state = GetImageState(dedicated_allocate_info->image);
if (image_state->disjoint == true) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-image-01797",
"vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_IMAGE_CREATE_DISJOINT_BIT",
report_data->FormatHandle(dedicated_allocate_info->image).c_str());
} else {
if ((pAllocateInfo->allocationSize != image_state->requirements.size) && (imported_ahb == false)) {
const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkMemoryDedicatedAllocateInfo-image-02964"
: "VUID-VkMemoryDedicatedAllocateInfo-image-01433";
skip |= LogError(
device, vuid,
"vkAllocateMemory: Allocation Size (%u) needs to be equal to VkImage %s VkMemoryRequirements::size (%u)",
pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->image).c_str(),
image_state->requirements.size);
}
if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) != 0) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-image-01434",
"vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_IMAGE_CREATE_SPARSE_BINDING_BIT",
report_data->FormatHandle(dedicated_allocate_info->image).c_str());
}
}
} else if (dedicated_allocate_info->buffer != VK_NULL_HANDLE) {
// Dedicated VkBuffer
const BUFFER_STATE *buffer_state = GetBufferState(dedicated_allocate_info->buffer);
if ((pAllocateInfo->allocationSize != buffer_state->requirements.size) && (imported_ahb == false)) {
const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkMemoryDedicatedAllocateInfo-buffer-02965"
: "VUID-VkMemoryDedicatedAllocateInfo-buffer-01435";
skip |= LogError(
device, vuid,
"vkAllocateMemory: Allocation Size (%u) needs to be equal to VkBuffer %s VkMemoryRequirements::size (%u)",
pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->buffer).c_str(),
buffer_state->requirements.size);
}
if ((buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) != 0) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-buffer-01436",
"vkAllocateMemory: VkBuffer %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_BUFFER_CREATE_SPARSE_BINDING_BIT",
report_data->FormatHandle(dedicated_allocate_info->buffer).c_str());
}
}
}
// TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744
return skip;
}
// Reports a validation error if the given object is still referenced by an in-flight
// command buffer. Returns true when an error was logged; always false when the
// object_in_use check category is disabled.
bool CoreChecks::ValidateObjectNotInUse(const BASE_NODE *obj_node, const VulkanTypedHandle &obj_struct, const char *caller_name,
                                        const char *error_code) const {
    if (disabled[object_in_use]) {
        return false;
    }
    if (!obj_node->in_use.load()) {
        return false;
    }
    return LogError(device, error_code, "Cannot call %s on %s that is currently in use by a command buffer.", caller_name,
                    report_data->FormatHandle(obj_struct).c_str());
}
// vkFreeMemory: the memory object must not still be in use by a command buffer.
bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) const {
    const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
    if (!mem_info) {
        return false;
    }
    const VulkanTypedHandle obj_struct(mem, kVulkanObjectTypeDeviceMemory);
    return ValidateObjectNotInUse(mem_info, obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677");
}
// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
// and that the size of the map range should be:
// 1. Not zero
// 2. Within the size of the memory allocation
bool CoreChecks::ValidateMapMemRange(const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize offset, VkDeviceSize size) const {
bool skip = false;
assert(mem_info);
const auto mem = mem_info->mem;
if (size == 0) {
skip = LogError(mem, "VUID-vkMapMemory-size-00680", "VkMapMemory: Attempting to map memory range of size zero");
}
// It is an application error to call VkMapMemory on an object that is already mapped
if (mem_info->mapped_range.size != 0) {
skip = LogError(mem, "VUID-vkMapMemory-memory-00678", "VkMapMemory: Attempting to map memory on an already-mapped %s.",
report_data->FormatHandle(mem).c_str());
}
// Validate offset is not over allocaiton size
if (offset >= mem_info->alloc_info.allocationSize) {
skip = LogError(mem, "VUID-vkMapMemory-offset-00679",
"VkMapMemory: Attempting to map memory with an offset of 0x%" PRIx64
" which is larger than the total array size 0x%" PRIx64,
offset, mem_info->alloc_info.allocationSize);
}
// Validate that offset + size is within object's allocationSize
if (size != VK_WHOLE_SIZE) {
if ((offset + size) > mem_info->alloc_info.allocationSize) {
skip = LogError(mem, "VUID-vkMapMemory-size-00681",
"VkMapMemory: Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64
".",
offset, size + offset, mem_info->alloc_info.allocationSize);
}
}
return skip;
}
// vkWaitForFences: every fence in the array must be verifiable against its queue state.
bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
                                              uint64_t timeout) const {
    bool skip = false;
    for (uint32_t fence_idx = 0; fence_idx < fenceCount; ++fence_idx) {
        skip |= VerifyQueueStateToFence(pFences[fence_idx]);
    }
    return skip;
}
// vkGetDeviceQueue: queueFamilyIndex must be valid, queueIndex must be below the
// number of queues requested for that family at device creation, and the family
// must have been created with zero flags (otherwise vkGetDeviceQueue2 is required).
bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
                                               VkQueue *pQueue) const {
    bool skip = ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex",
                                          "VUID-vkGetDeviceQueue-queueFamilyIndex-00384");
    const auto count_iter = queue_family_index_map.find(queueFamilyIndex);
    if ((count_iter != queue_family_index_map.end()) && (count_iter->second <= queueIndex)) {
        skip |= LogError(device, "VUID-vkGetDeviceQueue-queueIndex-00385",
                         "vkGetDeviceQueue: queueIndex (=%" PRIu32
                         ") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32
                         ") when the device was created (i.e. is not less than %" PRIu32 ").",
                         queueIndex, queueFamilyIndex, count_iter->second);
    }
    const auto flags_iter = queue_family_create_flags_map.find(queueFamilyIndex);
    if ((flags_iter != queue_family_create_flags_map.end()) && (flags_iter->second != 0)) {
        skip |= LogError(device, "VUID-vkGetDeviceQueue-flags-01841",
                         "vkGetDeviceQueue: queueIndex (=%" PRIu32
                         ") was created with a non-zero VkDeviceQueueCreateFlags. Need to use vkGetDeviceQueue2 instead.",
                         queueIndex);
    }
    return skip;
}
// vkQueueWaitIdle: verify that every submission currently pending on the queue can complete.
bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) const {
    const QUEUE_STATE *queue_state = GetQueueState(queue);
    const auto target_seq = queue_state->seq + queue_state->submissions.size();
    return VerifyQueueStateToSeq(queue_state, target_seq);
}
// vkDeviceWaitIdle: verify pending submissions on every queue of the device can complete.
bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) const {
    bool skip = false;
    for (const auto &entry : queueMap) {
        const auto &queue_state = entry.second;
        skip |= VerifyQueueStateToSeq(&queue_state, queue_state.seq + queue_state.submissions.size());
    }
    return skip;
}
// vkCreateSemaphore: timeline semaphores require the timelineSemaphore feature (or the
// KHR extension), and binary semaphores must be created with an initial value of zero.
bool CoreChecks::PreCallValidateCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) const {
    bool skip = false;
    const auto *type_info = LvlFindInChain<VkSemaphoreTypeCreateInfo>(pCreateInfo->pNext);
    if (type_info) {
        if ((type_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE) && !enabled_features.core12.timelineSemaphore &&
            !device_extensions.vk_khr_timeline_semaphore) {
            skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-timelineSemaphore-03252",
                             "VkCreateSemaphore: timelineSemaphore feature is not enabled, can not create timeline semaphores");
        }
        if ((type_info->semaphoreType == VK_SEMAPHORE_TYPE_BINARY) && (type_info->initialValue != 0)) {
            skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-semaphoreType-03279",
                             "vkCreateSemaphore: if semaphoreType is VK_SEMAPHORE_TYPE_BINARY, initialValue must be zero");
        }
    }
    return skip;
}
// vkWaitSemaphores entry point; defers to the shared timeline-semaphore wait validation.
bool CoreChecks::PreCallValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const {
    return ValidateWaitSemaphores(device, pWaitInfo, timeout, "VkWaitSemaphores");
}
// vkWaitSemaphoresKHR entry point; defers to the shared timeline-semaphore wait validation.
bool CoreChecks::PreCallValidateWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const {
    return ValidateWaitSemaphores(device, pWaitInfo, timeout, "VkWaitSemaphoresKHR");
}
// Shared implementation for vkWaitSemaphores{,KHR}: every semaphore in pWaitInfo
// must be a timeline semaphore.
bool CoreChecks::ValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout,
                                        const char *apiName) const {
    bool skip = false;
    for (uint32_t sem_idx = 0; sem_idx < pWaitInfo->semaphoreCount; ++sem_idx) {
        const VkSemaphore semaphore = pWaitInfo->pSemaphores[sem_idx];
        const auto *semaphore_state = GetSemaphoreState(semaphore);
        if (semaphore_state && (semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE)) {
            skip |= LogError(semaphore, "VUID-VkSemaphoreWaitInfo-pSemaphores-03256",
                             "%s(): all semaphores in pWaitInfo must be timeline semaphores, but %s is not", apiName,
                             report_data->FormatHandle(semaphore).c_str());
        }
    }
    return skip;
}
// vkDestroyFence: an internally-scoped fence must not be in flight on a queue.
bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) const {
    const FENCE_STATE *fence_node = GetFenceState(fence);
    if (fence_node && (fence_node->scope == kSyncScopeInternal) && (fence_node->state == FENCE_INFLIGHT)) {
        return LogError(fence, "VUID-vkDestroyFence-fence-01120", "%s is in use.", report_data->FormatHandle(fence).c_str());
    }
    return false;
}
// vkDestroySemaphore: the semaphore must not be in use by a command buffer.
bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore,
                                                 const VkAllocationCallbacks *pAllocator) const {
    const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
    if (!sema_node) {
        return false;
    }
    const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore);
    return ValidateObjectNotInUse(sema_node, obj_struct, "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137");
}
// vkDestroyEvent: the event must not be in use by a command buffer.
bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) const {
    const EVENT_STATE *event_state = GetEventState(event);
    if (!event_state) {
        return false;
    }
    const VulkanTypedHandle obj_struct(event, kVulkanObjectTypeEvent);
    return ValidateObjectNotInUse(event_state, obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145");
}
// vkDestroyQueryPool: the query pool must not be in use by a command buffer.
bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool,
                                                 const VkAllocationCallbacks *pAllocator) const {
    if (disabled[query_validation]) return false;
    const QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool);
    if (!qp_state) {
        return false;
    }
    const VulkanTypedHandle obj_struct(queryPool, kVulkanObjectTypeQueryPool);
    return ValidateObjectNotInUse(qp_state, obj_struct, "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793");
}
// Validates results readback for a VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR pool:
// flags may not request availability/partial/64-bit results, and every query in
// [firstQuery, firstQuery + queryCount) must have been submitted for all
// performance passes of the pool.
bool CoreChecks::ValidatePerformanceQueryResults(const char *cmd_name, const QUERY_POOL_STATE *query_pool_state,
                                                 uint32_t firstQuery, uint32_t queryCount, VkQueryResultFlags flags) const {
    bool skip = false;

    if (flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_64_BIT)) {
        // Build a human-readable list of the offending flag bits for the message
        string invalid_flags_string;
        for (auto flag : {VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, VK_QUERY_RESULT_PARTIAL_BIT, VK_QUERY_RESULT_64_BIT}) {
            if (flag & flags) {
                if (invalid_flags_string.size()) {
                    invalid_flags_string += " and ";
                }
                invalid_flags_string += string_VkQueryResultFlagBits(flag);
            }
        }
        skip |= LogError(query_pool_state->pool,
                         strcmp(cmd_name, "vkGetQueryPoolResults") == 0 ? "VUID-vkGetQueryPoolResults-queryType-03230"
                                                                        : "VUID-vkCmdCopyQueryPoolResults-queryType-03233",
                         "%s: QueryPool %s was created with a queryType of"
                         "VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but flags contains %s.",
                         cmd_name, report_data->FormatHandle(query_pool_state->pool).c_str(), invalid_flags_string.c_str());
    }

    // Fix: iterate the requested range [firstQuery, firstQuery + queryCount); the previous
    // bound (query_index < queryCount) skipped queries whenever firstQuery > 0 and checked
    // nothing at all once firstQuery >= queryCount.
    for (uint32_t query_index = firstQuery; query_index < (firstQuery + queryCount); query_index++) {
        // A query counts as submitted for a pass when its state for that pass is AVAILABLE
        uint32_t submitted = 0;
        for (uint32_t pass_index = 0; pass_index < query_pool_state->n_performance_passes; pass_index++) {
            QueryObject obj(QueryObject(query_pool_state->pool, query_index), pass_index);
            auto query_pass_iter = queryToStateMap.find(obj);
            if (query_pass_iter != queryToStateMap.end() && query_pass_iter->second == QUERYSTATE_AVAILABLE) submitted++;
        }
        if (submitted < query_pool_state->n_performance_passes) {
            skip |= LogError(query_pool_state->pool, "VUID-vkGetQueryPoolResults-queryType-03231",
                             "%s: QueryPool %s has %u performance query passes, but the query has only been "
                             "submitted for %u of the passes.",
                             cmd_name, report_data->FormatHandle(query_pool_state->pool).c_str(),
                             query_pool_state->n_performance_passes, submitted);
        }
    }

    return skip;
}
// For a performance-query pool, checks that pData and stride are both multiples of
// sizeof(VkPerformanceCounterResultKHR), then validates per-pass submission state.
// No-op for pools of any other query type.
bool CoreChecks::ValidateGetQueryPoolPerformanceResults(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                        void *pData, VkDeviceSize stride, VkQueryResultFlags flags,
                                                        const char *apiName) const {
    bool skip = false;
    const auto query_pool_state = GetQueryPoolState(queryPool);
    if (!query_pool_state || query_pool_state->createInfo.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return skip;

    const bool data_misaligned = (((uintptr_t)pData) % sizeof(VkPerformanceCounterResultKHR)) != 0;
    const bool stride_misaligned = (stride % sizeof(VkPerformanceCounterResultKHR)) != 0;
    if (data_misaligned || stride_misaligned) {
        skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-03229",
                         "%s(): QueryPool %s was created with a queryType of "
                         "VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but pData & stride are not multiple of the "
                         "size of VkPerformanceCounterResultKHR.",
                         apiName, report_data->FormatHandle(queryPool).c_str());
    }
    skip |= ValidatePerformanceQueryResults(apiName, query_pool_state, firstQuery, queryCount, flags);
    return skip;
}
// vkGetQueryPoolResults: validates stride/flags consistency, the query index range,
// performance-query constraints, and finally that dataSize is large enough to hold
// queryCount results of the pool's query type at the given stride.
bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                    uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                    VkQueryResultFlags flags) const {
    if (disabled[query_validation]) return false;
    bool skip = false;
    skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-02827", "VUID-vkGetQueryPoolResults-flags-00815", stride,
                                    "dataSize", dataSize, flags);
    skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkGetQueryPoolResults()",
                                   "VUID-vkGetQueryPoolResults-firstQuery-00813", "VUID-vkGetQueryPoolResults-firstQuery-00816");
    skip |=
        ValidateGetQueryPoolPerformanceResults(queryPool, firstQuery, queryCount, pData, stride, flags, "vkGetQueryPoolResults");

    const auto query_pool_state = GetQueryPoolState(queryPool);
    if (query_pool_state) {
        if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
            skip |= LogError(
                queryPool, "VUID-vkGetQueryPoolResults-queryType-00818",
                "%s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
                report_data->FormatHandle(queryPool).c_str());
        }

        if (!skip) {
            // Compute how many bytes one query's result occupies for this pool/flags combination
            uint32_t query_avail_data = (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? 1 : 0;
            uint32_t query_size_in_bytes = (flags & VK_QUERY_RESULT_64_BIT) ? sizeof(uint64_t) : sizeof(uint32_t);
            uint32_t query_items = 0;
            uint32_t query_size = 0;

            switch (query_pool_state->createInfo.queryType) {
                case VK_QUERY_TYPE_OCCLUSION:
                    // Occlusion queries write one integer value - the number of samples passed.
                    query_items = 1;
                    query_size = query_size_in_bytes * (query_items + query_avail_data);
                    break;

                case VK_QUERY_TYPE_PIPELINE_STATISTICS:
                    // Pipeline statistics queries write one integer value for each bit that is enabled in the pipelineStatistics
                    // when the pool is created
                    {
                        const int num_bits = sizeof(VkFlags) * CHAR_BIT;
                        std::bitset<num_bits> pipe_stats_bits(query_pool_state->createInfo.pipelineStatistics);
                        query_items = static_cast<uint32_t>(pipe_stats_bits.count());
                        query_size = query_size_in_bytes * (query_items + query_avail_data);
                    }
                    break;

                case VK_QUERY_TYPE_TIMESTAMP:
                    // Timestamp queries write one integer
                    query_items = 1;
                    query_size = query_size_in_bytes * (query_items + query_avail_data);
                    break;

                case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
                    // Transform feedback queries write two integers
                    query_items = 2;
                    query_size = query_size_in_bytes * (query_items + query_avail_data);
                    break;

                case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
                    // Performance queries store results in a tightly packed array of VkPerformanceCounterResultsKHR
                    query_items = query_pool_state->perf_counter_index_count;
                    query_size = sizeof(VkPerformanceCounterResultKHR) * query_items;
                    if (query_size > stride) {
                        // Fix: sizeof() yields size_t, but the corresponding conversion below is %d;
                        // passing a size_t for %d is undefined behavior on LP64 platforms, so cast
                        // the value to uint32_t before handing it to the varargs list.
                        skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-04519",
                                         "vkGetQueryPoolResults() on querypool %s specified stride %" PRIu64
                                         " which must be at least counterIndexCount (%d) "
                                         "multiplied by sizeof(VkPerformanceCounterResultKHR) (%d).",
                                         report_data->FormatHandle(queryPool).c_str(), stride, query_items,
                                         static_cast<uint32_t>(sizeof(VkPerformanceCounterResultKHR)));
                    }
                    break;

                // These cases intentionally fall through to the default
                case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:  // VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV
                case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
                case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL:
                default:
                    // Unknown/opaque result layout - skip the dataSize check below
                    query_size = 0;
                    break;
            }

            if (query_size && (((queryCount - 1) * stride + query_size) > dataSize)) {
                skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-dataSize-00817",
                                 "vkGetQueryPoolResults() on querypool %s specified dataSize %zu which is "
                                 "incompatible with the specified query type and options.",
                                 report_data->FormatHandle(queryPool).c_str(), dataSize);
            }
        }
    }
    return skip;
}
// Checks that memoryOffset for a bind operation falls strictly inside the
// allocation, selecting the VUID that matches both the bound object type and
// the calling entry point (bind vs bind2).
bool CoreChecks::ValidateInsertMemoryRange(const VulkanTypedHandle &typed_handle, const DEVICE_MEMORY_STATE *mem_info,
                                           VkDeviceSize memoryOffset, const char *api_name) const {
    if (memoryOffset < mem_info->alloc_info.allocationSize) {
        return false;
    }
    const char *error_code = nullptr;
    switch (typed_handle.type) {
        case kVulkanObjectTypeBuffer:
            error_code = (strcmp(api_name, "vkBindBufferMemory()") == 0) ? "VUID-vkBindBufferMemory-memoryOffset-01031"
                                                                         : "VUID-VkBindBufferMemoryInfo-memoryOffset-01031";
            break;
        case kVulkanObjectTypeImage:
            error_code = (strcmp(api_name, "vkBindImageMemory()") == 0) ? "VUID-vkBindImageMemory-memoryOffset-01046"
                                                                        : "VUID-VkBindImageMemoryInfo-memoryOffset-01046";
            break;
        case kVulkanObjectTypeAccelerationStructureNV:
            error_code = "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03621";
            break;
        default:
            // Unsupported object type
            assert(false);
            break;
    }
    LogObjectList objlist(mem_info->mem);
    objlist.add(typed_handle);
    return LogError(objlist, error_code,
                    "In %s, attempting to bind %s to %s, memoryOffset=0x%" PRIxLEAST64
                    " must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
                    api_name, report_data->FormatHandle(mem_info->mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
                    memoryOffset, mem_info->alloc_info.allocationSize);
}
// Image-flavored convenience wrapper over ValidateInsertMemoryRange.
bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
                                                const char *api_name) const {
    const VulkanTypedHandle handle(image, kVulkanObjectTypeImage);
    return ValidateInsertMemoryRange(handle, mem_info, mem_offset, api_name);
}
// Buffer-flavored convenience wrapper over ValidateInsertMemoryRange.
bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
                                                 const char *api_name) const {
    const VulkanTypedHandle handle(buffer, kVulkanObjectTypeBuffer);
    return ValidateInsertMemoryRange(handle, mem_info, mem_offset, api_name);
}
// Acceleration-structure-flavored convenience wrapper over ValidateInsertMemoryRange.
bool CoreChecks::ValidateInsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, const DEVICE_MEMORY_STATE *mem_info,
                                                                VkDeviceSize mem_offset, const char *api_name) const {
    const VulkanTypedHandle handle(as, kVulkanObjectTypeAccelerationStructureNV);
    return ValidateInsertMemoryRange(handle, mem_info, mem_offset, api_name);
}
// Checks that the allocation's memory type index is one of the types allowed by
// the object's VkMemoryRequirements::memoryTypeBits mask.
bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEMORY_STATE *mem_info, const uint32_t memory_type_bits, const char *funcName,
                                     const char *msgCode) const {
    bool skip = false;
    // Fix: shift an unsigned literal. memoryTypeIndex can be up to 31 (VK_MAX_MEMORY_TYPES - 1),
    // and (1 << 31) on a signed int is undefined behavior.
    if (((1u << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip = LogError(mem_info->mem, msgCode,
                        "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                        "type (0x%X) of %s.",
                        funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
                        report_data->FormatHandle(mem_info->mem).c_str());
    }
    return skip;
}
// Shared implementation for vkBindBufferMemory and vkBindBufferMemory2{,KHR}.
// Validates a single buffer<->memory binding: the binding target itself, the
// memory offset alignment, memory type compatibility, remaining allocation size,
// dedicated-allocation rules, device-address allocation flags, external-memory
// handle types (export and import, including Android hardware buffers), and
// protected-memory consistency. api_name identifies the calling entry point and
// is used both in messages and to select the matching per-entry-point VUIDs.
bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                          const char *api_name) const {
    const BUFFER_STATE *buffer_state = GetBufferState(buffer);
    // Any api_name other than "vkBindBufferMemory()" is treated as the *2 (VkBindBufferMemoryInfo) path
    bool bind_buffer_mem_2 = strcmp(api_name, "vkBindBufferMemory()") != 0;
    bool skip = false;
    if (buffer_state) {
        // Track objects tied to memory
        const VulkanTypedHandle obj_struct(buffer, kVulkanObjectTypeBuffer);
        skip = ValidateSetMemBinding(mem, obj_struct, api_name);
        const auto mem_info = GetDevMemState(mem);
        // Validate memory requirements alignment
        if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            const char *vuid =
                bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memoryOffset-01036" : "VUID-vkBindBufferMemory-memoryOffset-01036";
            skip |= LogError(buffer, vuid,
                             "%s: memoryOffset is 0x%" PRIxLEAST64
                             " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                             ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
                             api_name, memoryOffset, buffer_state->requirements.alignment);
        }

        if (mem_info) {
            // Validate bound memory range information
            skip |= ValidateInsertBufferMemoryRange(buffer, mem_info, memoryOffset, api_name);

            const char *mem_type_vuid =
                bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01035" : "VUID-vkBindBufferMemory-memory-01035";
            skip |= ValidateMemoryTypes(mem_info, buffer_state->requirements.memoryTypeBits, api_name, mem_type_vuid);

            // Validate memory requirements size
            if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
                const char *vuid =
                    bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-size-01037" : "VUID-vkBindBufferMemory-size-01037";
                skip |= LogError(buffer, vuid,
                                 "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                 ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
                                 api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
            }

            // Validate dedicated allocation
            // A dedicated allocation must be bound to exactly the buffer named at allocation time, at offset zero
            if (mem_info->is_dedicated && (mem_info->dedicated_buffer != VK_NULL_HANDLE) &&
                ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
                const char *vuid =
                    bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01508" : "VUID-vkBindBufferMemory-memory-01508";
                LogObjectList objlist(buffer);
                objlist.add(mem);
                objlist.add(mem_info->dedicated_buffer);
                skip |= LogError(objlist, vuid,
                                 "%s: for dedicated %s, VkMemoryDedicatedAllocateInfo::buffer %s must be equal "
                                 "to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
                                 api_name, report_data->FormatHandle(mem).c_str(),
                                 report_data->FormatHandle(mem_info->dedicated_buffer).c_str(),
                                 report_data->FormatHandle(buffer).c_str(), memoryOffset);
            }

            // A device-address buffer needs memory allocated with VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT
            auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext);
            if (enabled_features.core12.bufferDeviceAddress &&
                (buffer_state->createInfo.usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT) &&
                (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT))) {
                // NOTE(review): the same VUID is used for both the 1 and 2 entry points here, unlike
                // the other checks in this function -- confirm whether the VkBindBufferMemoryInfo
                // path should report its own VUID.
                skip |= LogError(buffer, "VUID-vkBindBufferMemory-bufferDeviceAddress-03339",
                                 "%s: If buffer was created with the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT bit set, "
                                 "memory must have been allocated with the VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT bit set.",
                                 api_name);
            }

            // Validate export memory handles
            if ((mem_info->export_handle_type_flags != 0) &&
                ((mem_info->export_handle_type_flags & buffer_state->external_memory_handle) == 0)) {
                const char *vuid =
                    bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-02726" : "VUID-vkBindBufferMemory-memory-02726";
                LogObjectList objlist(buffer);
                objlist.add(mem);
                skip |= LogError(objlist, vuid,
                                 "%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least one "
                                 "handle from VkBuffer (%s) handleType %s.",
                                 api_name, report_data->FormatHandle(mem).c_str(),
                                 string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(),
                                 report_data->FormatHandle(buffer).c_str(),
                                 string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str());
            }

            // Validate import memory handles
            if (mem_info->is_import_ahb == true) {
                // Android hardware buffer imports have their own dedicated checks
                skip |= ValidateBufferImportedHandleANDROID(api_name, buffer_state->external_memory_handle, mem, buffer);
            } else if (mem_info->is_import == true) {
                if ((mem_info->import_handle_type_flags & buffer_state->external_memory_handle) == 0) {
                    // VUID depends on both the entry point and whether the AHB extension is enabled
                    const char *vuid = nullptr;
                    if ((bind_buffer_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
                        vuid = "VUID-VkBindBufferMemoryInfo-memory-02985";
                    } else if ((!bind_buffer_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
                        vuid = "VUID-vkBindBufferMemory-memory-02985";
                    } else if ((bind_buffer_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
                        vuid = "VUID-VkBindBufferMemoryInfo-memory-02727";
                    } else if ((!bind_buffer_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
                        vuid = "VUID-vkBindBufferMemory-memory-02727";
                    }
                    LogObjectList objlist(buffer);
                    objlist.add(mem);
                    skip |= LogError(objlist, vuid,
                                     "%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s which "
                                     "is not set in the VkBuffer (%s) VkExternalMemoryBufferCreateInfo::handleType (%s)",
                                     api_name, report_data->FormatHandle(mem).c_str(),
                                     string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(),
                                     report_data->FormatHandle(buffer).c_str(),
                                     string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str());
                }
            }

            // Validate mix of protected buffer and memory
            if ((buffer_state->unprotected == false) && (mem_info->unprotected == true)) {
                const char *vuid =
                    bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01898" : "VUID-vkBindBufferMemory-None-01898";
                LogObjectList objlist(buffer);
                objlist.add(mem);
                skip |= LogError(objlist, vuid,
                                 "%s: The VkDeviceMemory (%s) was not created with protected memory but the VkBuffer (%s) was set "
                                 "to use protected memory.",
                                 api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str());
            } else if ((buffer_state->unprotected == true) && (mem_info->unprotected == false)) {
                const char *vuid =
                    bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01899" : "VUID-vkBindBufferMemory-None-01899";
                LogObjectList objlist(buffer);
                objlist.add(mem);
                skip |= LogError(objlist, vuid,
                                 "%s: The VkDeviceMemory (%s) was created with protected memory but the VkBuffer (%s) was not set "
                                 "to use protected memory.",
                                 api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str());
            }
        }
    }
    return skip;
}
// vkBindBufferMemory entry point; defers to the shared bind-buffer validation.
bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem,
                                                 VkDeviceSize memoryOffset) const {
    return ValidateBindBufferMemory(buffer, mem, memoryOffset, "vkBindBufferMemory()");
}
// vkBindBufferMemory2: validate each VkBindBufferMemoryInfo element, labeling any
// error message with the array index of the offending element.
bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
                                                  const VkBindBufferMemoryInfo *pBindInfos) const {
    char api_name[64];
    bool skip = false;
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        // Fix: snprintf (vs sprintf) bounds the write so any future growth of the
        // label cannot overflow the stack buffer.
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2() pBindInfos[%u]", i);
        skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
    }
    return skip;
}
// vkBindBufferMemory2KHR: validate each VkBindBufferMemoryInfo element, labeling any
// error message with the array index of the offending element.
bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                     const VkBindBufferMemoryInfo *pBindInfos) const {
    char api_name[64];
    bool skip = false;
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        // Fix: snprintf (vs sprintf) bounds the write so any future growth of the
        // label cannot overflow the stack buffer.
        snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2KHR() pBindInfos[%u]", i);
        skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
    }
    return skip;
}
// vkGetImageMemoryRequirements: disjoint images must use the "2" entry point;
// images on devices with the Android external-memory extension get extra checks.
bool CoreChecks::PreCallValidateGetImageMemoryRequirements(VkDevice device, VkImage image,
                                                           VkMemoryRequirements *pMemoryRequirements) const {
    bool skip = false;
    if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
        skip |= ValidateGetImageMemoryRequirementsANDROID(image, "vkGetImageMemoryRequirements()");
    }
    const IMAGE_STATE *image_state = GetImageState(image);
    if (image_state && (image_state->disjoint == true)) {
        skip |= LogError(image, "VUID-vkGetImageMemoryRequirements-image-01588",
                         "vkGetImageMemoryRequirements(): %s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT "
                         "(need to use vkGetImageMemoryRequirements2).",
                         report_data->FormatHandle(image).c_str());
    }
    return skip;
}
// Shared validation for vkGetImageMemoryRequirements2{,KHR}: enforces the rules
// around VkImagePlaneMemoryRequirementsInfo in the pNext chain (required for
// disjoint multi-planar images, forbidden otherwise) and that any requested
// plane aspect is a single plane that exists for the image's format.
bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo, const char *func_name) const {
    bool skip = false;
    if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
        skip |= ValidateGetImageMemoryRequirementsANDROID(pInfo->image, func_name);
    }

    const IMAGE_STATE *image_state = GetImageState(pInfo->image);
    // Fix: bail out on an unknown image handle instead of dereferencing a null state
    // (sibling entry points such as PreCallValidateGetImageMemoryRequirements null-check).
    if (image_state == nullptr) {
        return skip;
    }
    const VkFormat image_format = image_state->createInfo.format;
    const VkImageTiling image_tiling = image_state->createInfo.tiling;
    const VkImagePlaneMemoryRequirementsInfo *image_plane_info = LvlFindInChain<VkImagePlaneMemoryRequirementsInfo>(pInfo->pNext);

    if ((FormatIsMultiplane(image_format)) && (image_state->disjoint == true) && (image_plane_info == nullptr)) {
        skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01589",
                         "%s: %s image was created with a multi-planar format (%s) and "
                         "VK_IMAGE_CREATE_DISJOINT_BIT, but the current pNext doesn't include a "
                         "VkImagePlaneMemoryRequirementsInfo struct",
                         func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format));
    }
    if ((image_state->disjoint == false) && (image_plane_info != nullptr)) {
        // Fix: added missing space after the comma in the concatenated message text.
        skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01590",
                         "%s: %s image was not created with VK_IMAGE_CREATE_DISJOINT_BIT, "
                         "but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct",
                         func_name, report_data->FormatHandle(pInfo->image).c_str());
    }
    if ((FormatIsMultiplane(image_format) == false) && (image_tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) &&
        (image_plane_info != nullptr)) {
        const char *vuid = device_extensions.vk_ext_image_drm_format_modifier ? "VUID-VkImageMemoryRequirementsInfo2-image-02280"
                                                                              : "VUID-VkImageMemoryRequirementsInfo2-image-01591";
        // Fix: added missing space after the comma in the concatenated message text.
        skip |= LogError(pInfo->image, vuid,
                         "%s: %s image is a single-plane format (%s) and does not have tiling of "
                         "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT, "
                         "but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct",
                         func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format));
    }
    if (image_plane_info != nullptr) {
        if ((image_tiling == VK_IMAGE_TILING_LINEAR) || (image_tiling == VK_IMAGE_TILING_OPTIMAL)) {
            // Make sure planeAspect is only a single, valid plane
            uint32_t planes = FormatPlaneCount(image_format);
            VkImageAspectFlags aspect = image_plane_info->planeAspect;
            if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
                // Fix: added missing space before "or" in the concatenated message text.
                skip |= LogError(
                    pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281",
                    "%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be "
                    "VK_IMAGE_ASPECT_PLANE_0_BIT or VK_IMAGE_ASPECT_PLANE_1_BIT.",
                    func_name, report_data->FormatHandle(image_state->image).c_str(), string_VkImageAspectFlags(aspect).c_str());
            }
            if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
                (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
                // Fix: added missing space before "or" in the concatenated message text.
                skip |= LogError(
                    pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281",
                    "%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be "
                    "VK_IMAGE_ASPECT_PLANE_0_BIT or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.",
                    func_name, report_data->FormatHandle(image_state->image).c_str(), string_VkImageAspectFlags(aspect).c_str());
            }
        }
    }
    return skip;
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                            VkMemoryRequirements2 *pMemoryRequirements) const {
    // Thin wrapper: all real validation is shared with the KHR alias via ValidateGetImageMemoryRequirements2().
    const char *api_name = "vkGetImageMemoryRequirements2()";
    return ValidateGetImageMemoryRequirements2(pInfo, api_name);
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
                                                               VkMemoryRequirements2 *pMemoryRequirements) const {
    // KHR alias of vkGetImageMemoryRequirements2; only the reported API name differs.
    const char *api_name = "vkGetImageMemoryRequirements2KHR()";
    return ValidateGetImageMemoryRequirements2(pInfo, api_name);
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
                                                                        const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
                                                                        VkImageFormatProperties2 *pImageFormatProperties) const {
    // Can't wrap AHB-specific validation in a device extension check here, but no harm
    return ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties);
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                           const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
                                                                           VkImageFormatProperties2 *pImageFormatProperties) const {
    // Can't wrap AHB-specific validation in a device extension check here, but no harm
    return ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties);
}
bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline,
                                                const VkAllocationCallbacks *pAllocator) const {
    bool skip = false;
    // Destroying a pipeline is only legal once no in-flight command buffer still references it.
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
    if (pipeline_state) {
        const VulkanTypedHandle obj_struct(pipeline, kVulkanObjectTypePipeline);
        skip |= ValidateObjectNotInUse(pipeline_state, obj_struct, "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765");
    }
    return skip;
}
bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) const {
    bool skip = false;
    // Destroying a sampler is only legal once no in-flight command buffer still references it.
    const SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
    if (sampler_state) {
        const VulkanTypedHandle obj_struct(sampler, kVulkanObjectTypeSampler);
        skip |= ValidateObjectNotInUse(sampler_state, obj_struct, "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082");
    }
    return skip;
}
bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                      const VkAllocationCallbacks *pAllocator) const {
    bool skip = false;
    // A descriptor pool may not be destroyed while it is still in use by in-flight work.
    const DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
    if (desc_pool_state) {
        const VulkanTypedHandle obj_struct(descriptorPool, kVulkanObjectTypeDescriptorPool);
        skip |= ValidateObjectNotInUse(desc_pool_state, obj_struct, "vkDestroyDescriptorPool",
                                       "VUID-vkDestroyDescriptorPool-descriptorPool-00303");
    }
    return skip;
}
// Verify the command buffer in the given cb_node is not in-flight (its in_use counter is zero).
// This check is only valid at a point when the command buffer is being reset or freed.
// NOTE(review): an earlier version of this check also handled secondary/primary in-flight
// bookkeeping; the current implementation only inspects cb_node->in_use.
bool CoreChecks::CheckCommandBufferInFlight(const CMD_BUFFER_STATE *cb_node, const char *action, const char *error_code) const {
    bool skip = false;
    const bool in_flight = cb_node->in_use.load();
    if (in_flight) {
        skip = LogError(cb_node->commandBuffer, error_code, "Attempt to %s %s which is in use.", action,
                        report_data->FormatHandle(cb_node->commandBuffer).c_str());
    }
    return skip;
}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
bool CoreChecks::CheckCommandBuffersInFlight(const COMMAND_POOL_STATE *pPool, const char *action, const char *error_code) const {
bool skip = false;
for (auto cmd_buffer : pPool->commandBuffers) {
skip |= CheckCommandBufferInFlight(GetCBState(cmd_buffer), action, error_code);
}
return skip;
}
bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                                   const VkCommandBuffer *pCommandBuffers) const {
    bool skip = false;
    // Each command buffer being freed must not be pending execution.
    for (uint32_t idx = 0; idx < commandBufferCount; ++idx) {
        const CMD_BUFFER_STATE *cb_node = GetCBState(pCommandBuffers[idx]);
        if (!cb_node) continue;  // Unknown handles are validated elsewhere
        skip |= CheckCommandBufferInFlight(cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) const {
    bool skip = false;
    // The requested queue family must exist on this device.
    skip |= ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex",
                                      "VUID-vkCreateCommandPool-queueFamilyIndex-01937");
    // Protected command pools require the protectedMemory feature to be enabled.
    const bool protected_requested = (pCreateInfo->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT) != 0;
    if (protected_requested && (enabled_features.core11.protectedMemory == VK_FALSE)) {
        skip |= LogError(device, "VUID-VkCommandPoolCreateInfo-flags-02860",
                         "vkCreateCommandPool(): the protectedMemory device feature is disabled: CommandPools cannot be created "
                         "with the VK_COMMAND_POOL_CREATE_PROTECTED_BIT set.");
    }
    return skip;
}
// Validate vkCreateQueryPool: pipeline-statistics pools require the pipelineStatisticsQuery
// feature; performance-query pools require the performanceCounterQueryPools feature, a
// VkQueryPoolPerformanceCreateInfoKHR in the pNext chain, a valid queue family, and
// in-range counter indices.
bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) const {
    if (disabled[query_validation]) return false;
    bool skip = false;
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        if (!enabled_features.core.pipelineStatisticsQuery) {
            skip |= LogError(device, "VUID-VkQueryPoolCreateInfo-queryType-00791",
                             "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
                             "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
        }
    }
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
        if (!enabled_features.performance_query_features.performanceCounterQueryPools) {
            skip |=
                LogError(device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-performanceCounterQueryPools-03237",
                         "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created on a device with "
                         "VkPhysicalDevicePerformanceQueryFeaturesKHR.performanceCounterQueryPools == VK_FALSE.");
        }
        auto perf_ci = LvlFindInChain<VkQueryPoolPerformanceCreateInfoKHR>(pCreateInfo->pNext);
        if (!perf_ci) {
            // BUG FIX: message said "contain in instance of"; corrected to "contain an instance of".
            skip |= LogError(
                device, "VUID-VkQueryPoolCreateInfo-queryType-03222",
                "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created but the pNext chain of "
                "pCreateInfo does not contain an instance of VkQueryPoolPerformanceCreateInfoKHR.");
        } else {
            const auto &perf_counter_iter = physical_device_state->perf_counters.find(perf_ci->queueFamilyIndex);
            if (perf_counter_iter == physical_device_state->perf_counters.end()) {
                // BUG FIX: messages below previously named a nonexistent struct
                // "VkQueryPerformanceCreateInfoKHR"; the actual struct is VkQueryPoolPerformanceCreateInfoKHR.
                skip |= LogError(
                    device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-queueFamilyIndex-03236",
                    "vkCreateQueryPool(): VkQueryPoolPerformanceCreateInfoKHR::queueFamilyIndex is not a valid queue family index.");
            } else {
                const QUEUE_FAMILY_PERF_COUNTERS *perf_counters = perf_counter_iter->second.get();
                for (uint32_t idx = 0; idx < perf_ci->counterIndexCount; idx++) {
                    if (perf_ci->pCounterIndices[idx] >= perf_counters->counters.size()) {
                        skip |= LogError(
                            device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-pCounterIndices-03321",
                            "vkCreateQueryPool(): VkQueryPoolPerformanceCreateInfoKHR::pCounterIndices[%u] = %u is not a valid "
                            "counter index.",
                            idx, perf_ci->pCounterIndices[idx]);
                    }
                }
            }
        }
    }
    return skip;
}
bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
                                                   const VkAllocationCallbacks *pAllocator) const {
    bool skip = false;
    const COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool);
    if (cp_state) {
        // All command buffers owned by the pool must have completed execution before destruction.
        skip |= CheckCommandBuffersInFlight(cp_state, "destroy command pool with", "VUID-vkDestroyCommandPool-commandPool-00041");
    }
    return skip;
}
bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) const {
    // A pool may only be reset once none of its command buffers are pending execution.
    const COMMAND_POOL_STATE *command_pool_state = GetCommandPoolState(commandPool);
    const bool skip =
        CheckCommandBuffersInFlight(command_pool_state, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040");
    return skip;
}
bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) const {
    bool skip = false;
    // A fence submitted to a queue (internal scope, in-flight) must not be reset until it signals.
    for (uint32_t idx = 0; idx < fenceCount; ++idx) {
        const auto fence_state = GetFenceState(pFences[idx]);
        if (!fence_state) continue;
        const bool inflight_internal = (fence_state->scope == kSyncScopeInternal) && (fence_state->state == FENCE_INFLIGHT);
        if (inflight_internal) {
            skip |= LogError(pFences[idx], "VUID-vkResetFences-pFences-01123", "%s is in use.",
                             report_data->FormatHandle(pFences[idx]).c_str());
        }
    }
    return skip;
}
bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
                                                   const VkAllocationCallbacks *pAllocator) const {
    bool skip = false;
    // A framebuffer may not be destroyed while referenced by in-flight work.
    const FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
    if (framebuffer_state) {
        const VulkanTypedHandle obj_struct(framebuffer, kVulkanObjectTypeFramebuffer);
        skip |= ValidateObjectNotInUse(framebuffer_state, obj_struct, "vkDestroyFramebuffer",
                                       "VUID-vkDestroyFramebuffer-framebuffer-00892");
    }
    return skip;
}
bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
                                                  const VkAllocationCallbacks *pAllocator) const {
    bool skip = false;
    // A render pass may not be destroyed while referenced by in-flight work.
    const RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
    if (rp_state) {
        const VulkanTypedHandle obj_struct(renderPass, kVulkanObjectTypeRenderPass);
        skip |= ValidateObjectNotInUse(rp_state, obj_struct, "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873");
    }
    return skip;
}
// Access helper functions for external modules
// Queries the driver on every call; no caching is performed here.
VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) const {
    VkFormatProperties format_properties{};
    DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties);
    return format_properties;
}
// Validate VK_EXT_vertex_attribute_divisor usage for each graphics pipeline being created:
// every VkVertexInputBindingDivisorDescriptionEXT must reference a valid binding, stay within
// maxVertexAttribDivisor, respect the zero/non-one divisor feature bits, and point at a binding
// whose inputRate is VK_VERTEX_INPUT_RATE_INSTANCE.
bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pipe_state_vec,
                                                const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) const {
    bool skip = false;
    const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;

    for (uint32_t i = 0; i < count; i++) {
        // Pipelines without the divisor extension struct in the vertex-input pNext chain are exempt.
        auto pvids_ci = LvlFindInChain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext);
        if (nullptr == pvids_ci) continue;

        const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get();
        for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) {
            const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]);
            if (vibdd->binding >= device_limits->maxVertexInputBindings) {
                skip |= LogError(
                    device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).",
                    i, j, vibdd->binding, device_limits->maxVertexInputBindings);
            }
            if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) {
                skip |= LogError(
                    device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).",
                    i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor);
            }
            // Divisor of 0 (replicate first instance value) and divisor != 1 are each feature-gated.
            if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) {
                skip |= LogError(
                    device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not "
                    "enabled.",
                    i, j);
            }
            if ((1 != vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) {
                skip |= LogError(
                    device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not "
                    "enabled.",
                    i, j, vibdd->divisor);
            }

            // Find the corresponding binding description and validate input rate setting
            bool failed_01871 = true;
            for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) {
                if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) &&
                    (VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) {
                    failed_01871 = false;
                    break;
                }
            }
            if (failed_01871) {  // Description not found, or has incorrect inputRate value
                skip |= LogError(
                    device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871",
                    "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
                    "pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's "
                    "VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.",
                    i, j, vibdd->binding);
            }
        }
    }
    return skip;
}
// VK_EXT_pipeline_creation_cache_control create flags are only legal once the
// pipelineCreationCacheControl feature has been enabled.
bool CoreChecks::ValidatePipelineCacheControlFlags(VkPipelineCreateFlags flags, uint32_t index, const char *caller_name,
                                                   const char *vuid) const {
    if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl != VK_FALSE) {
        return false;  // Feature enabled: nothing to check
    }
    bool skip = false;
    constexpr VkPipelineCreateFlags invalid_flags =
        VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT | VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT;
    if ((flags & invalid_flags) != 0) {
        skip |= LogError(device, vuid,
                         "%s(): pipelineCreationCacheControl is turned off but pipeline[%u] has VkPipelineCreateFlags "
                         "containing VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT or "
                         "VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT",
                         caller_name, index);
    }
    return skip;
}
bool CoreChecks::PreCallValidateCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator,
                                                    VkPipelineCache *pPipelineCache) const {
    // EXTERNALLY_SYNCHRONIZED caches require the pipelineCreationCacheControl feature.
    if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl != VK_FALSE) {
        return false;
    }
    bool skip = false;
    if ((pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT) != 0) {
        skip |= LogError(device, "VUID-VkPipelineCacheCreateInfo-pipelineCreationCacheControl-02892",
                         "vkCreatePipelineCache(): pipelineCreationCacheControl is turned off but pCreateInfo::flags contains "
                         "VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT");
    }
    return skip;
}
// Validate vkCreateGraphicsPipelines: per-pipeline state checks, vertex-divisor checks when
// VK_EXT_vertex_attribute_divisor is enabled, and VK_KHR_portability_subset restrictions
// (separate stencil reference values, constant-alpha blend factors).
bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                        const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                        const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                        void *cgpl_state_data) const {
    bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                     pPipelines, cgpl_state_data);
    create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);

    for (uint32_t i = 0; i < count; i++) {
        skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i);
    }

    for (uint32_t i = 0; i < count; i++) {
        skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state[i].get(), i);
    }

    if (device_extensions.vk_ext_vertex_attribute_divisor) {
        skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos);
    }

    if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
        for (uint32_t i = 0; i < count; ++i) {
            // Validate depth-stencil state
            auto raster_state_ci = pCreateInfos[i].pRasterizationState;
            if ((VK_FALSE == enabled_features.portability_subset_features.separateStencilMaskRef) && raster_state_ci &&
                (VK_CULL_MODE_NONE == raster_state_ci->cullMode)) {
                auto depth_stencil_ci = pCreateInfos[i].pDepthStencilState;
                // BUG FIX: guard against a null pDepthStencilState (it is optional) before dereferencing.
                if (depth_stencil_ci && (VK_TRUE == depth_stencil_ci->stencilTestEnable) &&
                    (depth_stencil_ci->front.reference != depth_stencil_ci->back.reference)) {
                    skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-separateStencilMaskRef-04453",
                                     "Invalid Pipeline CreateInfo[%d] (portability error): VkStencilOpState::reference must be the "
                                     "same for front and back",
                                     i);
                }
            }
            // Validate color attachments
            auto color_blend_state = pCreateInfos[i].pColorBlendState;
            if ((VK_FALSE == enabled_features.portability_subset_features.constantAlphaColorBlendFactors) && color_blend_state) {
                const auto attachments = color_blend_state->pAttachments;
                // BUG FIX: the original loop tested and incremented the outer index 'i' instead of
                // 'color_attachment_index', so at most attachment 0 was inspected and 'i' was corrupted.
                for (uint32_t color_attachment_index = 0; color_attachment_index < color_blend_state->attachmentCount;
                     ++color_attachment_index) {
                    if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor) ||
                        (VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor)) {
                        skip |= LogError(
                            device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04454",
                            "Invalid Pipeline CreateInfo[%d] (portability error): srcColorBlendFactor for color attachment %d must "
                            "not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA",
                            i, color_attachment_index);
                    }
                    if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor) ||
                        (VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor)) {
                        skip |= LogError(
                            device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04455",
                            "Invalid Pipeline CreateInfo[%d] (portability error): dstColorBlendFactor for color attachment %d must "
                            "not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA",
                            i, color_attachment_index);
                    }
                }
            }
        }
    }
    return skip;
}
// Validate vkCreateComputePipelines: shader-stage state and cache-control flag checks
// for each pipeline in the batch.
bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                       const VkComputePipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                       void *ccpl_state_data) const {
    bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                    pPipelines, ccpl_state_data);
    auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
    for (uint32_t i = 0; i < count; i++) {
        // TODO: Add Compute Pipeline Verification
        skip |= ValidateComputePipelineShaderState(ccpl_state->pipe_state[i].get());
        // BUG FIX: was 'pCreateInfos->flags', which always read element 0's flags; each
        // pipeline's own flags must be checked (matches the ray-tracing variants).
        skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateComputePipelines",
                                                 "VUID-VkComputePipelineCreateInfo-pipelineCreationCacheControl-02875");
    }
    return skip;
}
// Validate vkCreateRayTracingPipelinesNV: for each pipeline, derivative pipelines must name a
// base pipeline (by index into this batch or by handle) that was created with
// VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT; then run shared ray-tracing and cache-control checks.
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                                                            const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
                                                            const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                            void *crtpl_state_data) const {
    bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines, crtpl_state_data);
    auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
    for (uint32_t i = 0; i < count; i++) {
        PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get();
        if (pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
            const PIPELINE_STATE *base_pipeline = nullptr;
            // basePipelineIndex refers to an earlier pipeline in this same batch;
            // otherwise the base is located by its handle.
            if (pipeline->raytracingPipelineCI.basePipelineIndex != -1) {
                base_pipeline = crtpl_state->pipe_state[pipeline->raytracingPipelineCI.basePipelineIndex].get();
            } else if (pipeline->raytracingPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
                base_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.basePipelineHandle);
            }
            if (!base_pipeline || !(base_pipeline->getPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
                skip |= LogError(
                    device, "VUID-vkCreateRayTracingPipelinesNV-flags-03416",
                    "vkCreateRayTracingPipelinesNV: If the flags member of any element of pCreateInfos contains the "
                    "VK_PIPELINE_CREATE_DERIVATIVE_BIT flag,"
                    "the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set.");
            }
        }
        skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ false);
        skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesNV",
                                                 "VUID-VkRayTracingPipelineCreateInfoNV-pipelineCreationCacheControl-02905");
    }
    return skip;
}
// Validate vkCreateRayTracingPipelinesKHR: mirrors the NV path above but for the KHR
// create-info/deferred-operation entry point; derivative pipelines must name a base pipeline
// created with VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT.
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation,
                                                             VkPipelineCache pipelineCache, uint32_t count,
                                                             const VkRayTracingPipelineCreateInfoKHR *pCreateInfos,
                                                             const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
                                                             void *crtpl_state_data) const {
    bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, count,
                                                                          pCreateInfos, pAllocator, pPipelines, crtpl_state_data);
    auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_khr_api_state *>(crtpl_state_data);
    for (uint32_t i = 0; i < count; i++) {
        PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get();
        if (pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
            const PIPELINE_STATE *base_pipeline = nullptr;
            // basePipelineIndex refers to an earlier pipeline in this same batch;
            // otherwise the base is located by its handle.
            if (pipeline->raytracingPipelineCI.basePipelineIndex != -1) {
                base_pipeline = crtpl_state->pipe_state[pipeline->raytracingPipelineCI.basePipelineIndex].get();
            } else if (pipeline->raytracingPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
                base_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.basePipelineHandle);
            }
            if (!base_pipeline || !(base_pipeline->getPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
                skip |= LogError(
                    device, "VUID-vkCreateRayTracingPipelinesKHR-flags-03416",
                    "vkCreateRayTracingPipelinesKHR: If the flags member of any element of pCreateInfos contains the "
                    "VK_PIPELINE_CREATE_DERIVATIVE_BIT flag,"
                    "the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set.");
            }
        }
        skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ true);
        skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesKHR",
                                                 "VUID-VkRayTracingPipelineCreateInfoKHR-pipelineCreationCacheControl-02905");
    }
    return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR *pPipelineInfo,
                                                                   uint32_t *pExecutableCount,
                                                                   VkPipelineExecutablePropertiesKHR *pProperties) const {
    // The VK_KHR_pipeline_executable_properties feature gate must be enabled.
    if (enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) {
        return false;
    }
    return LogError(device, "VUID-vkGetPipelineExecutablePropertiesKHR-pipelineExecutableInfo-03270",
                    "vkGetPipelineExecutablePropertiesKHR called when pipelineExecutableInfo feature is not enabled.");
}
// Shared validation for vkGetPipelineExecutableStatisticsKHR /
// vkGetPipelineExecutableInternalRepresentationsKHR: the feature must be enabled and
// executableIndex must be within the pipeline's executable count.
bool CoreChecks::ValidatePipelineExecutableInfo(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo) const {
    bool skip = false;
    if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) {
        skip |= LogError(device, "VUID-vkGetPipelineExecutableStatisticsKHR-pipelineExecutableInfo-03272",
                         "vkGetPipelineExecutableStatisticsKHR called when pipelineExecutableInfo feature is not enabled.");
    }

    VkPipelineInfoKHR pi = {};
    pi.sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR;
    pi.pipeline = pExecutableInfo->pipeline;

    // We could probably cache this instead of fetching it every time
    uint32_t executable_count = 0;
    DispatchGetPipelineExecutablePropertiesKHR(device, &pi, &executable_count, NULL);

    if (pExecutableInfo->executableIndex >= executable_count) {
        // BUG FIX: message previously misspelled the API ("vkGetPipelineExecutablePropertiessKHR")
        // and the struct name ("VkPipelineExecutableInfo" without the KHR suffix).
        skip |=
            LogError(pExecutableInfo->pipeline, "VUID-VkPipelineExecutableInfoKHR-executableIndex-03275",
                     "VkPipelineExecutableInfoKHR::executableIndex (%1u) must be less than the number of executables associated "
                     "with the pipeline (%1u) as returned by vkGetPipelineExecutablePropertiesKHR",
                     pExecutableInfo->executableIndex, executable_count);
    }
    return skip;
}
// Validate vkGetPipelineExecutableStatisticsKHR: shared executable-info checks plus the
// requirement that the pipeline was created with CAPTURE_STATISTICS.
bool CoreChecks::PreCallValidateGetPipelineExecutableStatisticsKHR(VkDevice device,
                                                                   const VkPipelineExecutableInfoKHR *pExecutableInfo,
                                                                   uint32_t *pStatisticCount,
                                                                   VkPipelineExecutableStatisticKHR *pStatistics) const {
    bool skip = ValidatePipelineExecutableInfo(device, pExecutableInfo);

    // BUG FIX: GetPipelineState can return null for an unknown/destroyed handle; guard the deref
    // (invalid-handle reporting is handled by other layers/checks).
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline);
    if (pipeline_state && !(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR)) {
        skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableStatisticsKHR-pipeline-03274",
                         "vkGetPipelineExecutableStatisticsKHR called on a pipeline created without the "
                         "VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR flag set");
    }
    return skip;
}
// Validate vkGetPipelineExecutableInternalRepresentationsKHR: shared executable-info checks plus
// the requirement that the pipeline was created with CAPTURE_INTERNAL_REPRESENTATIONS.
bool CoreChecks::PreCallValidateGetPipelineExecutableInternalRepresentationsKHR(
    VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pInternalRepresentationCount,
    VkPipelineExecutableInternalRepresentationKHR *pStatistics) const {
    bool skip = ValidatePipelineExecutableInfo(device, pExecutableInfo);

    // BUG FIX: GetPipelineState can return null for an unknown/destroyed handle; guard the deref
    // (invalid-handle reporting is handled by other layers/checks).
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline);
    if (pipeline_state &&
        !(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
        skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipeline-03278",
                         "vkGetPipelineExecutableInternalRepresentationsKHR called on a pipeline created without the "
                         "VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR flag set");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                          const VkAllocationCallbacks *pAllocator,
                                                          VkDescriptorSetLayout *pSetLayout) const {
    // Delegate to the descriptor-set helper, forwarding the relevant extension/feature state.
    const bool push_descriptor_ext = IsExtEnabled(device_extensions.vk_khr_push_descriptor);
    const bool descriptor_indexing_ext = IsExtEnabled(device_extensions.vk_ext_descriptor_indexing);
    return cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo(
        this, pCreateInfo, push_descriptor_ext, phys_dev_ext_props.max_push_descriptors, descriptor_indexing_ext,
        &enabled_features.core12, &enabled_features.inline_uniform_block, &phys_dev_ext_props.inline_uniform_block_props,
        &device_extensions);
}
// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
// Checks three independent properties of a push-constant range against device limits:
//   1. offset + size must fit within maxPushConstantsSize (overflow-safe comparison),
//   2. size must be non-zero and a multiple of 4,
//   3. offset must be a multiple of 4.
// Each check dispatches on caller_name so the reported VUID matches the calling API.
bool CoreChecks::ValidatePushConstantRange(const uint32_t offset, const uint32_t size, const char *caller_name,
                                           uint32_t index = 0) const {
    if (disabled[push_constant_range]) return false;
    uint32_t const max_push_constants_size = phys_dev_props.limits.maxPushConstantsSize;
    bool skip = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithetic overflow here by avoiding addition and testing in this order.
    if ((offset >= max_push_constants_size) || (size > max_push_constants_size - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (offset >= max_push_constants_size) {
                skip |= LogError(
                    device, "VUID-VkPushConstantRange-offset-00294",
                    "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
                    caller_name, index, offset, max_push_constants_size);
            }
            if (size > max_push_constants_size - offset) {
                skip |= LogError(device, "VUID-VkPushConstantRange-size-00298",
                                 "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
                                 "maxPushConstantSize of %u.",
                                 caller_name, index, offset, size, max_push_constants_size);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (offset >= max_push_constants_size) {
                skip |= LogError(
                    device, "VUID-vkCmdPushConstants-offset-00370",
                    "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
                    caller_name, index, offset, max_push_constants_size);
            }
            if (size > max_push_constants_size - offset) {
                skip |= LogError(device, "VUID-vkCmdPushConstants-size-00371",
                                 "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
                                 "maxPushConstantSize of %u.",
                                 caller_name, index, offset, size, max_push_constants_size);
            }
        } else {
            // Unrecognized caller is an internal layer error, not an app error.
            skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (size == 0) {
                skip |= LogError(device, "VUID-VkPushConstantRange-size-00296",
                                 "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
                                 index, size);
            }
            if (size & 0x3) {
                skip |= LogError(device, "VUID-VkPushConstantRange-size-00297",
                                 "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
                                 index, size);
            }
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (size == 0) {
                skip |= LogError(device, "VUID-vkCmdPushConstants-size-arraylength",
                                 "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
                                 index, size);
            }
            if (size & 0x3) {
                skip |= LogError(device, "VUID-vkCmdPushConstants-size-00369",
                                 "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
                                 index, size);
            }
        } else {
            skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip |= LogError(device, "VUID-VkPushConstantRange-offset-00295",
                             "%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
                             index, offset);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip |= LogError(device, "VUID-vkCmdPushConstants-offset-00368",
                             "%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset);
        } else {
            skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
        }
    }
    return skip;
}
// Coarse descriptor-type groups used when summing per-stage / total descriptor counts
// against device limits in pipeline-layout validation (see GetDescriptorCountMaxPerStage).
enum DSL_DESCRIPTOR_GROUPS {
    DSL_TYPE_SAMPLERS = 0,          // SAMPLER (+ combined image samplers counted by callers)
    DSL_TYPE_UNIFORM_BUFFERS,       // UNIFORM_BUFFER and UNIFORM_BUFFER_DYNAMIC
    DSL_TYPE_STORAGE_BUFFERS,       // STORAGE_BUFFER and STORAGE_BUFFER_DYNAMIC
    DSL_TYPE_SAMPLED_IMAGES,        // sampled-image-like descriptors
    DSL_TYPE_STORAGE_IMAGES,        // storage-image-like descriptors
    DSL_TYPE_INPUT_ATTACHMENTS,     // INPUT_ATTACHMENT
    DSL_TYPE_INLINE_UNIFORM_BLOCK,  // VK_EXT_inline_uniform_block
    DSL_NUM_DESCRIPTOR_GROUPS       // Number of groups; sizes the per-stage valarrays
};
// Used by PreCallValidateCreatePipelineLayout.
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
const DeviceFeatures *enabled_features,
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
// Identify active pipeline stages
std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_STAGE_COMPUTE_BIT};
if (enabled_features->core.geometryShader) {
stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
}
if (enabled_features->core.tessellationShader) {
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
}
// Allow iteration over enum values
std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {
DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK};
// Sum by layouts per stage, then pick max of stages per type
std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages
for (auto stage : stage_flags) {
std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums
for (auto dsl : set_layouts) {
if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) {
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
// count one block per binding. descriptorCount is number of bytes
stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++;
break;
default:
break;
}
}
}
}
for (auto type : dsl_groups) {
max_sum[type] = std::max(stage_sum[type], max_sum[type]);
}
}
return max_sum;
}
// Used by PreCallValidateCreatePipelineLayout.
// Accumulates, per VkDescriptorType enum value, the descriptor totals across every
// binding in the given set layouts. A descriptor counts once against these limits
// even when it is visible to several stages.
std::map<uint32_t, uint32_t> GetDescriptorSum(
    const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
    std::map<uint32_t, uint32_t> totals;
    for (const auto &layout : set_layouts) {
        if (skip_update_after_bind && (layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) {
            continue;
        }
        const uint32_t binding_count = layout->GetBindingCount();
        for (uint32_t i = 0; i < binding_count; i++) {
            const VkDescriptorSetLayoutBinding *b = layout->GetDescriptorSetLayoutBindingPtrFromIndex(i);
            // A descriptorCount of 0 marks a "reserved" binding; it never counts
            if (0 == b->descriptorCount) continue;
            if (VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT == b->descriptorType) {
                // descriptorCount is a byte size for inline uniform blocks; count blocks instead
                totals[b->descriptorType] += 1;
            } else {
                totals[b->descriptorType] += b->descriptorCount;
            }
        }
    }
    return totals;
}
// Validate vkCreatePipelineLayout():
//  - set layout count against maxBoundDescriptorSets,
//  - push-constant ranges (offset/size alignment, non-zero and non-duplicated stageFlags),
//  - at most one push-descriptor set layout,
//  - per-stage and total descriptor counts against the device's regular limits,
//    the update-after-bind limits (VK_EXT_descriptor_indexing / core 1.2), and the
//    subsampled-sampler limit (VK_EXT_fragment_density_map2).
bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator,
                                                     VkPipelineLayout *pPipelineLayout) const {
    bool skip = false;

    // Validate layout count against device physical limit
    if (pCreateInfo->setLayoutCount > phys_dev_props.limits.maxBoundDescriptorSets) {
        skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286",
                         "vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
                         pCreateInfo->setLayoutCount, phys_dev_props.limits.maxBoundDescriptorSets);
    }

    // Validate Push Constant ranges
    uint32_t i, j;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skip |= ValidatePushConstantRange(pCreateInfo->pPushConstantRanges[i].offset, pCreateInfo->pPushConstantRanges[i].size,
                                          "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skip |= LogError(device, "VUID-VkPushConstantRange-stageFlags-requiredbitmask",
                             "vkCreatePipelineLayout() call has no stageFlags set.");
        }
    }

    // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
            if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
                skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292",
                                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
            }
        }
    }

    // Early-out
    if (skip) return skip;

    std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
    unsigned int push_descriptor_set_count = 0;
    {
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            set_layouts[i] = GetDescriptorSetLayoutShared(pCreateInfo->pSetLayouts[i]);
            if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
        }
    }

    if (push_descriptor_set_count > 1) {
        skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293",
                         "vkCreatePipelineLayout() Multiple push descriptor sets found.");
    }

    // Max descriptors by type, within a single pipeline stage
    std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true);
    // Samplers
    if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03016"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
                         "maxPerStageDescriptorSamplers limit (%d).",
                         max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers);
    }

    // Uniform buffers
    if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03017"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
                         "maxPerStageDescriptorUniformBuffers limit (%d).",
                         max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
                         phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
    }

    // Storage buffers
    if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03018"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
                         "maxPerStageDescriptorStorageBuffers limit (%d).",
                         max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
                         phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
    }

    // Sampled images
    if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03019"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290";
        skip |=
            LogError(device, vuid,
                     "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
                     "maxPerStageDescriptorSampledImages limit (%d).",
                     max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages);
    }

    // Storage images
    if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03020"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291";
        skip |=
            LogError(device, vuid,
                     "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
                     "maxPerStageDescriptorStorageImages limit (%d).",
                     max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages);
    }

    // Input attachments
    if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03021"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
                         "maxPerStageDescriptorInputAttachments limit (%d).",
                         max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
                         phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
    }

    // Inline uniform blocks
    if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
        phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-descriptorType-02212";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
                         "maxPerStageDescriptorInlineUniformBlocks limit (%d).",
                         max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK],
                         phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks);
    }

    // Total descriptors by type
    //
    std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true);
    // Samplers
    uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
    if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03028"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetSamplers limit (%d).",
                         sum, phys_dev_props.limits.maxDescriptorSetSamplers);
    }

    // Uniform buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03029"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetUniformBuffers limit (%d).",
                         sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers);
    }

    // Dynamic uniform buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03030"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetUniformBuffersDynamic limit (%d).",
                         sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
                         phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
    }

    // Storage buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03031"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetStorageBuffers limit (%d).",
                         sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers);
    }

    // Dynamic storage buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03032"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetStorageBuffersDynamic limit (%d).",
                         sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
                         phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
    }

    //  Sampled images
    sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
          sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
    if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03033"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetSampledImages limit (%d).",
                         sum, phys_dev_props.limits.maxDescriptorSetSampledImages);
    }

    //  Storage images
    sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
    if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03034"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetStorageImages limit (%d).",
                         sum, phys_dev_props.limits.maxDescriptorSetStorageImages);
    }

    // Input attachments
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03035"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684";
        skip |=
            LogError(device, vuid,
                     "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
                     "maxDescriptorSetInputAttachments limit (%d).",
                     sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments);
    }

    // Inline uniform blocks
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
        phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) {
        const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-02216"
                                                                          : "VUID-VkPipelineLayoutCreateInfo-descriptorType-02213";
        skip |= LogError(device, vuid,
                         "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetInlineUniformBlocks limit (%d).",
                         sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
                         phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks);
    }

    if (device_extensions.vk_ext_descriptor_indexing) {
        // XXX TODO: replace with correct VU messages

        // Max descriptors by type, within a single pipeline stage
        std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
            GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false);
        // Samplers
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
            phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022",
                             "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
                             "maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
                             max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
                             phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers);
        }

        // Uniform buffers
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
            phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023",
                             "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
                             "maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
                             max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
                             phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
        }

        // Storage buffers
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
            phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024",
                             "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
                             "maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
                             max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
                             phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
        }

        // Sampled images
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
            phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025",
                             "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
                             "maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
                             max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
                             phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages);
        }

        // Storage images
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
            phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026",
                             "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
                             "maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
                             max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
                             phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages);
        }

        // Input attachments
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
            phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027",
                             "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
                             "maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
                             max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
                             phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments);
        }

        // Inline uniform blocks
        if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
            phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215",
                             "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
                             "maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).",
                             max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK],
                             phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
        }

        // Total descriptors by type, summed across all pipeline stages
        //
        std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false);

        // Samplers
        sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
              sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
        if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036",
                             "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
                             "maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
                             sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers);
        }

        // Uniform buffers
        if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
            phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037",
                             "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
                             "maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
                             phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers);
        }

        // Dynamic uniform buffers
        if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
            phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
            skip |=
                LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038",
                         "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
                         sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
                         phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
        }

        // Storage buffers
        if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
            phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039",
                             "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
                             "maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
                             phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers);
        }

        // Dynamic storage buffers
        if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
            phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
            skip |=
                LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040",
                         "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
                         sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
                         phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
        }

        //  Sampled images
        sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
              sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
              sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
        if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041",
                             "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
                             "maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
                             sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages);
        }

        //  Storage images
        sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
              sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
        if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042",
                             "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
                             "maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
                             sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages);
        }

        // Input attachments
        if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
            phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043",
                             "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
                             "maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
                             phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments);
        }

        // Inline uniform blocks
        if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
            phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217",
                             "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
                             "maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).",
                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
                             phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
        }
    }

    if (device_extensions.vk_ext_fragment_density_map_2) {
        uint32_t sum_subsampled_samplers = 0;
        for (auto dsl : set_layouts) {
            // find the number of subsampled samplers across all stages
            // NOTE: this does not use the GetDescriptorSum pattern because it needs the GetSamplerState method
            if ((dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) {
                continue;
            }
            for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
                const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);

                // Bindings with a descriptorCount of 0 are "reserved" and should be skipped
                if (binding->descriptorCount > 0) {
                    if (((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
                         (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)) &&
                        (binding->pImmutableSamplers != nullptr)) {
                        for (uint32_t sampler_idx = 0; sampler_idx < binding->descriptorCount; sampler_idx++) {
                            const SAMPLER_STATE *state = GetSamplerState(binding->pImmutableSamplers[sampler_idx]);
                            // GetSamplerState() returns null for an invalid/destroyed handle; guard the dereference
                            if (state && (state->createInfo.flags & (VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT |
                                                                     VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT))) {
                                sum_subsampled_samplers++;
                            }
                        }
                    }
                }
            }
        }

        if (sum_subsampled_samplers > phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pImmutableSamplers-03566",
                             "vkCreatePipelineLayout(): sum of sampler bindings with flags containing "
                             "VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT or "
                             "VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT among all stages (%d) "
                             "exceeds device maxDescriptorSetSubsampledSamplers limit (%d).",
                             sum_subsampled_samplers,
                             phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers);
        }
    }
    return skip;
}
bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                    VkDescriptorPoolResetFlags flags) const {
    // A pool may only be reset once none of its sets are referenced by in-flight command buffers
    if (disabled[idle_descriptor_set]) return false;
    const DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
    if (nullptr == pool_state) return false;
    for (const auto &set : pool_state->sets) {
        if (!set || !set->in_use.load()) continue;
        if (LogError(descriptorPool, "VUID-vkResetDescriptorPool-descriptorPool-00313",
                     "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer.")) {
            // One reported error is enough; stop scanning the remaining sets
            return true;
        }
    }
    return false;
}
// Validate that a descriptor-set allocation request can be satisfied by the pool.
// The ads_state_data blob is filled with the total number of descriptors of each type
// required, plus the DescriptorSetLayout pointers, for use by the later state update.
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                       VkDescriptorSet *pDescriptorSets, void *ads_state_data) const {
    // Let the base state tracker populate the allocation bookkeeping first
    StateTracker::PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, ads_state_data);
    auto *alloc_state = reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
    // All of the actual validation lives in a single shared helper
    return ValidateAllocateDescriptorSets(pAllocateInfo, alloc_state);
}
bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                   const VkDescriptorSet *pDescriptorSets) const {
    bool skip = false;
    // Sets being freed must not be referenced by any in-flight command buffer
    for (uint32_t idx = 0; idx < count; ++idx) {
        const VkDescriptorSet set = pDescriptorSets[idx];
        if (VK_NULL_HANDLE == set) continue;
        skip |= ValidateIdleDescriptorSet(set, "vkFreeDescriptorSets");
    }
    // Freeing individual sets is only legal for pools created with the FREE_DESCRIPTOR_SET bit
    const DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
    if (pool_state && (0 == (pool_state->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT))) {
        skip |= LogError(descriptorPool, "VUID-vkFreeDescriptorSets-descriptorPool-00312",
                         "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                         "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                     const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                     const VkCopyDescriptorSet *pDescriptorCopies) const {
    // vkUpdateDescriptorSets operates on many descriptor sets at once, so the per-set map
    // look-ups are performed inside the shared helper rather than up front here. The helper
    // only validates state; no state updates happen during the pre-call phase.
    return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies,
                                        "vkUpdateDescriptorSets()");
}
// Validates vkBeginCommandBuffer(): the command buffer must not be in flight or already
// recording, secondary buffers need coherent inheritance info, and re-beginning a finished
// buffer (an implicit reset) requires a pool created with the RESET bit.
// Returns true if any validation error was logged.
bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer,
                                                   const VkCommandBufferBeginInfo *pBeginInfo) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    if (!cb_state) return false;
    bool skip = false;
    // A command buffer still referenced by a pending queue submission cannot be re-begun.
    if (cb_state->in_use.load()) {
        skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049",
                         "Calling vkBeginCommandBuffer() on active %s before it has completed. You must check "
                         "command buffer fence before this call.",
                         report_data->FormatHandle(commandBuffer).c_str());
    }
    if (cb_state->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        // Primary Command Buffer
        // ONE_TIME_SUBMIT and SIMULTANEOUS_USE are mutually exclusive for primaries.
        const VkCommandBufferUsageFlags invalid_usage =
            (VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
        if ((pBeginInfo->flags & invalid_usage) == invalid_usage) {
            skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-02840",
                             "vkBeginCommandBuffer(): Primary %s can't have both VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT and "
                             "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                             report_data->FormatHandle(commandBuffer).c_str());
        }
    } else {
        // Secondary Command Buffer
        const VkCommandBufferInheritanceInfo *info = pBeginInfo->pInheritanceInfo;
        if (!info) {
            skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00051",
                             "vkBeginCommandBuffer(): Secondary %s must have inheritance info.",
                             report_data->FormatHandle(commandBuffer).c_str());
        } else {
            if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                assert(info->renderPass);
                const auto *framebuffer = GetFramebufferState(info->framebuffer);
                if (framebuffer) {
                    if (framebuffer->createInfo.renderPass != info->renderPass) {
                        const auto *render_pass = GetRenderPassState(info->renderPass);
                        // renderPass that framebuffer was created with must be compatible with local renderPass
                        skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer",
                                                                render_pass, "vkBeginCommandBuffer()",
                                                                "VUID-VkCommandBufferBeginInfo-flags-00055");
                    }
                }
            }
            // Precise occlusion queries require both occlusionQueryEnable in the inheritance
            // info and the occlusionQueryPrecise device feature.
            if ((info->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) &&
                (info->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00052",
                                 "vkBeginCommandBuffer(): Secondary %s must not have VK_QUERY_CONTROL_PRECISE_BIT if "
                                 "occulusionQuery is disabled or the device does not support precise occlusion queries.",
                                 report_data->FormatHandle(commandBuffer).c_str());
            }
        }
        if (info && info->renderPass != VK_NULL_HANDLE) {
            const auto *render_pass = GetRenderPassState(info->renderPass);
            if (render_pass) {
                // The inherited subpass index must exist in the inherited render pass.
                if (info->subpass >= render_pass->createInfo.subpassCount) {
                    skip |= LogError(commandBuffer, "VUID-VkCommandBufferBeginInfo-flags-00054",
                                     "vkBeginCommandBuffer(): Secondary %s must have a subpass index (%d) that is "
                                     "less than the number of subpasses (%d).",
                                     report_data->FormatHandle(commandBuffer).c_str(), info->subpass,
                                     render_pass->createInfo.subpassCount);
                }
            }
        }
    }
    if (CB_RECORDING == cb_state->state) {
        skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049",
                         "vkBeginCommandBuffer(): Cannot call Begin on %s in the RECORDING state. Must first call "
                         "vkEndCommandBuffer().",
                         report_data->FormatHandle(commandBuffer).c_str());
    } else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
        // Re-beginning a finished (or invalidated) buffer is an implicit reset, which the
        // owning pool must have been created to allow.
        VkCommandPool cmd_pool = cb_state->createInfo.commandPool;
        const auto *pool = cb_state->command_pool.get();
        if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) {
            LogObjectList objlist(commandBuffer);
            objlist.add(cmd_pool);
            skip |= LogError(objlist, "VUID-vkBeginCommandBuffer-commandBuffer-00050",
                             "Call to vkBeginCommandBuffer() on %s attempts to implicitly reset cmdBuffer created from "
                             "%s that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                             report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str());
        }
    }
    // If device-group begin info is chained, its deviceMask must be non-zero and must only
    // name physical devices that exist in the group.
    auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext);
    if (chained_device_group_struct) {
        skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, commandBuffer,
                                                        "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106");
        skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, commandBuffer,
                                         "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107");
    }
    return skip;
}
// Validates vkEndCommandBuffer(): recording must not end inside a render pass (except for
// secondaries begun with RENDER_PASS_CONTINUE) and every begun query must have been ended.
// Returns true if any validation error was logged.
bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    if (!cb_state) return false;
    bool skip = false;
    if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) ||
        !(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
        // This needs spec clarification to update valid usage, see comments in PR:
        // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165
        skip |= InsideRenderPass(cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060");
    }
    skip |= ValidateCmd(cb_state, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
    // Any query still active at end-of-recording is an error; report each one.
    for (auto query : cb_state->activeQueries) {
        skip |= LogError(commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00061",
                         "vkEndCommandBuffer(): Ending command buffer with in progress query: %s, query %d.",
                         report_data->FormatHandle(query.pool).c_str(), query.query);
    }
    return skip;
}
// Validates vkResetCommandBuffer(): the owning pool must have been created with the
// RESET_COMMAND_BUFFER bit, and the buffer must not be in flight on a queue.
// Returns true if any validation error was logged.
bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    if (!cb_state) return false;
    VkCommandPool cmd_pool = cb_state->createInfo.commandPool;
    const auto *pool = cb_state->command_pool.get();
    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) {
        // Report against both the command buffer and its pool so the message is actionable.
        LogObjectList objlist(commandBuffer);
        objlist.add(cmd_pool);
        skip |= LogError(objlist, "VUID-vkResetCommandBuffer-commandBuffer-00046",
                         "vkResetCommandBuffer(): Attempt to reset %s created from %s that does NOT have the "
                         "VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                         report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str());
    }
    skip |= CheckCommandBufferInFlight(cb_state, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045");
    return skip;
}
// Map a pipeline bind point to a short human-readable name used in error messages.
static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) {
    if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
        return "graphics";
    }
    if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
        return "compute";
    }
    if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) {
        return "ray-tracing";
    }
    return "unknown";
}
// At graphics-pipeline bind time, checks that every color attachment the pipeline enables
// blending on supports VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT. Runs only when a
// framebuffer is active (i.e. when binding happens inside a render pass).
// NOTE(review): the VUID/message reference vkCreateGraphicsPipelines even though this fires
// at bind time — presumably because the concrete attachment image is only known here; confirm.
bool CoreChecks::ValidateGraphicsPipelineBindPoint(const CMD_BUFFER_STATE *cb_state, const PIPELINE_STATE *pipeline_state) const {
    bool skip = false;
    const FRAMEBUFFER_STATE *fb_state = cb_state->activeFramebuffer.get();
    if (fb_state) {
        // Subpass description the pipeline was created against.
        auto subpass_desc = &pipeline_state->rp_state->createInfo.pSubpasses[pipeline_state->graphicsPipelineCI.subpass];
        for (size_t i = 0; i < pipeline_state->attachments.size() && i < subpass_desc->colorAttachmentCount; i++) {
            const auto attachment = subpass_desc->pColorAttachments[i].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            // Silently skip attachments whose view/image state is not tracked.
            const auto *imageview_state = GetActiveAttachmentImageViewState(cb_state, attachment);
            if (!imageview_state) continue;
            const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image);
            if (!image_state) continue;
            const VkFormat format = pipeline_state->rp_state->createInfo.pAttachments[attachment].format;
            const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(format);
            // Blending only matters when rasterization is enabled and this attachment has
            // blendEnable set.
            if (pipeline_state->graphicsPipelineCI.pRasterizationState &&
                !pipeline_state->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable &&
                pipeline_state->attachments[i].blendEnable && !(format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT)) {
                skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-blendEnable-02023",
                                 "vkCreateGraphicsPipelines(): pipeline.pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
                                 "].blendEnable is VK_TRUE but format %s associated with this attached image (%s) does "
                                 "not support VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT.",
                                 i, report_data->FormatHandle(image_state->image).c_str(), string_VkFormat(format));
            }
        }
    }
    return skip;
}
// Validates vkCmdBindPipeline(): queue capability, recording state, that the command
// buffer's pool supports the requested bind point, and that the pipeline's own type matches
// the bind point (with extra blend-format checks for graphics pipelines).
// Returns true if any validation error was logged.
bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                VkPipeline pipeline) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdBindPipeline-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
    // Per-bind-point VUIDs reported when the pool's queue family lacks that capability.
    static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = {
        std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"),
        std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"),
        std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")};
    skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors);
    const auto *pipeline_state = GetPipelineState(pipeline);
    assert(pipeline_state);
    const auto &pipeline_state_bind_point = pipeline_state->getPipelineType();
    // A pipeline may only be bound to the bind point matching its type; each mismatch case
    // has its own VUID.
    if (pipelineBindPoint != pipeline_state_bind_point) {
        if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-00779",
                             "Cannot bind a pipeline of type %s to the graphics pipeline bind point",
                             GetPipelineTypeName(pipeline_state_bind_point));
        } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-00780",
                             "Cannot bind a pipeline of type %s to the compute pipeline bind point",
                             GetPipelineTypeName(pipeline_state_bind_point));
        } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-02392",
                             "Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point",
                             GetPipelineTypeName(pipeline_state_bind_point));
        }
    } else {
        if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
            // Graphics-only checks that need the currently active framebuffer.
            skip |= ValidateGraphicsPipelineBindPoint(cb_state, pipeline_state);
        }
    }
    return skip;
}
// Validates vkCmdSetViewport(): dynamic viewport state may only be recorded into a
// command buffer whose pool supports graphics work.
bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                               const VkViewport *pViewports) const {
    const CMD_BUFFER_STATE *cb_node = GetCBState(commandBuffer);
    assert(cb_node);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb_node, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetViewport-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_node, CMD_SETVIEWPORT, "vkCmdSetViewport()");
    return skip;
}
// Validates vkCmdSetScissor(): dynamic scissor state may only be recorded into a
// command buffer whose pool supports graphics work.
bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                              const VkRect2D *pScissors) const {
    const CMD_BUFFER_STATE *cb_node = GetCBState(commandBuffer);
    assert(cb_node);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb_node, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetScissor-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_node, CMD_SETSCISSOR, "vkCmdSetScissor()");
    return skip;
}
// Validates vkCmdSetExclusiveScissorNV(): requires a graphics-capable command buffer and
// the exclusiveScissor device feature.
bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
                                                         uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) const {
    const CMD_BUFFER_STATE *cb_node = GetCBState(commandBuffer);
    assert(cb_node);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb_node, "vkCmdSetExclusiveScissorNV()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetExclusiveScissorNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_node, CMD_SETEXCLUSIVESCISSORNV, "vkCmdSetExclusiveScissorNV()");
    if (enabled_features.exclusive_scissor.exclusiveScissor == VK_FALSE) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-None-02031",
                         "vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled.");
    }
    return skip;
}
// Validates vkCmdBindShadingRateImageNV(): the shadingRateImage feature must be enabled
// and, when imageView is not VK_NULL_HANDLE, the view must be a 2D/2D_ARRAY view of an
// R8_UINT image created with SHADING_RATE_IMAGE usage and currently in the expected layout.
// Returns true if any validation error was logged.
bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
                                                          VkImageLayout imageLayout) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindShadingRateImageNV()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdBindShadingRateImageNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_BINDSHADINGRATEIMAGENV, "vkCmdBindShadingRateImageNV()");
    if (!enabled_features.shading_rate_image.shadingRateImage) {
        skip |= LogError(commandBuffer, "VUID-vkCmdBindShadingRateImageNV-None-02058",
                         "vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled.");
    }
    if (imageView != VK_NULL_HANDLE) {
        const auto view_state = GetImageViewState(imageView);
        // BUG FIX: the original bound a reference to view_state->create_info BEFORE the null
        // check below, dereferencing a null pointer when imageView is unknown or destroyed.
        // All create_info accesses are now guarded by the null check.
        if (!view_state || (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D &&
                            view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
            skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02059",
                             "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid "
                             "VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
        }
        if (view_state && view_state->create_info.format != VK_FORMAT_R8_UINT) {
            skip |= LogError(
                imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02060",
                "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT.");
        }
        if (view_state) {
            // Also guard against a view whose backing image is no longer tracked (the
            // original dereferenced GetImageState()'s result unconditionally).
            const IMAGE_STATE *image_state = GetImageState(view_state->create_info.image);
            const VkImageCreateInfo *ici = image_state ? &image_state->createInfo : nullptr;
            if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) {
                skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02061",
                                 "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been "
                                 "created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set.");
            }
            bool hit_error = false;
            // XXX TODO: While the VUID says "each subresource", only the base mip level is
            // actually used. Since we don't have an existing convenience function to iterate
            // over all mip levels, just don't bother with non-base levels.
            const VkImageSubresourceRange &range = view_state->create_info.subresourceRange;
            VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount};
            if (image_state) {
                // BUG FIX: the caller name passed to VerifyImageLayout was "vkCmdCopyImage()"
                // (copy/paste error); layout errors now report the correct entry point.
                skip |= VerifyImageLayout(cb_state, image_state, subresource, imageLayout,
                                          VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, "vkCmdBindShadingRateImageNV()",
                                          "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063",
                                          "VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error);
            }
        }
    }
    return skip;
}
// Validates vkCmdSetViewportShadingRatePaletteNV(): queue capability, feature enablement,
// and that every palette's entry count lies in [1, shadingRatePaletteSize].
bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
                                                                   uint32_t viewportCount,
                                                                   const VkShadingRatePaletteNV *pShadingRatePalettes) const {
    const CMD_BUFFER_STATE *cb_node = GetCBState(commandBuffer);
    assert(cb_node);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb_node, "vkCmdSetViewportShadingRatePaletteNV()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_node, CMD_SETVIEWPORTSHADINGRATEPALETTENV, "vkCmdSetViewportShadingRatePaletteNV()");
    if (!enabled_features.shading_rate_image.shadingRateImage) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064",
                         "vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled.");
    }
    const uint32_t max_palette_size = phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize;
    for (uint32_t vp_index = 0; vp_index < viewportCount; ++vp_index) {
        const uint32_t entry_count = pShadingRatePalettes[vp_index].shadingRatePaletteEntryCount;
        if (entry_count == 0 || entry_count > max_palette_size) {
            skip |= LogError(
                commandBuffer, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071",
                "vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize.");
        }
    }
    return skip;
}
bool CoreChecks::ValidateGeometryTrianglesNV(const VkGeometryTrianglesNV &triangles, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *vb_state = GetBufferState(triangles.vertexData);
if (vb_state != nullptr && vb_state->createInfo.size <= triangles.vertexOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-vertexOffset-02428", "%s", func_name);
}
const BUFFER_STATE *ib_state = GetBufferState(triangles.indexData);
if (ib_state != nullptr && ib_state->createInfo.size <= triangles.indexOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-indexOffset-02431", "%s", func_name);
}
const BUFFER_STATE *td_state = GetBufferState(triangles.transformData);
if (td_state != nullptr && td_state->createInfo.size <= triangles.transformOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-transformOffset-02437", "%s", func_name);
}
return skip;
}
bool CoreChecks::ValidateGeometryAABBNV(const VkGeometryAABBNV &aabbs, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *aabb_state = GetBufferState(aabbs.aabbData);
if (aabb_state != nullptr && aabb_state->createInfo.size > 0 && aabb_state->createInfo.size <= aabbs.offset) {
skip |= LogError(device, "VUID-VkGeometryAABBNV-offset-02439", "%s", func_name);
}
return skip;
}
// Dispatch VkGeometryNV validation to the checker matching its geometry type; geometry
// types without dedicated checks report no errors.
bool CoreChecks::ValidateGeometryNV(const VkGeometryNV &geometry, const char *func_name) const {
    switch (geometry.geometryType) {
        case VK_GEOMETRY_TYPE_TRIANGLES_NV:
            return ValidateGeometryTrianglesNV(geometry.geometry.triangles, func_name);
        case VK_GEOMETRY_TYPE_AABBS_NV:
            return ValidateGeometryAABBNV(geometry.geometry.aabbs, func_name);
        default:
            return false;
    }
}
// Validates vkCreateAccelerationStructureNV(): only bottom-level structures carry geometry,
// so each geometry element is validated individually for that type only.
bool CoreChecks::PreCallValidateCreateAccelerationStructureNV(VkDevice device,
                                                              const VkAccelerationStructureCreateInfoNV *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator,
                                                              VkAccelerationStructureNV *pAccelerationStructure) const {
    bool skip = false;
    if (pCreateInfo == nullptr || pCreateInfo->info.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
        return skip;
    }
    for (uint32_t geom_index = 0; geom_index < pCreateInfo->info.geometryCount; ++geom_index) {
        skip |= ValidateGeometryNV(pCreateInfo->info.pGeometries[geom_index], "vkCreateAccelerationStructureNV():");
    }
    return skip;
}
// Validates vkCreateAccelerationStructureKHR(): the backing buffer must carry the
// ACCELERATION_STRUCTURE_STORAGE usage, must not be sparse-resident, and must be large
// enough to hold the structure at the requested offset.
bool CoreChecks::PreCallValidateCreateAccelerationStructureKHR(VkDevice device,
                                                               const VkAccelerationStructureCreateInfoKHR *pCreateInfo,
                                                               const VkAllocationCallbacks *pAllocator,
                                                               VkAccelerationStructureKHR *pAccelerationStructure) const {
    bool skip = false;
    if (!pCreateInfo) return skip;
    const BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer);
    if (!buffer_state) return skip;

    const VkBufferCreateInfo &buffer_ci = buffer_state->createInfo;
    if ((buffer_ci.usage & VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR) == 0) {
        skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03614",
                         "VkAccelerationStructureCreateInfoKHR(): buffer must have been created with a usage value containing "
                         "VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR.");
    }
    if ((buffer_ci.flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) != 0) {
        skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03615",
                         "VkAccelerationStructureCreateInfoKHR(): buffer must not have been created with "
                         "VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT.");
    }
    if (pCreateInfo->offset + pCreateInfo->size > buffer_ci.size) {
        skip |= LogError(
            device, "VUID-VkAccelerationStructureCreateInfoKHR-offset-03616",
            "VkAccelerationStructureCreateInfoKHR(): The sum of offset and size must be less than the size of buffer.");
    }
    return skip;
}
// Validates one VkBindAccelerationStructureMemoryInfoNV element: the structure must not
// already be bound, and the memory object must satisfy the structure's range, type-bits,
// alignment and size requirements. Untracked structures are silently accepted.
bool CoreChecks::ValidateBindAccelerationStructureMemory(VkDevice device,
                                                         const VkBindAccelerationStructureMemoryInfoNV &info) const {
    bool skip = false;
    const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(info.accelerationStructure);
    if (!as_state) {
        return skip;
    }
    // Rebinding an already-bound acceleration structure is forbidden.
    if (!as_state->GetBoundMemory().empty()) {
        skip |=
            LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-accelerationStructure-03620",
                     "vkBindAccelerationStructureMemoryNV(): accelerationStructure must not already be backed by a memory object.");
    }
    // Validate bound memory range information
    const auto mem_info = GetDevMemState(info.memory);
    if (mem_info) {
        skip |= ValidateInsertAccelerationStructureMemoryRange(info.accelerationStructure, mem_info, info.memoryOffset,
                                                               "vkBindAccelerationStructureMemoryNV()");
        skip |= ValidateMemoryTypes(mem_info, as_state->memory_requirements.memoryRequirements.memoryTypeBits,
                                    "vkBindAccelerationStructureMemoryNV()",
                                    "VUID-VkBindAccelerationStructureMemoryInfoNV-memory-03622");
    }
    // Validate memory requirements alignment
    // NOTE(review): memory_requirements is presumably captured at query time elsewhere in the
    // state tracker — the alignment check runs even when mem_info is null; confirm intended.
    if (SafeModulo(info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment) != 0) {
        skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03623",
                         "vkBindAccelerationStructureMemoryNV(): memoryOffset 0x%" PRIxLEAST64
                         " must be an integer multiple of the alignment 0x%" PRIxLEAST64
                         " member of the VkMemoryRequirements structure returned from "
                         "a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of "
                         "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV",
                         info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment);
    }
    if (mem_info) {
        // Validate memory requirements size
        if (as_state->memory_requirements.memoryRequirements.size > (mem_info->alloc_info.allocationSize - info.memoryOffset)) {
            skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-size-03624",
                             "vkBindAccelerationStructureMemoryNV(): The size 0x%" PRIxLEAST64
                             " member of the VkMemoryRequirements structure returned from a call to "
                             "vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of "
                             "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV must be less than or equal to the size "
                             "of memory minus memoryOffset 0x%" PRIxLEAST64 ".",
                             as_state->memory_requirements.memoryRequirements.size,
                             mem_info->alloc_info.allocationSize - info.memoryOffset);
        }
    }
    return skip;
}
// Validates vkBindAccelerationStructureMemoryNV(): each bind-info element is validated
// independently; any failure flags the whole call.
bool CoreChecks::PreCallValidateBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount,
                                                                  const VkBindAccelerationStructureMemoryInfoNV *pBindInfos) const {
    bool skip = false;
    for (uint32_t bind_index = 0; bind_index < bindInfoCount; ++bind_index) {
        skip |= ValidateBindAccelerationStructureMemory(device, pBindInfos[bind_index]);
    }
    return skip;
}
// Validates vkGetAccelerationStructureHandleNV(): querying the handle requires the
// structure to be backed by device memory. Untracked structures are accepted silently.
bool CoreChecks::PreCallValidateGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
                                                                 size_t dataSize, void *pData) const {
    const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(accelerationStructure);
    if (as_state == nullptr) {
        return false;
    }
    // TODO: update the fake VUID below once the real one is generated.
    return ValidateMemoryIsBoundToAccelerationStructure(
        as_state, "vkGetAccelerationStructureHandleNV",
        "UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX");
}
// Validates vkCmdBuildAccelerationStructuresKHR(): queue capability, render-pass state,
// update-mode source-structure requirements, and dst structure type compatibility.
// Returns true if any validation error was logged.
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresKHR(
    VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
    const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructuresKHR()", VK_QUEUE_COMPUTE_BIT,
                                 "VUID-vkCmdBuildAccelerationStructuresKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURESKHR, "vkCmdBuildAccelerationStructuresKHR()");
    skip |=
        InsideRenderPass(cb_state, "vkCmdBuildAccelerationStructuresKHR()", "VUID-vkCmdBuildAccelerationStructuresKHR-renderpass");
    if (pInfos != NULL) {
        for (uint32_t info_index = 0; info_index < infoCount; ++info_index) {
            const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state =
                GetAccelerationStructureStateKHR(pInfos[info_index].srcAccelerationStructure);
            const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state =
                GetAccelerationStructureStateKHR(pInfos[info_index].dstAccelerationStructure);
            if (pInfos[info_index].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
                if (!src_as_state || src_as_state->acceleration_structure == VK_NULL_HANDLE) {
                    skip |=
                        LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03666",
                                 "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
                                 "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must not be "
                                 "VK_NULL_HANDLE.");
                }
                if (src_as_state == nullptr || !src_as_state->built ||
                    !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
                    skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03667",
                                     "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
                                     "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must "
                                     "have been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
                                     "VkAccelerationStructureBuildGeometryInfoKHR::flags.");
                }
                // BUG FIX: the original dereferenced src_as_state unconditionally for the
                // three comparisons below, crashing on a null pointer when the source
                // structure had no tracked state. The errors above already cover that case.
                if (src_as_state != nullptr) {
                    if (pInfos[info_index].geometryCount != src_as_state->build_info_khr.geometryCount) {
                        skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03758",
                                         "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
                                         "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
                                         " its geometryCount member must have the same value which was specified when "
                                         "srcAccelerationStructure was last built.");
                    }
                    if (pInfos[info_index].flags != src_as_state->build_info_khr.flags) {
                        skip |=
                            LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03759",
                                     "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
                                     " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which"
                                     " was specified when srcAccelerationStructure was last built.");
                    }
                    if (pInfos[info_index].type != src_as_state->build_info_khr.type) {
                        skip |=
                            LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03760",
                                     "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
                                     " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which"
                                     " was specified when srcAccelerationStructure was last built.");
                    }
                }
            }
            // dst must have been created with a matching (or GENERIC) structure type.
            if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
                if (!dst_as_state ||
                    (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
                     dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
                    skip |=
                        LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03700",
                                 "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have "
                                 "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
                }
            }
            if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) {
                if (!dst_as_state ||
                    (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR &&
                     dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
                    skip |=
                        LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03699",
                                 "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been "
                                 "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
                }
            }
        }
    }
    return skip;
}
// Validates vkBuildAccelerationStructuresKHR() (host build): update-mode source-structure
// requirements and dst structure type compatibility for every build info element.
// Returns true if any validation error was logged.
bool CoreChecks::PreCallValidateBuildAccelerationStructuresKHR(
    VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount,
    const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
    const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const {
    bool skip = false;
    for (uint32_t i = 0; i < infoCount; ++i) {
        const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfos[i].srcAccelerationStructure);
        const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state = GetAccelerationStructureStateKHR(pInfos[i].dstAccelerationStructure);
        if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
            if (!src_as_state || !src_as_state->acceleration_structure) {
                skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03666",
                                 "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
                                 "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must not be "
                                 "VK_NULL_HANDLE.");
            }
            if (src_as_state == nullptr || !src_as_state->built ||
                !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
                skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03667",
                                 "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
                                 "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have "
                                 "been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
                                 "VkAccelerationStructureBuildGeometryInfoKHR::flags.");
            }
            // BUG FIX: the original dereferenced src_as_state unconditionally for the three
            // comparisons below, crashing on a null pointer when the source structure had no
            // tracked state. The errors above already cover that case.
            if (src_as_state != nullptr) {
                if (pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) {
                    skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03758",
                                     "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
                                     "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
                                     " its geometryCount member must have the same value which was specified when "
                                     "srcAccelerationStructure was last built.");
                }
                if (pInfos[i].flags != src_as_state->build_info_khr.flags) {
                    skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03759",
                                     "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
                                     " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which"
                                     " was specified when srcAccelerationStructure was last built.");
                }
                if (pInfos[i].type != src_as_state->build_info_khr.type) {
                    skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03760",
                                     "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
                                     " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which"
                                     " was specified when srcAccelerationStructure was last built.");
                }
            }
        }
        // dst must have been created with a matching (or GENERIC) structure type.
        if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
            if (!dst_as_state ||
                (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
                 dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
                skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03700",
                                 "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have "
                                 "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
            }
        }
        if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) {
            if (!dst_as_state ||
                (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR &&
                 dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
                skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03699",
                                 "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been "
                                 "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
            }
        }
    }
    return skip;
}
// Validates vkCmdBuildAccelerationStructureNV() at record time.
// Checks, in order: queue family / command-state validity, render-pass restriction,
// per-geometry validation for bottom-level builds, geometryCount device limit,
// compatibility of the build info (pInfo) with dst's create info, memory binding of dst,
// update-mode (src) requirements and update-scratch sizing, build-scratch sizing,
// and usage flags on the instanceData and scratch buffers.
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer,
                                                                const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData,
                                                                VkDeviceSize instanceOffset, VkBool32 update,
                                                                VkAccelerationStructureNV dst, VkAccelerationStructureNV src,
                                                                VkBuffer scratch, VkDeviceSize scratchOffset) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructureNV()", VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdBuildAccelerationStructureNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURENV, "vkCmdBuildAccelerationStructureNV()");
    skip |= InsideRenderPass(cb_state, "vkCmdBuildAccelerationStructureNV()", "VUID-vkCmdBuildAccelerationStructureNV-renderpass");

    // Geometry contents are only meaningful for bottom-level builds.
    if (pInfo != nullptr && pInfo->type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
        for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
            skip |= ValidateGeometryNV(pInfo->pGeometries[i], "vkCmdBuildAccelerationStructureNV():");
        }
    }

    if (pInfo != nullptr && pInfo->geometryCount > phys_dev_ext_props.ray_tracing_propsNV.maxGeometryCount) {
        skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-geometryCount-02241",
                         "vkCmdBuildAccelerationStructureNV(): geometryCount [%d] must be less than or equal to "
                         "VkPhysicalDeviceRayTracingPropertiesNV::maxGeometryCount.",
                         pInfo->geometryCount);
    }

    const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureStateNV(dst);
    const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureStateNV(src);
    const BUFFER_STATE *scratch_buffer_state = GetBufferState(scratch);

    // The build info must fit within what dst was created to hold (VUID 02488 family).
    if (dst_as_state != nullptr && pInfo != nullptr) {
        if (dst_as_state->create_infoNV.info.type != pInfo->type) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
                             "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::type"
                             "[%s] must be identical to build info VkAccelerationStructureInfoNV::type [%s].",
                             string_VkAccelerationStructureTypeNV(dst_as_state->create_infoNV.info.type),
                             string_VkAccelerationStructureTypeNV(pInfo->type));
        }
        if (dst_as_state->create_infoNV.info.flags != pInfo->flags) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
                             "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::flags"
                             "[0x%X] must be identical to build info VkAccelerationStructureInfoNV::flags [0x%X].",
                             dst_as_state->create_infoNV.info.flags, pInfo->flags);
        }
        if (dst_as_state->create_infoNV.info.instanceCount < pInfo->instanceCount) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
                             "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::instanceCount "
                             "[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::instanceCount [%d].",
                             dst_as_state->create_infoNV.info.instanceCount, pInfo->instanceCount);
        }
        if (dst_as_state->create_infoNV.info.geometryCount < pInfo->geometryCount) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
                             "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::geometryCount"
                             "[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::geometryCount [%d].",
                             dst_as_state->create_infoNV.info.geometryCount, pInfo->geometryCount);
        } else {
            // Per-geometry capacity checks; first failure aborts further comparison.
            // NOTE(review): this branch indexes dst_as_state->create_infoNV.info.pGeometries[i] —
            // presumably pGeometries is non-null whenever the create-time geometryCount covers
            // the build's geometryCount; confirm against acceleration-structure creation validation.
            for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
                const VkGeometryDataNV &create_geometry_data = dst_as_state->create_infoNV.info.pGeometries[i].geometry;
                const VkGeometryDataNV &build_geometry_data = pInfo->pGeometries[i].geometry;
                if (create_geometry_data.triangles.vertexCount < build_geometry_data.triangles.vertexCount) {
                    skip |= LogError(
                        commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
                        "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.vertexCount [%d]"
                        "must be greater than or equal to build info pGeometries[%d].geometry.triangles.vertexCount [%d].",
                        i, create_geometry_data.triangles.vertexCount, i, build_geometry_data.triangles.vertexCount);
                    break;
                }
                if (create_geometry_data.triangles.indexCount < build_geometry_data.triangles.indexCount) {
                    skip |= LogError(
                        commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
                        "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.indexCount [%d]"
                        "must be greater than or equal to build info pGeometries[%d].geometry.triangles.indexCount [%d].",
                        i, create_geometry_data.triangles.indexCount, i, build_geometry_data.triangles.indexCount);
                    break;
                }
                if (create_geometry_data.aabbs.numAABBs < build_geometry_data.aabbs.numAABBs) {
                    skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
                                     "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.aabbs.numAABBs [%d]"
                                     "must be greater than or equal to build info pGeometries[%d].geometry.aabbs.numAABBs [%d].",
                                     i, create_geometry_data.aabbs.numAABBs, i, build_geometry_data.aabbs.numAABBs);
                    break;
                }
            }
        }
    }

    if (dst_as_state != nullptr) {
        skip |= ValidateMemoryIsBoundToAccelerationStructure(
            dst_as_state, "vkCmdBuildAccelerationStructureNV()",
            "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
    }

    // Update builds require a previously built src with ALLOW_UPDATE, and sufficient
    // update-scratch space; fresh builds use the (larger) build-scratch requirement.
    if (update == VK_TRUE) {
        if (src == VK_NULL_HANDLE) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02489",
                             "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must not be VK_NULL_HANDLE.");
        } else {
            if (src_as_state == nullptr || !src_as_state->built ||
                !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV)) {
                skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02490",
                                 "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must have been built before "
                                 "with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV set in "
                                 "VkAccelerationStructureInfoNV::flags.");
            }
        }
        if (dst_as_state != nullptr && !dst_as_state->update_scratch_memory_requirements_checked) {
            skip |=
                LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoUpdateMemReqQuery,
                           "vkCmdBuildAccelerationStructureNV(): Updating %s but vkGetAccelerationStructureMemoryRequirementsNV() "
                           "has not been called for update scratch memory.",
                           report_data->FormatHandle(dst_as_state->acceleration_structure).c_str());
            // Use requirements fetched at create time
        }
        if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
            dst_as_state->update_scratch_memory_requirements.memoryRequirements.size >
                (scratch_buffer_state->createInfo.size - scratchOffset)) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02492",
                             "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, The size member of the "
                             "VkMemoryRequirements structure returned from a call to "
                             "vkGetAccelerationStructureMemoryRequirementsNV with "
                             "VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
                             "VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
                             "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV must be less than "
                             "or equal to the size of scratch minus scratchOffset");
        }
    } else {
        if (dst_as_state != nullptr && !dst_as_state->build_scratch_memory_requirements_checked) {
            skip |= LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoScratchMemReqQuery,
                               "vkCmdBuildAccelerationStructureNV(): Assigning scratch buffer to %s but "
                               "vkGetAccelerationStructureMemoryRequirementsNV() has not been called for scratch memory.",
                               report_data->FormatHandle(dst_as_state->acceleration_structure).c_str());
            // Use requirements fetched at create time
        }
        if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
            dst_as_state->build_scratch_memory_requirements.memoryRequirements.size >
                (scratch_buffer_state->createInfo.size - scratchOffset)) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02491",
                             "vkCmdBuildAccelerationStructureNV(): If update is VK_FALSE, The size member of the "
                             "VkMemoryRequirements structure returned from a call to "
                             "vkGetAccelerationStructureMemoryRequirementsNV with "
                             "VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
                             "VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
                             "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV must be less than "
                             "or equal to the size of scratch minus scratchOffset");
        }
    }

    // Both instanceData (if provided) and scratch must have been created with ray-tracing usage.
    if (instanceData != VK_NULL_HANDLE) {
        const auto buffer_state = GetBufferState(instanceData);
        if (buffer_state != nullptr) {
            skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true,
                                             "VUID-VkAccelerationStructureInfoNV-instanceData-02782",
                                             "vkCmdBuildAccelerationStructureNV()", "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV");
        }
    }
    if (scratch_buffer_state != nullptr) {
        skip |= ValidateBufferUsageFlags(scratch_buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true,
                                         "VUID-VkAccelerationStructureInfoNV-scratch-02781", "vkCmdBuildAccelerationStructureNV()",
                                         "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV");
    }
    return skip;
}
// Validates vkCmdCopyAccelerationStructureNV(): queue family / command state,
// render-pass restriction, dst memory binding, compaction preconditions on src,
// and the allowed copy modes.
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst,
                                                               VkAccelerationStructureNV src,
                                                               VkCopyAccelerationStructureModeNV mode) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureNV()", VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdCopyAccelerationStructureNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURENV, "vkCmdCopyAccelerationStructureNV()");
    skip |= InsideRenderPass(cb_state, "vkCmdCopyAccelerationStructureNV()", "VUID-vkCmdCopyAccelerationStructureNV-renderpass");
    const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureStateNV(dst);
    const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureStateNV(src);
    if (dst_as_state != nullptr) {
        // Fix: the error context previously named "vkCmdBuildAccelerationStructureNV()"
        // (copy-paste from the build validator); this is the Copy entry point.
        skip |= ValidateMemoryIsBoundToAccelerationStructure(
            dst_as_state, "vkCmdCopyAccelerationStructureNV()",
            "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
    }
    // Compacting copies require src to have been built with ALLOW_COMPACTION.
    if (mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV) {
        if (src_as_state != nullptr &&
            (!src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV))) {
            skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-src-03411",
                             "vkCmdCopyAccelerationStructureNV(): src must have been built with "
                             "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV if mode is "
                             "VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV.");
        }
    }
    // Only CLONE and COMPACT modes are valid for the NV copy command.
    if (!(mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV || mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR)) {
        skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-mode-03410",
                         "vkCmdCopyAccelerationStructureNV():mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR"
                         "or VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR.");
    }
    return skip;
}
// Validates vkDestroyAccelerationStructureNV(): the object must not be in use
// by any pending command buffer when destroyed.
bool CoreChecks::PreCallValidateDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
                                                               const VkAllocationCallbacks *pAllocator) const {
    bool skip = false;
    const auto *as_state = GetAccelerationStructureStateNV(accelerationStructure);
    const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureNV);
    if (as_state) {
        skip |= ValidateObjectNotInUse(as_state, obj_struct, "vkDestroyAccelerationStructureNV",
                                       "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442");
    }
    return skip;
}
// Validates vkDestroyAccelerationStructureKHR(): the object must not be in use,
// and pAllocator may only be supplied here if callbacks were supplied at creation.
bool CoreChecks::PreCallValidateDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure,
                                                                const VkAllocationCallbacks *pAllocator) const {
    const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(accelerationStructure);
    const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureKHR);
    bool skip = false;
    if (as_state) {
        skip |= ValidateObjectNotInUse(as_state, obj_struct, "vkDestroyAccelerationStructureKHR",
                                       "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442");
    }
    // Fix: as_state can be null (e.g. an invalid/destroyed handle) — the original
    // dereferenced as_state->allocator unconditionally, while the in-use check above
    // was guarded. Only perform the allocator-consistency check when state exists.
    if (as_state && pAllocator && !as_state->allocator) {
        skip |= LogError(device, "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02444",
                         "vkDestroyAccelerationStructureKHR(): If no VkAllocationCallbacks were provided when accelerationStructure"
                         "was created, pAllocator must be NULL.");
    }
    return skip;
}
// vkCmdSetViewportWScalingNV() is graphics-queue-only dynamic state; only the
// generic queue/command-recording checks apply here.
bool CoreChecks::PreCallValidateCmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
                                                         uint32_t viewportCount,
                                                         const VkViewportWScalingNV *pViewportWScalings) const {
    const auto *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdSetViewportWScalingNV()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetViewportWScalingNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_SETVIEWPORTWSCALINGNV, "vkCmdSetViewportWScalingNV()");
    return skip;
}
// vkCmdSetLineWidth() dynamic state: graphics-queue-only, recording-state checks.
bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) const {
    const auto *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetLineWidth-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
    return skip;
}
// vkCmdSetLineStippleEXT() dynamic state: graphics-queue-only, recording-state checks.
bool CoreChecks::PreCallValidateCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor,
                                                     uint16_t lineStipplePattern) const {
    const auto *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdSetLineStippleEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetLineStippleEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_SETLINESTIPPLEEXT, "vkCmdSetLineStippleEXT()");
    return skip;
}
// vkCmdSetDepthBias() dynamic state. Besides the generic recording checks, a
// non-zero depthBiasClamp requires the depthBiasClamp device feature.
bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
                                                float depthBiasSlopeFactor) const {
    const auto *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetDepthBias-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
    if (!enabled_features.core.depthBiasClamp && depthBiasClamp != 0.0) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
                         "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
                         "be set to 0.0.");
    }
    return skip;
}
// vkCmdSetBlendConstants() dynamic state: graphics-queue-only, recording-state checks.
bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) const {
    const auto *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetBlendConstants-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
    return skip;
}
// vkCmdSetDepthBounds() dynamic state. Without VK_EXT_depth_range_unrestricted,
// both bounds must lie in [0.0, 1.0].
bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) const {
    const auto *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetDepthBounds-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
    // The extension has no feature bit, which prevents displaying the 2 variations of the VUIDs.
    if (!device_extensions.vk_ext_depth_range_unrestricted) {
        // Range tests are written as negated conjunctions so NaN also fails them.
        if (!((minDepthBounds >= 0.0) && (minDepthBounds <= 1.0))) {
            // Also VUID-vkCmdSetDepthBounds-minDepthBounds-00600
            skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-minDepthBounds-02508",
                             "vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and minDepthBounds "
                             "(=%f) is not within the [0.0, 1.0] range.",
                             minDepthBounds);
        }
        if (!((maxDepthBounds >= 0.0) && (maxDepthBounds <= 1.0))) {
            // Also VUID-vkCmdSetDepthBounds-maxDepthBounds-00601
            skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-maxDepthBounds-02509",
                             "vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and maxDepthBounds "
                             "(=%f) is not within the [0.0, 1.0] range.",
                             maxDepthBounds);
        }
    }
    return skip;
}
// vkCmdSetStencilCompareMask() dynamic state: graphics-queue-only, recording-state checks.
bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                         uint32_t compareMask) const {
    const auto *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetStencilCompareMask-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
    return skip;
}
// vkCmdSetStencilWriteMask() dynamic state: graphics-queue-only, recording-state checks.
bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                       uint32_t writeMask) const {
    const auto *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetStencilWriteMask-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
    return skip;
}
// vkCmdSetStencilReference() dynamic state: graphics-queue-only, recording-state checks.
bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                       uint32_t reference) const {
    const auto *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetStencilReference-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
    return skip;
}
// If the binding's descriptor type matches test_type, checks that each of the
// binding's dynamic offsets (a descriptorCount-long run starting at *offset_idx)
// is a multiple of the given device-limit alignment, then advances *offset_idx
// past the consumed run. Non-matching bindings consume no offsets.
bool CoreChecks::ValidateDynamicOffsetAlignment(VkCommandBuffer command_buffer, const VkDescriptorSetLayoutBinding *binding,
                                                VkDescriptorType test_type, VkDeviceSize alignment, const uint32_t *pDynamicOffsets,
                                                const char *err_msg, const char *limit_name, uint32_t *offset_idx) const {
    bool skip = false;
    if (binding->descriptorType != test_type) {
        return skip;  // Not the descriptor type being checked; leave *offset_idx untouched.
    }
    const uint32_t end_idx = *offset_idx + binding->descriptorCount;
    for (uint32_t idx = *offset_idx; idx < end_idx; ++idx) {
        if (SafeModulo(pDynamicOffsets[idx], alignment) != 0) {
            skip |= LogError(command_buffer, err_msg,
                             "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of device limit %s "
                             "0x%" PRIxLEAST64 ".",
                             idx, pDynamicOffsets[idx], limit_name, alignment);
        }
    }
    *offset_idx = end_idx;
    return skip;
}
// Validates vkCmdBindDescriptorSets(): queue family / command state, per-set layout
// compatibility with the pipeline layout, dynamic-offset count and alignment
// (uniform and storage buffer dynamic descriptors), and the firstSet + setCount
// range against the pipeline layout's setLayoutCount.
bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                      VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                      const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                      const uint32_t *pDynamicOffsets) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdBindDescriptorSets-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
    // Track total count of dynamic descriptor types to make sure we have an offset for each one
    uint32_t total_dynamic_descriptors = 0;
    string error_string = "";
    // NOTE(review): pipeline_layout is dereferenced at the end without a null check;
    // presumably an invalid layout handle is rejected before reaching core checks — confirm.
    const auto *pipeline_layout = GetPipelineLayout(layout);
    for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
        const cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]);
        if (descriptor_set) {
            // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
            if (!VerifySetLayoutCompatibility(report_data, descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
                skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
                                 "vkCmdBindDescriptorSets(): descriptorSet #%u being bound is not compatible with overlapping "
                                 "descriptorSetLayout at index %u of "
                                 "%s due to: %s.",
                                 set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str());
            }
            auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
            if (set_dynamic_descriptor_count) {
                // First make sure we won't overstep bounds of pDynamicOffsets array
                if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
                    // Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause
                    skip |=
                        LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
                                 "vkCmdBindDescriptorSets(): descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u "
                                 "dynamicOffsets are left in "
                                 "pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                 set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(),
                                 descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors));
                    // Set the number found to the maximum to prevent duplicate messages, or subsquent descriptor sets from
                    // testing against the "short tail" we're skipping below.
                    total_dynamic_descriptors = dynamicOffsetCount;
                } else {  // Validate dynamic offsets and Dynamic Offset Minimums
                    // cur_dyn_offset walks pDynamicOffsets in binding order; each call to
                    // ValidateDynamicOffsetAlignment advances it past any offsets it consumed.
                    uint32_t cur_dyn_offset = total_dynamic_descriptors;
                    const auto dsl = descriptor_set->GetLayout();
                    const auto binding_count = dsl->GetBindingCount();
                    const auto &limits = phys_dev_props.limits;
                    for (uint32_t binding_idx = 0; binding_idx < binding_count; binding_idx++) {
                        const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
                        skip |= ValidateDynamicOffsetAlignment(commandBuffer, binding, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
                                                               limits.minUniformBufferOffsetAlignment, pDynamicOffsets,
                                                               "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971",
                                                               "minUniformBufferOffsetAlignment", &cur_dyn_offset);
                        skip |= ValidateDynamicOffsetAlignment(commandBuffer, binding, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
                                                               limits.minStorageBufferOffsetAlignment, pDynamicOffsets,
                                                               "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972",
                                                               "minStorageBufferOffsetAlignment", &cur_dyn_offset);
                    }
                    // Keep running total of dynamic descriptor count to verify at the end
                    total_dynamic_descriptors += set_dynamic_descriptor_count;
                }
            }
        } else {
            skip |= LogError(pDescriptorSets[set_idx], kVUID_Core_DrawState_InvalidSet,
                             "vkCmdBindDescriptorSets(): Attempt to bind %s that doesn't exist!",
                             report_data->FormatHandle(pDescriptorSets[set_idx]).c_str());
        }
    }
    // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
    if (total_dynamic_descriptors != dynamicOffsetCount) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
                         "vkCmdBindDescriptorSets(): Attempting to bind %u descriptorSets with %u dynamic descriptors, but "
                         "dynamicOffsetCount is %u. It should "
                         "exactly match the number of dynamic descriptors.",
                         setCount, total_dynamic_descriptors, dynamicOffsetCount);
    }
    // firstSet and descriptorSetCount sum must be less than setLayoutCount
    if ((firstSet + setCount) > static_cast<uint32_t>(pipeline_layout->set_layouts.size())) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindDescriptorSets-firstSet-00360",
                         "vkCmdBindDescriptorSets(): Sum of firstSet (%u) and descriptorSetCount (%u) is greater than "
                         "VkPipelineLayoutCreateInfo::setLayoutCount "
                         "(%zu) when pipeline layout was created",
                         firstSet, setCount, pipeline_layout->set_layouts.size());
    }
    return skip;
}
// Validates that the supplied bind point is supported for the command buffer (vis. the command pool)
// Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint
// TODO add vkCmdBindPipeline bind_point validation using this call.
bool CoreChecks::ValidatePipelineBindPoint(const CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint bind_point, const char *func_name,
                                           const std::map<VkPipelineBindPoint, std::string> &bind_errors) const {
    bool skip = false;
    const auto *pool = cb_state->command_pool.get();
    // The loss of a pool in a recording cmd is reported in DestroyCommandPool
    if (pool) {
        // Queue capabilities each bind point requires.
        static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = {
            {VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)},
            {VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)},
            {VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)},
        };
        const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex];
        if ((qfp.queueFlags & flag_mask.at(bind_point)) == 0) {
            const std::string &error = bind_errors.at(bind_point);
            LogObjectList objlist(cb_state->commandBuffer);
            objlist.add(cb_state->createInfo.commandPool);
            skip |= LogError(objlist, error, "%s: %s was allocated from %s that does not support bindpoint %s.", func_name,
                             report_data->FormatHandle(cb_state->commandBuffer).c_str(),
                             report_data->FormatHandle(cb_state->createInfo.commandPool).c_str(),
                             string_VkPipelineBindPoint(bind_point));
        }
    }
    return skip;
}
// Validates vkCmdPushDescriptorSetKHR(): command/queue state, bind point support,
// that `set` indexes a push-descriptor layout within the pipeline layout, and the
// descriptor writes themselves (via a proxy descriptor set).
bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                        VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
                                                        const VkWriteDescriptorSet *pDescriptorWrites) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    const char *func_name = "vkCmdPushDescriptorSetKHR()";
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
    skip |= ValidateCmdQueueFlags(cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
                                  "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
    static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
        std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
        std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
        std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")};
    skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, func_name, bind_errors);
    const auto layout_data = GetPipelineLayout(layout);
    // Validate the set index points to a push descriptor set and is in range
    if (layout_data) {
        const auto &set_layouts = layout_data->set_layouts;
        if (set < set_layouts.size()) {
            const auto dsl = set_layouts[set];
            if (dsl) {
                if (!dsl->IsPushDescriptor()) {
                    // Fix: accumulate with |= — plain assignment discarded any skip already
                    // set above when this LogError is filtered out (returns false).
                    skip |= LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
                                     "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name,
                                     set, report_data->FormatHandle(layout).c_str());
                } else {
                    // Create an empty proxy in order to use the existing descriptor set update validation
                    // TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we
                    // don't have to do this.
                    cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this);
                    skip |= ValidatePushDescriptorsUpdate(&proxy_ds, descriptorWriteCount, pDescriptorWrites, func_name);
                }
            }
        } else {
            // Fix: accumulate with |= for the same reason as above.
            skip |= LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
                             "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
                             report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(set_layouts.size()));
        }
    }
    return skip;
}
// Validates vkCmdBindIndexBuffer(): buffer usage flag, queue family / command state,
// memory binding, index-type-based offset alignment, and that offset lies inside
// the buffer.
bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                                   VkIndexType indexType) const {
    const auto buffer_state = GetBufferState(buffer);
    const auto cb_node = GetCBState(commandBuffer);
    assert(buffer_state);
    assert(cb_node);

    bool skip =
        ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433",
                                 "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
    skip |= ValidateCmdQueueFlags(cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdBindIndexBuffer-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
    skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434");
    // offset must be a multiple of the index element size (2 or 4 bytes).
    const auto offset_align = GetIndexAlignment(indexType);
    if (offset % offset_align) {
        skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00432",
                         "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
                         string_VkIndexType(indexType));
    }
    // NOTE(review): the bound check below compares against buffer_state->requirements.size
    // (the VkMemoryRequirements size) rather than createInfo.size; the VUID text refers to
    // the size of the buffer — confirm this is the intended field.
    if (offset >= buffer_state->requirements.size) {
        skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00431",
                         "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64
                         ") of buffer (%s).",
                         offset, buffer_state->requirements.size, report_data->FormatHandle(buffer_state->buffer).c_str());
    }
    return skip;
}
// Validates vkCmdBindVertexBuffers(): queue/command state, then for each binding
// the vertex-buffer usage flag, memory binding, and that the offset lies inside
// the buffer.
bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
                                                     const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) const {
    const auto *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdBindVertexBuffers-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
    for (uint32_t binding = 0; binding < bindingCount; ++binding) {
        const auto *buf_state = GetBufferState(pBuffers[binding]);
        if (!buf_state) continue;  // Unknown handles are reported elsewhere.
        skip |= ValidateBufferUsageFlags(buf_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
                                         "VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()",
                                         "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
        skip |= ValidateMemoryIsBoundToBuffer(buf_state, "vkCmdBindVertexBuffers()",
                                              "VUID-vkCmdBindVertexBuffers-pBuffers-00628");
        if (pOffsets[binding] >= buf_state->createInfo.size) {
            skip |= LogError(buf_state->buffer, "VUID-vkCmdBindVertexBuffers-pOffsets-00626",
                             "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.",
                             pOffsets[binding]);
        }
    }
    return skip;
}
// Validate that an image's sampleCount matches the requirement for a specific API call
bool CoreChecks::ValidateImageSampleCount(const IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location,
                                          const std::string &msgCode) const {
    // Only a creation-time/requirement mismatch produces an error.
    if (image_state->createInfo.samples == sample_count) {
        return false;
    }
    return LogError(image_state->image, msgCode, "%s for %s was created with a sample count of %s but must be %s.", location,
                    report_data->FormatHandle(image_state->image).c_str(),
                    string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
}
// Validates vkCmdUpdateBuffer(): destination memory binding, TRANSFER_DST usage,
// queue/command state, render-pass restriction, and protected-memory rules.
bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                VkDeviceSize dataSize, const void *pData) const {
    const auto *cb = GetCBState(commandBuffer);
    const auto *dst_state = GetBufferState(dstBuffer);
    assert(cb);
    assert(dst_state);
    bool skip = false;
    skip |= ValidateMemoryIsBoundToBuffer(dst_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035");
    // Validate that DST buffer has correct usage flags set
    skip |= ValidateBufferUsageFlags(dst_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     "VUID-vkCmdUpdateBuffer-dstBuffer-00034", "vkCmdUpdateBuffer()",
                                     "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |= ValidateCmdQueueFlags(cb, "vkCmdUpdateBuffer()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdUpdateBuffer-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
    skip |= InsideRenderPass(cb, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-renderpass");
    skip |= ValidateProtectedBuffer(cb, dst_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01813");
    skip |= ValidateUnprotectedBuffer(cb, dst_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01814");
    return skip;
}
// Validate vkCmdSetEvent: queue capabilities, command state, render-pass restriction,
// and the stage mask (feature-dependent stage bits plus the no-host-stage rule).
bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
    const CMD_BUFFER_STATE *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdSetEvent-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_SETEVENT, "vkCmdSetEvent()");
    skip |= InsideRenderPass(cb, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-renderpass");
    // Stage bits that depend on optional device features
    skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-stageMask-04090",
                                         "VUID-vkCmdSetEvent-stageMask-04091", "VUID-vkCmdSetEvent-stageMask-04095",
                                         "VUID-vkCmdSetEvent-stageMask-04096");
    skip |= ValidateStageMaskHost(stageMask, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-stageMask-01149");
    return skip;
}
// Validate vkCmdResetEvent; mirrors PreCallValidateCmdSetEvent with the reset-specific VUIDs.
bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
    const CMD_BUFFER_STATE *cb = GetCBState(commandBuffer);
    assert(cb);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdResetEvent-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb, CMD_RESETEVENT, "vkCmdResetEvent()");
    skip |= InsideRenderPass(cb, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-renderpass");
    // Stage bits that depend on optional device features
    skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-stageMask-04090",
                                         "VUID-vkCmdResetEvent-stageMask-04091", "VUID-vkCmdResetEvent-stageMask-04095",
                                         "VUID-vkCmdResetEvent-stageMask-04096");
    skip |= ValidateStageMaskHost(stageMask, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-stageMask-01153");
    return skip;
}
// True when inflags contains any stage bit outside the framebuffer-space stages
// (fragment shader, early/late fragment tests, color attachment output).
static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags inflags) {
    constexpr VkPipelineStageFlags kFramebufferSpaceStages =
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
        VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    return (inflags & ~kFramebufferSpaceStages) != 0;
}
// Verify an image memory barrier recorded inside a render pass instance:
// - the barrier's image must match one of the active framebuffer's attachment views,
// - that attachment must be referenced by the active subpass (depth/stencil, DS resolve,
//   color, or color resolve reference),
// - oldLayout must equal newLayout, and (when the subpass reference was found) must equal
//   the layout the subpass declares for the attachment.
// primary_cb_state is non-null when a secondary command buffer is validated at execute time,
// so attachment views can be resolved through the primary's state (see
// GetActiveAttachmentImageViewState). Returns true if any error was logged.
bool CoreChecks::ValidateImageBarrierAttachment(const char *funcName, CMD_BUFFER_STATE const *cb_state,
                                                const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
                                                const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle,
                                                uint32_t img_index, const VkImageMemoryBarrier &img_barrier,
                                                const CMD_BUFFER_STATE *primary_cb_state) const {
    bool skip = false;
    const auto *fb_state = framebuffer;
    assert(fb_state);
    const auto img_bar_image = img_barrier.image;
    bool image_match = false;
    bool sub_image_found = false;  // Do we find a corresponding subpass description
    VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
    uint32_t attach_index = 0;
    // Verify that a framebuffer image matches barrier image
    const auto attachment_count = fb_state->createInfo.attachmentCount;
    for (uint32_t attachment = 0; attachment < attachment_count; ++attachment) {
        auto view_state = GetActiveAttachmentImageViewState(cb_state, attachment, primary_cb_state);
        if (view_state && (img_bar_image == view_state->create_info.image)) {
            image_match = true;
            attach_index = attachment;
            break;
        }
    }
    if (image_match) {  // Make sure subpass is referring to matching attachment
        // Check the depth/stencil reference first ...
        if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
            sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
            sub_image_found = true;
        }
        // ... then a VK_KHR_depth_stencil_resolve attachment chained off the subpass ...
        if (!sub_image_found && device_extensions.vk_khr_depth_stencil_resolve) {
            const auto *resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(sub_desc.pNext);
            if (resolve && resolve->pDepthStencilResolveAttachment &&
                resolve->pDepthStencilResolveAttachment->attachment == attach_index) {
                sub_image_layout = resolve->pDepthStencilResolveAttachment->layout;
                sub_image_found = true;
            }
        }
        // ... finally the color and color-resolve references.
        if (!sub_image_found) {
            for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
                if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pColorAttachments[j].layout;
                    sub_image_found = true;
                    break;
                }
                if (!sub_image_found && sub_desc.pResolveAttachments &&
                    sub_desc.pResolveAttachments[j].attachment == attach_index) {
                    sub_image_layout = sub_desc.pResolveAttachments[j].layout;
                    sub_image_found = true;
                    break;
                }
            }
        }
        if (!sub_image_found) {
            // The image belongs to the framebuffer but the active subpass does not reference it
            skip |= LogError(rp_handle, "VUID-vkCmdPipelineBarrier-image-04073",
                             "%s: Barrier pImageMemoryBarriers[%d].%s is not referenced by the VkSubpassDescription for "
                             "active subpass (%d) of current %s.",
                             funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
                             report_data->FormatHandle(rp_handle).c_str());
        }
    } else {  // !image_match
        skip |=
            LogError(fb_state->framebuffer, "VUID-vkCmdPipelineBarrier-image-04073",
                     "%s: Barrier pImageMemoryBarriers[%d].%s does not match an image from the current %s.", funcName, img_index,
                     report_data->FormatHandle(img_bar_image).c_str(), report_data->FormatHandle(fb_state->framebuffer).c_str());
    }
    // Within a render pass instance a barrier must not transition layouts ...
    if (img_barrier.oldLayout != img_barrier.newLayout) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-oldLayout-01181",
                         "%s: As the Image Barrier for %s is being executed within a render pass instance, oldLayout must "
                         "equal newLayout yet they are %s and %s.",
                         funcName, report_data->FormatHandle(img_barrier.image).c_str(),
                         string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(img_barrier.newLayout));
    } else {
        // ... and its (equal) layouts must match what the subpass declares for the attachment.
        if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
            LogObjectList objlist(rp_handle);
            objlist.add(img_bar_image);
            skip |= LogError(objlist, "VUID-vkCmdPipelineBarrier-oldLayout-01181",
                             "%s: Barrier pImageMemoryBarriers[%d].%s is referenced by the VkSubpassDescription for active "
                             "subpass (%d) of current %s as having layout %s, but image barrier has layout %s.",
                             funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
                             report_data->FormatHandle(rp_handle).c_str(), string_VkImageLayout(sub_image_layout),
                             string_VkImageLayout(img_barrier.oldLayout));
        }
    }
    return skip;
}
// Validate the image memory barriers of a pipeline barrier recorded within a render pass:
// each barrier's src/dst access masks must be a subset of some self-dependency of the
// active subpass, queue family indices must both be VK_QUEUE_FAMILY_IGNORED, and (when a
// framebuffer is bound) the barrier image must be a properly-referenced attachment
// (delegated to ValidateImageBarrierAttachment). Returns true if any error was logged.
bool CoreChecks::ValidateRenderPassImageBarriers(const char *funcName, const CMD_BUFFER_STATE *cb_state, uint32_t active_subpass,
                                                 const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle,
                                                 const safe_VkSubpassDependency2 *dependencies,
                                                 const std::vector<uint32_t> &self_dependencies, uint32_t image_mem_barrier_count,
                                                 const VkImageMemoryBarrier *image_barriers) const {
    bool skip = false;
    for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
        const auto &img_barrier = image_barriers[i];
        const auto &img_src_access_mask = img_barrier.srcAccessMask;
        const auto &img_dst_access_mask = img_barrier.dstAccessMask;
        bool access_mask_match = false;
        // Any single self-dependency whose access masks are supersets of the barrier's will do
        for (const auto self_dep_index : self_dependencies) {
            const auto &sub_dep = dependencies[self_dep_index];
            access_mask_match = (img_src_access_mask == (sub_dep.srcAccessMask & img_src_access_mask)) &&
                                (img_dst_access_mask == (sub_dep.dstAccessMask & img_dst_access_mask));
            if (access_mask_match) break;
        }
        if (!access_mask_match) {
            // List candidate self-dependency indices to help the user find the mismatch
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            skip |= LogError(rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                             "%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
                             "srcAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
                             funcName, i, img_src_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
                             self_dep_ss.str().c_str());
            skip |= LogError(rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                             "%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
                             "dstAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
                             funcName, i, img_dst_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
                             self_dep_ss.str().c_str());
        }
        // No queue family ownership transfers inside a render pass
        if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
            VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
            skip |= LogError(rp_handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182",
                             "%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
                             "pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
                             funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
        }
        // Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known
        if (VK_NULL_HANDLE != cb_state->activeFramebuffer) {
            skip |= ValidateImageBarrierAttachment(funcName, cb_state, cb_state->activeFramebuffer.get(), active_subpass, sub_desc,
                                                   rp_handle, i, img_barrier);
        }
    }
    return skip;
}
// Validate VUs for Pipeline Barriers that are within a renderPass:
// such a barrier must match a self-dependency of the active subpass — its stage masks,
// every memory/image barrier's access masks, and its dependencyFlags must each be
// compatible with at least one self-dependency, and buffer memory barriers are
// disallowed entirely. Image barriers are further checked by
// ValidateRenderPassImageBarriers. Returns true if any error was logged.
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
bool CoreChecks::ValidateRenderPassPipelineBarriers(const char *funcName, const CMD_BUFFER_STATE *cb_state,
                                                    VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
                                                    VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
                                                    const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
                                                    const VkBufferMemoryBarrier *buffer_mem_barriers,
                                                    uint32_t image_mem_barrier_count,
                                                    const VkImageMemoryBarrier *image_barriers) const {
    bool skip = false;
    const auto rp_state = cb_state->activeRenderPass;
    const auto active_subpass = cb_state->activeSubpass;
    const auto &self_dependencies = rp_state->self_dependencies[active_subpass];
    const auto &dependencies = rp_state->createInfo.pDependencies;
    // Without any self-dependency on the active subpass, no barrier is legal here at all
    if (self_dependencies.size() == 0) {
        skip |= LogError(rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                         "%s: Barriers cannot be set during subpass %d of %s with no self-dependency specified.", funcName,
                         active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str());
    } else {
        // Grab ref to current subpassDescription up-front for use below
        const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
        // Look for matching mask in any self-dependency
        bool stage_mask_match = false;
        auto disabled_stage_mask = sync_utils::DisabledPipelineStages(enabled_features);
        for (const auto self_dep_index : self_dependencies) {
            const auto &sub_dep = dependencies[self_dep_index];
            // Expand meta-stages (e.g. ALL_GRAPHICS) before the subset comparison
            const auto sub_src_stage_mask =
                sync_utils::ExpandPipelineStages(sub_dep.srcStageMask, sync_utils::kAllQueueTypes, disabled_stage_mask);
            const auto sub_dst_stage_mask =
                sync_utils::ExpandPipelineStages(sub_dep.dstStageMask, sync_utils::kAllQueueTypes, disabled_stage_mask);
            stage_mask_match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
                                (src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
                               ((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
                                (dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask)));
            if (stage_mask_match) break;
        }
        if (!stage_mask_match) {
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            skip |= LogError(rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                             "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask of any "
                             "self-dependency of subpass %d of %s for which dstStageMask is also a subset. "
                             "Candidate VkSubpassDependency are pDependencies entries [%s].",
                             funcName, src_stage_mask, active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str(),
                             self_dep_ss.str().c_str());
            skip |= LogError(rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                             "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask of any "
                             "self-dependency of subpass %d of %s for which srcStageMask is also a subset. "
                             "Candidate VkSubpassDependency are pDependencies entries [%s].",
                             funcName, dst_stage_mask, active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str(),
                             self_dep_ss.str().c_str());
        }
        // Buffer memory barriers are never allowed inside a render pass instance
        if (0 != buffer_mem_barrier_count) {
            skip |= LogError(rp_state->renderPass, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178",
                             "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", funcName,
                             buffer_mem_barrier_count, active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str());
        }
        // Each global memory barrier's access masks must be subsets of some self-dependency
        for (uint32_t i = 0; i < mem_barrier_count; ++i) {
            const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
            const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
            bool access_mask_match = false;
            for (const auto self_dep_index : self_dependencies) {
                const auto &sub_dep = dependencies[self_dep_index];
                access_mask_match = (mb_src_access_mask == (sub_dep.srcAccessMask & mb_src_access_mask)) &&
                                    (mb_dst_access_mask == (sub_dep.dstAccessMask & mb_dst_access_mask));
                if (access_mask_match) break;
            }
            if (!access_mask_match) {
                std::stringstream self_dep_ss;
                stream_join(self_dep_ss, ", ", self_dependencies);
                skip |= LogError(
                    rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                    "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency srcAccessMask "
                    "for any self-dependency of subpass %d of %s for which dstAccessMask is also a subset. "
                    "Candidate VkSubpassDependency are pDependencies entries [%s].",
                    funcName, i, mb_src_access_mask, active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str(),
                    self_dep_ss.str().c_str());
                skip |= LogError(
                    rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                    "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency dstAccessMask "
                    "for any self-dependency of subpass %d of %s for which srcAccessMask is also a subset. "
                    "Candidate VkSubpassDependency are pDependencies entries [%s].",
                    funcName, i, mb_dst_access_mask, active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str(),
                    self_dep_ss.str().c_str());
            }
        }
        skip |= ValidateRenderPassImageBarriers(funcName, cb_state, active_subpass, sub_desc, rp_state->renderPass, dependencies,
                                                self_dependencies, image_mem_barrier_count, image_barriers);
        // dependencyFlags must exactly equal some self-dependency's dependencyFlags
        bool flag_match = false;
        for (const auto self_dep_index : self_dependencies) {
            const auto &sub_dep = dependencies[self_dep_index];
            flag_match = sub_dep.dependencyFlags == dependency_flags;
            if (flag_match) break;
        }
        if (!flag_match) {
            std::stringstream self_dep_ss;
            stream_join(self_dep_ss, ", ", self_dependencies);
            skip |= LogError(rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
                             "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any "
                             "self-dependency of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
                             funcName, dependency_flags, cb_state->activeSubpass,
                             report_data->FormatHandle(rp_state->renderPass).c_str(), self_dep_ss.str().c_str());
        }
    }
    return skip;
}
// Verify that all bits of access_mask are supported by the src_stage_mask
static bool ValidateAccessMaskPipelineStage(const DeviceFeatures &features, VkQueueFlags queue_flags, VkAccessFlags access_mask,
VkPipelineStageFlags stage_mask) {
// Early out if all commands set, or access_mask NULL
if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
// or if only generic memory accesses are specified (or we got a 0 mask)
access_mask &= ~(VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT);
if (access_mask == 0) return true;
auto expanded_stages = sync_utils::ExpandPipelineStages(stage_mask, queue_flags, sync_utils::DisabledPipelineStages(features));
auto valid_accesses = sync_utils::CompatibleAccessMask(expanded_stages);
return ((access_mask & ~valid_accesses) == 0);
}
namespace barrier_queue_families {
// Indices into vu_summary, kImageErrorCodes and kBufferErrorCodes -- the three arrays
// must stay in 1:1 correspondence with this enum.
enum VuIndex {
    kSrcOrDstMustBeIgnore,
    kSpecialOrIgnoreOnly,
    kSrcAndDstValidOrSpecial,
    kSrcAndDestMustBeIgnore,
    kSrcAndDstBothValid,
    kSubmitQueueMustMatchSrcOrDst
};
// Human-readable summary appended to each queue-family error message, indexed by VuIndex.
// BUGFIX: this array previously held 8 entries (leftovers from removed VUs) against a
// 6-value enum, so every summary from kSrcAndDstValidOrSpecial onward was printed for the
// wrong error. It now matches the enum exactly.
static const char *vu_summary[] = {
    "Source or destination queue family must be ignored.",                                 // kSrcOrDstMustBeIgnore
    "Source or destination queue family must be special or ignored.",                      // kSpecialOrIgnoreOnly
    "Source and destination queue family must be valid or special.",                       // kSrcAndDstValidOrSpecial
    "Source and destination queue family must both be ignored.",                           // kSrcAndDestMustBeIgnore
    "Source and destination queue family must both be ignore or both valid.",              // kSrcAndDstBothValid
    "Source or destination queue family must match submit queue family, if not ignored.",  // kSubmitQueueMustMatchSrcOrDst
};
// VUIDs for image memory barriers, indexed by VuIndex.
static const std::string kImageErrorCodes[] = {
    "VUID-VkImageMemoryBarrier-image-01381",                                              //   kSrcOrDstMustBeIgnore
    "VUID-VkImageMemoryBarrier-image-04071",                                              //   kSpecialOrIgnoreOnly
    "VUID-VkImageMemoryBarrier-image-04072",                                              //   kSrcAndDstValidOrSpecial
    "VUID-VkImageMemoryBarrier-image-01199",                                              //   kSrcAndDestMustBeIgnore
    "VUID-VkImageMemoryBarrier-image-04069",                                              //   kSrcAndDstBothValid
    "UNASSIGNED-CoreValidation-vkImageMemoryBarrier-sharing-mode-exclusive-same-family",  //   kSubmitQueueMustMatchSrcOrDst
};
// VUIDs for buffer memory barriers, indexed by VuIndex.
static const std::string kBufferErrorCodes[] = {
    "VUID-VkBufferMemoryBarrier-buffer-01191",                                             //  kSrcOrDstMustBeIgnore
    "VUID-VkBufferMemoryBarrier-buffer-04088",                                             //  kSpecialOrIgnoreOnly
    "VUID-VkBufferMemoryBarrier-buffer-04089",                                             //  kSrcAndDstValidOrSpecial
    "VUID-VkBufferMemoryBarrier-buffer-01190",                                             //  kSrcAndDestMustBeIgnore
    "VUID-VkBufferMemoryBarrier-buffer-04086",                                             //  kSrcAndDstBothValid
    "UNASSIGNED-CoreValidation-vkBufferMemoryBarrier-sharing-mode-exclusive-same-family",  //  kSubmitQueueMustMatchSrcOrDst
};

// Bundles the per-barrier state (handle, sharing mode, type-specific VUID table) needed to
// format and log queue-family validation errors for one image or buffer barrier.
class ValidatorState {
  public:
    ValidatorState(const ValidationStateTracker *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state,
                   const VulkanTypedHandle &barrier_handle, const VkSharingMode sharing_mode)
        : device_data_(device_data),
          func_name_(func_name),
          command_buffer_(cb_state->commandBuffer),
          barrier_handle_(barrier_handle),
          sharing_mode_(sharing_mode),
          val_codes_(barrier_handle.type == kVulkanObjectTypeImage ? kImageErrorCodes : kBufferErrorCodes),
          limit_(static_cast<uint32_t>(device_data->physical_device_state->queue_family_properties.size())),
          mem_ext_(IsExtEnabled(device_data->device_extensions.vk_khr_external_memory)) {}

    // Log the messages using boilerplate from object state, and Vu specific information from the template arg
    // One and two family versions, in the single family version, Vu holds the name of the passed parameter
    bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
        const std::string &val_code = val_codes_[vu_index];
        const char *annotation = GetFamilyAnnotation(family);
        return device_data_->LogError(command_buffer_, val_code,
                                      "%s: Barrier using %s %s created with sharingMode %s, has %s %u%s. %s", func_name_,
                                      GetTypeString(), device_data_->report_data->FormatHandle(barrier_handle_).c_str(),
                                      GetModeString(), param_name, family, annotation, vu_summary[vu_index]);
    }
    bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
        const std::string &val_code = val_codes_[vu_index];
        const char *src_annotation = GetFamilyAnnotation(src_family);
        const char *dst_annotation = GetFamilyAnnotation(dst_family);
        return device_data_->LogError(
            command_buffer_, val_code,
            "%s: Barrier using %s %s created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
            func_name_, GetTypeString(), device_data_->report_data->FormatHandle(barrier_handle_).c_str(), GetModeString(),
            src_family, src_annotation, dst_family, dst_annotation, vu_summary[vu_index]);
    }

    // This abstract Vu can only be tested at submit time, thus we need a callback from the closure containing the needed
    // data. Note that the mem_barrier is copied to the closure as the lambda lifespan exceed the guarantees of validity for
    // application input.
    static bool ValidateAtQueueSubmit(const QUEUE_STATE *queue_state, const ValidationStateTracker *device_data,
                                      uint32_t src_family, uint32_t dst_family, const ValidatorState &val) {
        uint32_t queue_family = queue_state->queueFamilyIndex;
        if ((src_family != queue_family) && (dst_family != queue_family)) {
            const std::string &val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
            const char *src_annotation = val.GetFamilyAnnotation(src_family);
            const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
            return device_data->LogError(
                queue_state->queue, val_code,
                "%s: Barrier submitted to queue with family index %u, using %s %s created with sharingMode %s, has "
                "srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
                "vkQueueSubmit", queue_family, val.GetTypeString(),
                device_data->report_data->FormatHandle(val.barrier_handle_).c_str(), val.GetModeString(), src_family,
                src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
        }
        return false;
    }
    // Logical helpers for semantic clarity
    inline bool KhrExternalMem() const { return mem_ext_; }
    inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
    inline bool IsValidOrSpecial(uint32_t queue_family) const {
        return IsValid(queue_family) || (mem_ext_ && QueueFamilyIsExternal(queue_family));
    }

    // Helpers for LogMsg
    const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }

    // Descriptive text for the various types of queue family index
    const char *GetFamilyAnnotation(uint32_t family) const {
        const char *external = " (VK_QUEUE_FAMILY_EXTERNAL)";
        const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
        const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
        const char *valid = " (VALID)";
        const char *invalid = " (INVALID)";
        switch (family) {
            case VK_QUEUE_FAMILY_EXTERNAL:
                return external;
            case VK_QUEUE_FAMILY_FOREIGN_EXT:
                return foreign;
            case VK_QUEUE_FAMILY_IGNORED:
                return ignored;
            default:
                if (IsValid(family)) {
                    return valid;
                }
                return invalid;
        };
    }
    const char *GetTypeString() const { return object_string[barrier_handle_.type]; }
    VkSharingMode GetSharingMode() const { return sharing_mode_; }

  protected:
    const ValidationStateTracker *device_data_;
    const char *const func_name_;
    const VkCommandBuffer command_buffer_;
    const VulkanTypedHandle barrier_handle_;
    const VkSharingMode sharing_mode_;
    const std::string *val_codes_;  // kImageErrorCodes or kBufferErrorCodes, chosen by handle type
    const uint32_t limit_;          // number of queue families on the physical device
    const bool mem_ext_;            // VK_KHR_external_memory enabled
};

// Check the queue-family rules for one barrier, applying the KHR-external-memory or legacy
// rule set depending on whether the extension is enabled, and the concurrent or exclusive
// rules depending on the resource's sharing mode. Returns true if any error was logged.
bool Validate(const CoreChecks *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state, const ValidatorState &val,
              const uint32_t src_queue_family, const uint32_t dst_queue_family) {
    bool skip = false;
    const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
    const bool src_ignored = QueueFamilyIsIgnored(src_queue_family);
    const bool dst_ignored = QueueFamilyIsIgnored(dst_queue_family);
    if (val.KhrExternalMem()) {
        if (mode_concurrent) {
            if (!(src_ignored || dst_ignored)) {
                skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
            }
            if ((src_ignored && !(dst_ignored || QueueFamilyIsExternal(dst_queue_family))) ||
                (dst_ignored && !(src_ignored || QueueFamilyIsExternal(src_queue_family)))) {
                skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
            }
        } else {
            // VK_SHARING_MODE_EXCLUSIVE
            if (src_queue_family != dst_queue_family) {
                if (!val.IsValidOrSpecial(dst_queue_family)) {
                    skip |= val.LogMsg(kSrcAndDstValidOrSpecial, dst_queue_family, "dstQueueFamilyIndex");
                }
                if (!val.IsValidOrSpecial(src_queue_family)) {
                    skip |= val.LogMsg(kSrcAndDstValidOrSpecial, src_queue_family, "srcQueueFamilyIndex");
                }
            }
        }
    } else {
        // No memory extension
        if (mode_concurrent) {
            if (!src_ignored || !dst_ignored) {
                skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
            }
        } else {
            // VK_SHARING_MODE_EXCLUSIVE
            if ((src_queue_family != dst_queue_family) && !(val.IsValid(src_queue_family) && val.IsValid(dst_queue_family))) {
                skip |= val.LogMsg(kSrcAndDstBothValid, src_queue_family, dst_queue_family);
            }
        }
    }
    return skip;
}
}  // namespace barrier_queue_families
bool CoreChecks::ValidateConcurrentBarrierAtSubmit(const ValidationStateTracker *state_data, const QUEUE_STATE *queue_state,
const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VulkanTypedHandle &typed_handle, uint32_t src_queue_family,
uint32_t dst_queue_family) {
using barrier_queue_families::ValidatorState;
ValidatorState val(state_data, func_name, cb_state, typed_handle, VK_SHARING_MODE_CONCURRENT);
return ValidatorState::ValidateAtQueueSubmit(queue_state, state_data, src_queue_family, dst_queue_family, val);
}
// Type specific wrapper for image barriers
bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VkImageMemoryBarrier &barrier, const IMAGE_STATE *state_data) const {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the image state
barrier_queue_families::ValidatorState val(this, func_name, cb_state, VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage),
state_data->createInfo.sharingMode);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
// Type specific wrapper for buffer barriers
bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VkBufferMemoryBarrier &barrier, const BUFFER_STATE *state_data) const {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the buffer state
barrier_queue_families::ValidatorState val(
this, func_name, cb_state, VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer), state_data->createInfo.sharingMode);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
// Validate the memory, buffer, and image barriers of a pipeline barrier (outside the
// render-pass-specific rules): each barrier's access masks must be supported by the
// corresponding stage mask, queue-family ownership transfers must be legal, image layouts
// and subresource ranges must be valid, and buffer ranges must lie within the buffer.
// Returns true if any error was logged.
bool CoreChecks::ValidateBarriers(const char *funcName, const CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask,
                                  VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
                                  const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                                  const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                                  const VkImageMemoryBarrier *pImageMemBarriers) const {
    bool skip = false;
    auto queue_flags = GetQueueFlags(*cb_state);
    // Global memory barriers: access masks must be compatible with the stage masks
    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        const auto &mem_barrier = pMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(enabled_features, queue_flags, mem_barrier.srcAccessMask, src_stage_mask)) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-srcAccessMask-02815",
                             "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                             mem_barrier.srcAccessMask, src_stage_mask);
        }
        if (!ValidateAccessMaskPipelineStage(enabled_features, queue_flags, mem_barrier.dstAccessMask, dst_stage_mask)) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-dstAccessMask-02816",
                             "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                             mem_barrier.dstAccessMask, dst_stage_mask);
        }
    }
    // Image memory barriers: access masks, queue families, layout transition, subresource range
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        const auto &mem_barrier = pImageMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(enabled_features, queue_flags, mem_barrier.srcAccessMask, src_stage_mask)) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-srcAccessMask-02815",
                             "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                             mem_barrier.srcAccessMask, src_stage_mask);
        }
        if (!ValidateAccessMaskPipelineStage(enabled_features, queue_flags, mem_barrier.dstAccessMask, dst_stage_mask)) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-dstAccessMask-02816",
                             "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                             mem_barrier.dstAccessMask, dst_stage_mask);
        }
        auto image_data = GetImageState(mem_barrier.image);
        skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, image_data);
        if (mem_barrier.newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier.newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
            skip |= LogError(cb_state->commandBuffer, "VUID-VkImageMemoryBarrier-newLayout-01198",
                             "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
        }
        if (image_data) {
            // BUGFIX: previously used the buffer VUID ("VUID-VkBufferMemoryBarrier-buffer-01931")
            // for this image check; the image-barrier binding requirement is image-01932.
            skip |= ValidateMemoryIsBoundToImage(image_data, funcName, "VUID-VkImageMemoryBarrier-image-01932");
            const auto aspect_mask = mem_barrier.subresourceRange.aspectMask;
            skip |= ValidateImageAspectMask(image_data->image, image_data->createInfo.format, aspect_mask, funcName);
            const std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
            skip |= ValidateImageBarrierSubresourceRange(image_data, mem_barrier.subresourceRange, funcName, param_name.c_str());
        }
    }
    // Buffer memory barriers: access masks, queue families, offset/size within the buffer
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        const auto &mem_barrier = pBufferMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(enabled_features, queue_flags, mem_barrier.srcAccessMask, src_stage_mask)) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-srcAccessMask-02815",
                             "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName,
                             i, mem_barrier.srcAccessMask, src_stage_mask);
        }
        if (!ValidateAccessMaskPipelineStage(enabled_features, queue_flags, mem_barrier.dstAccessMask, dst_stage_mask)) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-dstAccessMask-02816",
                             "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName,
                             i, mem_barrier.dstAccessMask, dst_stage_mask);
        }
        // Validate buffer barrier queue family indices
        auto buffer_state = GetBufferState(mem_barrier.buffer);
        skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, buffer_state);
        if (buffer_state) {
            skip |= ValidateMemoryIsBoundToBuffer(buffer_state, funcName, "VUID-VkBufferMemoryBarrier-buffer-01931");
            auto buffer_size = buffer_state->createInfo.size;
            if (mem_barrier.offset >= buffer_size) {
                skip |= LogError(cb_state->commandBuffer, "VUID-VkBufferMemoryBarrier-offset-01187",
                                 "%s: Buffer Barrier %s has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
                                 funcName, report_data->FormatHandle(mem_barrier.buffer).c_str(),
                                 HandleToUint64(mem_barrier.offset), HandleToUint64(buffer_size));
            } else if (mem_barrier.size != VK_WHOLE_SIZE && (mem_barrier.offset + mem_barrier.size > buffer_size)) {
                skip |= LogError(cb_state->commandBuffer, "VUID-VkBufferMemoryBarrier-size-01189",
                                 "%s: Buffer Barrier %s has offset 0x%" PRIx64 " and size 0x%" PRIx64
                                 " whose sum is greater than total size 0x%" PRIx64 ".",
                                 funcName, report_data->FormatHandle(mem_barrier.buffer).c_str(),
                                 HandleToUint64(mem_barrier.offset), HandleToUint64(mem_barrier.size), HandleToUint64(buffer_size));
            }
            if (mem_barrier.size == 0) {
                skip |= LogError(cb_state->commandBuffer, "VUID-VkBufferMemoryBarrier-size-01188",
                                 "%s: Buffer Barrier %s has a size of 0.", funcName,
                                 report_data->FormatHandle(mem_barrier.buffer).c_str());
            }
        }
    }
    // Queue-family-ownership-transfer (QFO) barriers must be unique within the command buffer
    skip |= ValidateBarriersQFOTransferUniqueness(funcName, cb_state, bufferBarrierCount, pBufferMemBarriers, imageMemBarrierCount,
                                                  pImageMemBarriers);
    return skip;
}
// Submit-time check for vkCmdWaitEvents: sourceStageMask must equal the bitwise OR of the
// stageMask values the waited-on events were set with (optionally plus HOST_BIT for events
// set via vkSetEvent). Stage masks are taken from localEventToStageMap (events set earlier
// in this submission) or, failing that, from global event state. Returns true on error.
bool CoreChecks::ValidateEventStageMask(const ValidationStateTracker *state_data, const CMD_BUFFER_STATE *pCB, size_t eventCount,
                                        size_t firstEventIndex, VkPipelineStageFlags sourceStageMask,
                                        EventToStageMap *localEventToStageMap) {
    bool skip = false;
    VkPipelineStageFlags stage_mask = 0;
    // Clamp to the recorded events list in case of inconsistent counts
    const auto max_event = std::min((firstEventIndex + eventCount), pCB->events.size());
    for (size_t event_index = firstEventIndex; event_index < max_event; ++event_index) {
        auto event = pCB->events[event_index];
        // Prefer the stage recorded during this submission; fall back to global event state
        auto event_data = localEventToStageMap->find(event);
        if (event_data != localEventToStageMap->end()) {
            stage_mask |= event_data->second;
        } else {
            auto global_event_data = state_data->GetEventState(event);
            if (!global_event_data) {
                skip |= state_data->LogError(event, kVUID_Core_DrawState_InvalidEvent,
                                             "%s cannot be waited on if it has never been set.",
                                             state_data->report_data->FormatHandle(event).c_str());
            } else {
                stage_mask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
    if (sourceStageMask != stage_mask && sourceStageMask != (stage_mask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip |= state_data->LogError(
            pCB->commandBuffer, "VUID-vkCmdWaitEvents-srcStageMask-parameter",
            "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
            "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
            "vkSetEvent but instead is 0x%X.",
            sourceStageMask, stage_mask);
    }
    return skip;
}
static const VkQueueFlagBits kQueueTypeArray[] = {VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_TRANSFER_BIT};
// Report every pipeline-stage bit in |stage_mask| that is not supported by a queue with
// |queue_flags|. One error (using |error_code|) is logged per unsupported bit; |function| and
// |src_or_dest| only shape the message text. Returns true if anything was logged.
bool CoreChecks::CheckStageMaskQueueCompatibility(VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
                                                  VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
                                                  const char *error_code) const {
    bool skip = false;
    // The stages this queue supports are exactly what ALL_COMMANDS expands to for its flags.
    const auto supported_flags = sync_utils::ExpandPipelineStages(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, queue_flags);
    const auto expanded_mask = sync_utils::ExpandPipelineStages(stage_mask, queue_flags);
    auto bad_flags = expanded_mask & ~supported_flags;
    // Peel off unsupported bits one at a time, lowest first, and report each individually.
    while (bad_flags != 0) {
        const VkPipelineStageFlags item = bad_flags & ~(bad_flags - 1);  // isolate lowest set bit
        bad_flags &= ~item;
        skip |= LogError(command_buffer, error_code,
                         "%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
                         function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)));
    }
    return skip;
}
// Check if all barriers are of a given operation type.
// Returns false for a null pool; vacuously true when count == 0 (callers rely on this).
template <typename Barrier, typename OpCheck>
bool AllTransferOp(const COMMAND_POOL_STATE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
    if (!pool) return false;
    for (const Barrier *barrier = barriers, *end = barriers + count; barrier != end; ++barrier) {
        if (!op_check(pool, barrier)) return false;
    }
    return true;
}
// Look at the barriers to see if we they are all release or all acquire, the result impacts queue properties validation
// Returns kAllRelease/kAllAcquire only when *every* buffer and image barrier is a queue-family
// ownership release/acquire with respect to this command buffer's pool; otherwise kGeneral.
BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(const CMD_BUFFER_STATE *cb_state, uint32_t buffer_barrier_count,
                                                               const VkBufferMemoryBarrier *buffer_barriers,
                                                               uint32_t image_barrier_count,
                                                               const VkImageMemoryBarrier *image_barriers) const {
    auto pool = cb_state->command_pool.get();
    BarrierOperationsType op_type = kGeneral;
    // Look at the barrier details only if they exist
    // Note: AllTransferOp returns true for count == 0
    if ((buffer_barrier_count + image_barrier_count) != 0) {
        if (AllTransferOp(pool, TempIsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
            AllTransferOp(pool, TempIsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
            op_type = kAllRelease;
        } else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
                   AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
            op_type = kAllAcquire;
        }
    }
    return op_type;
}
// Validate that the src/dst stage masks of a barrier command are supported by the queue family
// the command buffer's pool was created for. Acquire-only barrier sets skip the src check and
// release-only sets skip the dst check, since the "other side" executes on a different queue.
bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(const CMD_BUFFER_STATE *cb_state,
                                                            VkPipelineStageFlags source_stage_mask,
                                                            VkPipelineStageFlags dest_stage_mask,
                                                            BarrierOperationsType barrier_op_type, const char *function,
                                                            const char *error_code) const {
    bool skip = false;
    uint32_t queue_family_index = cb_state->command_pool->queueFamilyIndex;
    auto physical_device_state = GetPhysicalDeviceState();
    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
        // ALL_COMMANDS is valid on any queue, so its presence short-circuits the per-bit check.
        // Only check the source stage mask if any barriers aren't "acquire ownership"
        if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, source_stage_mask, specified_queue_flags, function,
                                                     "srcStageMask", error_code);
        }
        // Only check the dest stage mask if any barriers aren't "release ownership"
        if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, dest_stage_mask, specified_queue_flags, function,
                                                     "dstStageMask", error_code);
        }
    }
    return skip;
}
// Record-time validation for vkCmdWaitEvents: queue capability of stage masks, geometry/
// tessellation/shading-rate feature gating of stage bits, queue-family of the command pool,
// command-buffer state, and the image/buffer memory barriers themselves.
// NOTE(review): "VUID-vkCmdWaitEvents-srcStageMask-4098" lacks the usual 5-digit zero padding
// (cf. "-04090" below) — verify against the current valid-usage ID database.
bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                              VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount,
                                              const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    // All-acquire / all-release barrier sets relax which stage mask must match this queue.
    auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                        imageMemoryBarrierCount, pImageMemoryBarriers);
    bool skip = ValidateStageMasksAgainstQueueCapabilities(cb_state, sourceStageMask, dstStageMask, barrier_op_type,
                                                           "vkCmdWaitEvents", "VUID-vkCmdWaitEvents-srcStageMask-4098");
    skip |= ValidateStageMaskGsTsEnables(sourceStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-srcStageMask-04090",
                                         "VUID-vkCmdWaitEvents-srcStageMask-04091", "VUID-vkCmdWaitEvents-srcStageMask-04095",
                                         "VUID-vkCmdWaitEvents-srcStageMask-04096");
    skip |= ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-dstStageMask-04090",
                                         "VUID-vkCmdWaitEvents-dstStageMask-04091", "VUID-vkCmdWaitEvents-dstStageMask-04095",
                                         "VUID-vkCmdWaitEvents-dstStageMask-04096");
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdWaitEvents-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
    skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
    skip |= ValidateBarriers("vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
                             bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    return skip;
}
// Record-time hook for vkCmdWaitEvents: lets the state tracker append the waited events, then
// enqueues a submit-time check (ValidateEventStageMask) over exactly the events added here, and
// applies image layout transitions from the image barriers.
void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                            VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                            uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                            uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                            uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // The StateTracker added will add to the events vector.
    // Snapshot the size first so the lambda below can address only the newly-added events.
    auto first_event_index = cb_state->events.size();
    StateTracker::PreCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask, memoryBarrierCount,
                                             pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                             imageMemoryBarrierCount, pImageMemoryBarriers);
    auto event_added_count = cb_state->events.size() - first_event_index;
    // Capture a const pointer by value; the CMD_BUFFER_STATE must outlive the queued lambda.
    const CMD_BUFFER_STATE *cb_state_const = cb_state;
    cb_state->eventUpdates.emplace_back(
        [cb_state_const, event_added_count, first_event_index, sourceStageMask](
            const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) {
            if (!do_validate) return false;
            return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, sourceStageMask,
                                          localEventToStageMap);
        });
    TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
// Post-record hook for vkCmdWaitEvents: capture queue-family-ownership transfer information
// from the buffer/image barriers for later submit-time validation.
void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                             VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                             uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                             uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                             uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    auto *cb_state = GetCBState(commandBuffer);
    RecordBarrierValidationInfo("vkCmdWaitEvents", cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                imageMemoryBarrierCount, pImageMemoryBarriers);
}
// Record-time validation for vkCmdPipelineBarrier: queue capability and feature gating of the
// stage masks, command-pool queue flags, render-pass self-dependency rules when inside a render
// pass, and the memory/buffer/image barriers themselves.
// NOTE(review): "VUID-vkCmdPipelineBarrier-srcStageMask-4098" lacks the usual 5-digit zero
// padding (cf. "-04090" below) — verify against the current valid-usage ID database.
bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                   VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                   uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                   uint32_t bufferMemoryBarrierCount,
                                                   const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                   uint32_t imageMemoryBarrierCount,
                                                   const VkImageMemoryBarrier *pImageMemoryBarriers) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    // Queue-capability check only applies when there are ownership-transfer-capable barriers.
    if (bufferMemoryBarrierCount || imageMemoryBarrierCount) {
        auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                            imageMemoryBarrierCount, pImageMemoryBarriers);
        skip |= ValidateStageMasksAgainstQueueCapabilities(cb_state, srcStageMask, dstStageMask, barrier_op_type,
                                                           "vkCmdPipelineBarrier", "VUID-vkCmdPipelineBarrier-srcStageMask-4098");
    }
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPipelineBarrier()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
    skip |=
        ValidateStageMaskGsTsEnables(srcStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-srcStageMask-04090",
                                     "VUID-vkCmdPipelineBarrier-srcStageMask-04091", "VUID-vkCmdPipelineBarrier-srcStageMask-04095",
                                     "VUID-vkCmdPipelineBarrier-srcStageMask-04096");
    skip |=
        ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-dstStageMask-04090",
                                     "VUID-vkCmdPipelineBarrier-dstStageMask-04091", "VUID-vkCmdPipelineBarrier-dstStageMask-04095",
                                     "VUID-vkCmdPipelineBarrier-dstStageMask-04096");
    if (cb_state->activeRenderPass) {
        skip |= ValidateRenderPassPipelineBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, dependencyFlags,
                                                   memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                   pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
        if (skip) return true;  // Early return to avoid redundant errors from below calls
    }
    skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
    skip |= ValidateBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
                             bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    return skip;
}
// For secondary command buffers recorded inside a render pass without a framebuffer, the image
// barrier attachment checks cannot run yet — defer them as lambdas that execute when the
// secondary is executed in a primary (where the framebuffer is finally known).
void CoreChecks::EnqueueSubmitTimeValidateImageBarrierAttachment(const char *func_name, CMD_BUFFER_STATE *cb_state,
                                                                 uint32_t imageMemBarrierCount,
                                                                 const VkImageMemoryBarrier *pImageMemBarriers) {
    // Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
    if ((cb_state->activeRenderPass) && (VK_NULL_HANDLE == cb_state->activeFramebuffer) &&
        (VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level)) {
        const auto active_subpass = cb_state->activeSubpass;
        const auto rp_state = cb_state->activeRenderPass;
        const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
        for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
            // Copy of the barrier struct; the lambda's [=] capture keeps it alive past this call.
            const auto &img_barrier = pImageMemBarriers[i];
            // Secondary CB case w/o FB specified delay validation
            auto *this_ptr = this;  // Required for older compilers with c++20 compatibility
            cb_state->cmd_execute_commands_functions.emplace_back(
                [=](const CMD_BUFFER_STATE *primary_cb, const FRAMEBUFFER_STATE *fb) {
                    return this_ptr->ValidateImageBarrierAttachment(func_name, cb_state, fb, active_subpass, sub_desc,
                                                                    rp_state->renderPass, i, img_barrier, primary_cb);
                });
        }
    }
}
// Record-time hook for vkCmdPipelineBarrier: stash QFO-transfer info from the barriers, queue
// deferred attachment checks for secondary CBs without a framebuffer, and apply the image
// layout transitions.
void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                                 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                                 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                                 uint32_t bufferMemoryBarrierCount,
                                                 const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                                 uint32_t imageMemoryBarrierCount,
                                                 const VkImageMemoryBarrier *pImageMemoryBarriers) {
    auto *cb_state = GetCBState(commandBuffer);
    const char *const api_name = "vkCmdPipelineBarrier";
    RecordBarrierValidationInfo(api_name, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                                pImageMemoryBarriers);
    EnqueueSubmitTimeValidateImageBarrierAttachment(api_name, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
    TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
// Shared record-time validation for vkCmdBeginQuery / vkCmdBeginQueryIndexedEXT. The caller
// supplies the command-specific VUIDs through |vuids| so the same logic serves both entry
// points. Checks: query type restrictions, duplicate active query types, queue capabilities,
// performance-query scope rules, PRECISE bit preconditions, index range, and protected CBs.
bool CoreChecks::ValidateBeginQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, VkFlags flags, CMD_TYPE cmd,
                                    const char *cmd_name, const ValidateBeginQueryVuids *vuids) const {
    bool skip = false;
    const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;
    // Timestamp queries are written with vkCmdWriteTimestamp, never begun/ended.
    if (query_pool_ci.queryType == VK_QUERY_TYPE_TIMESTAMP) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBeginQuery-queryType-02804",
                         "%s: The querypool's query type must not be VK_QUERY_TYPE_TIMESTAMP.", cmd_name);
    }
    // Check for nested queries
    if (cb_state->activeQueries.size()) {
        for (auto a_query : cb_state->activeQueries) {
            auto active_query_pool_state = GetQueryPoolState(a_query.pool);
            // Two simultaneously-active queries of the same type are disallowed.
            if (active_query_pool_state->createInfo.queryType == query_pool_ci.queryType) {
                LogObjectList obj_list(cb_state->commandBuffer);
                obj_list.add(query_obj.pool);
                obj_list.add(a_query.pool);
                skip |= LogError(obj_list, vuids->vuid_dup_query_type,
                                 "%s: Within the same command buffer %s, query %d from pool %s has same queryType as active query "
                                 "%d from pool %s.",
                                 cmd_name, report_data->FormatHandle(cb_state->commandBuffer).c_str(), query_obj.index,
                                 report_data->FormatHandle(query_obj.pool).c_str(), a_query.index,
                                 report_data->FormatHandle(a_query.pool).c_str());
            }
        }
    }
    // There are tighter queue constraints to test for certain query pools
    if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
        skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_feedback);
    }
    if (query_pool_ci.queryType == VK_QUERY_TYPE_OCCLUSION) {
        skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_occlusion);
    }
    if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
        // KHR performance queries require the profiling lock for the whole recording.
        if (!cb_state->performance_lock_acquired) {
            skip |= LogError(cb_state->commandBuffer, vuids->vuid_profile_lock,
                             "%s: profiling lock must be held before vkBeginCommandBuffer is called on "
                             "a command buffer where performance queries are recorded.",
                             cmd_name);
        }
        // COMMAND_BUFFER-scope counters must be the first recorded command.
        if (query_pool_state->has_perf_scope_command_buffer && cb_state->commandCount > 0) {
            skip |= LogError(cb_state->commandBuffer, vuids->vuid_scope_not_first,
                             "%s: Query pool %s was created with a counter of scope "
                             "VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but %s is not the first recorded "
                             "command in the command buffer.",
                             cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
        }
        // RENDER_PASS-scope counters may not begin inside a render pass.
        if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
            skip |= LogError(cb_state->commandBuffer, vuids->vuid_scope_in_rp,
                             "%s: Query pool %s was created with a counter of scope "
                             "VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
                             cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
        }
    }
    skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);
    if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
        // PRECISE requires both the device feature and an occlusion-type pool.
        if (!enabled_features.core.occlusionQueryPrecise) {
            skip |= LogError(cb_state->commandBuffer, vuids->vuid_precise,
                             "%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but precise occlusion queries not enabled on the device.",
                             cmd_name);
        }
        if (query_pool_ci.queryType != VK_QUERY_TYPE_OCCLUSION) {
            skip |=
                LogError(cb_state->commandBuffer, vuids->vuid_precise,
                         "%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but pool query type is not VK_QUERY_TYPE_OCCLUSION", cmd_name);
        }
    }
    if (query_obj.query >= query_pool_ci.queryCount) {
        skip |= LogError(cb_state->commandBuffer, vuids->vuid_query_count,
                         "%s: Query index %" PRIu32 " must be less than query count %" PRIu32 " of %s.", cmd_name, query_obj.query,
                         query_pool_ci.queryCount, report_data->FormatHandle(query_obj.pool).c_str());
    }
    if (cb_state->unprotected == false) {
        skip |= LogError(cb_state->commandBuffer, vuids->vuid_protected_cb,
                         "%s: command can't be used in protected command buffers.", cmd_name);
    }
    skip |= ValidateCmd(cb_state, cmd, cmd_name);
    return skip;
}
// Record-time validation for vkCmdBeginQuery: binds the vkCmdBeginQuery-specific VUID strings
// and delegates to the shared ValidateBeginQuery (also used by the indexed EXT variant).
bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot,
                                              VkFlags flags) const {
    if (disabled[query_validation]) return false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    QueryObject query_obj(queryPool, slot);
    // Local struct keeps the command-specific VUID wiring next to its single use site.
    struct BeginQueryVuids : ValidateBeginQueryVuids {
        BeginQueryVuids() : ValidateBeginQueryVuids() {
            vuid_queue_flags = "VUID-vkCmdBeginQuery-commandBuffer-cmdpool";
            vuid_queue_feedback = "VUID-vkCmdBeginQuery-queryType-02327";
            vuid_queue_occlusion = "VUID-vkCmdBeginQuery-queryType-00803";
            vuid_precise = "VUID-vkCmdBeginQuery-queryType-00800";
            vuid_query_count = "VUID-vkCmdBeginQuery-query-00802";
            vuid_profile_lock = "VUID-vkCmdBeginQuery-queryPool-03223";
            vuid_scope_not_first = "VUID-vkCmdBeginQuery-queryPool-03224";
            vuid_scope_in_rp = "VUID-vkCmdBeginQuery-queryPool-03225";
            vuid_dup_query_type = "VUID-vkCmdBeginQuery-queryPool-01922";
            vuid_protected_cb = "VUID-vkCmdBeginQuery-commandBuffer-01885";
        }
    };
    BeginQueryVuids vuids;
    return ValidateBeginQuery(cb_state, query_obj, flags, CMD_BEGINQUERY, "vkCmdBeginQuery()", &vuids);
}
// Submit-time check that a query being begun was reset since pool creation / last use.
// Consults the per-submission map first, then the device-global map. All state flows through
// |state_data| (invoked from a queued lambda without a CoreChecks instance).
bool CoreChecks::VerifyQueryIsReset(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer, QueryObject query_obj,
                                    const char *func_name, VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
                                    QueryMap *localQueryToStateMap) {
    bool skip = false;
    const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;
    QueryState state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
    // If reset was in another command buffer, check the global map
    if (state == QUERYSTATE_UNKNOWN) {
        state = state_data->GetQueryState(&state_data->queryToStateMap, query_obj.pool, query_obj.query, perfPass);
    }
    // Performance queries have limitation upon when they can be
    // reset.
    if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR && state == QUERYSTATE_UNKNOWN &&
        perfPass >= query_pool_state->n_performance_passes) {
        // If the pass is invalid, assume RESET state, another error
        // will be raised in ValidatePerformanceQuery().
        state = QUERYSTATE_RESET;
    }
    if (state != QUERYSTATE_RESET) {
        skip |= state_data->LogError(commandBuffer, kVUID_Core_DrawState_QueryNotReset,
                                     "%s: %s and query %" PRIu32
                                     ": query not reset. "
                                     "After query pool creation, each query must be reset before it is used. "
                                     "Queries must also be reset between uses.",
                                     func_name, state_data->report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
    }
    return skip;
}
// Submit-time checks specific to VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR queries: the counter pass
// index must be in range, the profiling lock must have been held for the whole recording, the
// query may not be reset in the same command buffer, and (unless the
// performanceCounterMultipleQueryPools feature is enabled) all performance queries in one
// submission must come from a single pool — |firstPerfQueryPool| tracks the first pool seen.
// No-op (returns false) for non-performance query pools.
// Fix: added the missing space in the "profiling lock" error message, which previously rendered
// as "...but theprofiling lock was not held...".
bool CoreChecks::ValidatePerformanceQuery(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
                                          QueryObject query_obj, const char *func_name, VkQueryPool &firstPerfQueryPool,
                                          uint32_t perfPass, QueryMap *localQueryToStateMap) {
    const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;
    if (query_pool_ci.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return false;
    const CMD_BUFFER_STATE *cb_state = state_data->GetCBState(commandBuffer);
    bool skip = false;
    if (perfPass >= query_pool_state->n_performance_passes) {
        skip |= state_data->LogError(commandBuffer, "VUID-VkPerformanceQuerySubmitInfoKHR-counterPassIndex-03221",
                                     "Invalid counterPassIndex (%u, maximum allowed %u) value for query pool %s.", perfPass,
                                     query_pool_state->n_performance_passes,
                                     state_data->report_data->FormatHandle(query_obj.pool).c_str());
    }
    // The profiling lock must be acquired before recording begins and stay held throughout.
    if (!cb_state->performance_lock_acquired || cb_state->performance_lock_released) {
        skip |= state_data->LogError(commandBuffer, "VUID-vkQueueSubmit-pCommandBuffers-03220",
                                     "Commandbuffer %s was submitted and contains a performance query but the "
                                     "profiling lock was not held continuously throughout the recording of commands.",
                                     state_data->report_data->FormatHandle(commandBuffer).c_str());
    }
    // A reset of the same query recorded in this command buffer invalidates the begin.
    QueryState command_buffer_state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
    if (command_buffer_state == QUERYSTATE_RESET) {
        skip |= state_data->LogError(
            commandBuffer, query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-None-02863" : "VUID-vkCmdBeginQuery-None-02863",
            "VkQuery begin command recorded in a command buffer that, either directly or "
            "through secondary command buffers, also contains a vkCmdResetQueryPool command "
            "affecting the same query.");
    }
    if (firstPerfQueryPool != VK_NULL_HANDLE) {
        if (firstPerfQueryPool != query_obj.pool &&
            !state_data->enabled_features.performance_query_features.performanceCounterMultipleQueryPools) {
            skip |= state_data->LogError(
                commandBuffer,
                query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03226" : "VUID-vkCmdBeginQuery-queryPool-03226",
                "Commandbuffer %s contains more than one performance query pool but "
                "performanceCounterMultipleQueryPools is not enabled.",
                state_data->report_data->FormatHandle(commandBuffer).c_str());
        }
    } else {
        firstPerfQueryPool = query_obj.pool;
    }
    return skip;
}
// Queue the submit-time begin-query checks (performance-query rules and reset-state tracking)
// on the command buffer; they run with per-submission query/pass state at queue-submit time.
void CoreChecks::EnqueueVerifyBeginQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj, const char *func_name) {
    CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
    // Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
    cb_state->queryUpdates.emplace_back([command_buffer, query_obj, func_name](const ValidationStateTracker *device_data,
                                                                              bool do_validate, VkQueryPool &firstPerfQueryPool,
                                                                              uint32_t perfPass, QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        bool skip = false;
        skip |= ValidatePerformanceQuery(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
                                         localQueryToStateMap);
        skip |= VerifyQueryIsReset(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
                                   localQueryToStateMap);
        return skip;
    });
}
// Record-time hook for vkCmdBeginQuery: enqueue the submit-time begin-query checks.
void CoreChecks::PreCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    if (disabled[query_validation]) return;
    EnqueueVerifyBeginQuery(commandBuffer, QueryObject(queryPool, slot), "vkCmdBeginQuery()");
}
// Queue a submit-time check that an ended COMMAND_BUFFER-scope performance query was the last
// command recorded in the command buffer (VUID-vkCmdEndQuery-queryPool-03227).
// Fix: added the missing space in the error message, which previously rendered as
// "...counter of scopeVK_QUERY_SCOPE_COMMAND_BUFFER_KHR...".
void CoreChecks::EnqueueVerifyEndQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj) {
    CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
    // Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
    cb_state->queryUpdates.emplace_back([command_buffer, query_obj](const ValidationStateTracker *device_data, bool do_validate,
                                                                    VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
                                                                    QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        bool skip = false;
        const CMD_BUFFER_STATE *cb_state = device_data->GetCBState(command_buffer);
        const auto *query_pool_state = device_data->GetQueryPoolState(query_obj.pool);
        // endCommandIndex was recorded at vkCmdEndQuery time; compare against the final count.
        if (query_pool_state->has_perf_scope_command_buffer && (cb_state->commandCount - 1) != query_obj.endCommandIndex) {
            skip |= device_data->LogError(command_buffer, "VUID-vkCmdEndQuery-queryPool-03227",
                                          "vkCmdEndQuery: Query pool %s was created with a counter of scope "
                                          "VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but the end of the query is not the last "
                                          "command in the command buffer %s.",
                                          device_data->report_data->FormatHandle(query_obj.pool).c_str(),
                                          device_data->report_data->FormatHandle(command_buffer).c_str());
        }
        return skip;
    });
}
// Shared record-time validation for vkCmdEndQuery / vkCmdEndQueryIndexedEXT: the query must be
// active, RENDER_PASS-scope performance counters may not end inside a render pass, the queue
// must support graphics or compute, and protected command buffers are disallowed. The caller
// supplies the command-specific VUIDs through |vuids|.
bool CoreChecks::ValidateCmdEndQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, CMD_TYPE cmd,
                                     const char *cmd_name, const ValidateEndQueryVuids *vuids) const {
    bool skip = false;
    if (!cb_state->activeQueries.count(query_obj)) {
        skip |=
            LogError(cb_state->commandBuffer, vuids->vuid_active_queries, "%s: Ending a query before it was started: %s, index %d.",
                     cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
    }
    const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
    const auto &query_pool_ci = query_pool_state->createInfo;
    if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
        if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdEndQuery-queryPool-03228",
                             "%s: Query pool %s was created with a counter of scope "
                             "VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
                             cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
        }
    }
    skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);
    skip |= ValidateCmd(cb_state, cmd, cmd_name);
    if (cb_state->unprotected == false) {
        skip |= LogError(cb_state->commandBuffer, vuids->vuid_protected_cb,
                         "%s: command can't be used in protected command buffers.", cmd_name);
    }
    return skip;
}
// Record-time validation for vkCmdEndQuery: range-checks the slot against the pool size and,
// when in range, binds the vkCmdEndQuery-specific VUIDs and delegates to ValidateCmdEndQuery.
bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) const {
    if (disabled[query_validation]) return false;
    bool skip = false;
    QueryObject query_obj = {queryPool, slot};
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
    if (query_pool_state) {
        const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
        // Only continue validating if the slot is even within range
        if (slot >= available_query_count) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdEndQuery-query-00810",
                             "vkCmdEndQuery(): query index (%u) is greater or equal to the queryPool size (%u).", slot,
                             available_query_count);
        } else {
            // Local struct keeps the command-specific VUID wiring next to its single use site.
            struct EndQueryVuids : ValidateEndQueryVuids {
                EndQueryVuids() : ValidateEndQueryVuids() {
                    vuid_queue_flags = "VUID-vkCmdEndQuery-commandBuffer-cmdpool";
                    vuid_active_queries = "VUID-vkCmdEndQuery-None-01923";
                    vuid_protected_cb = "VUID-vkCmdEndQuery-commandBuffer-01886";
                }
            };
            EndQueryVuids vuids;
            skip |= ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERY, "vkCmdEndQuery()", &vuids);
        }
    }
    return skip;
}
// Record-time hook for vkCmdEndQuery: queue the submit-time COMMAND_BUFFER-scope check for the
// ended query.
void CoreChecks::PreCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    if (disabled[query_validation]) return;
    QueryObject query_obj(queryPool, slot);
    // Remember which recorded command ended the query so the deferred check can verify it was
    // the last one in the command buffer.
    query_obj.endCommandIndex = GetCBState(commandBuffer)->commandCount - 1;
    EnqueueVerifyEndQuery(commandBuffer, query_obj);
}
// Range-check a (firstQuery, queryCount) pair against the pool's query count, logging
// |first_vuid| when firstQuery is out of range and |sum_vuid| when firstQuery + queryCount
// overruns the pool. Silently passes when the pool handle has no tracked state.
bool CoreChecks::ValidateQueryPoolIndex(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, const char *func_name,
                                        const char *first_vuid, const char *sum_vuid) const {
    bool skip = false;
    const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
    if (query_pool_state) {
        const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
        if (firstQuery >= available_query_count) {
            skip |= LogError(queryPool, first_vuid,
                             "%s: In Query %s the firstQuery (%u) is greater or equal to the queryPool size (%u).", func_name,
                             report_data->FormatHandle(queryPool).c_str(), firstQuery, available_query_count);
        }
        if ((firstQuery + queryCount) > available_query_count) {
            skip |=
                LogError(queryPool, sum_vuid,
                         "%s: In Query %s the sum of firstQuery (%u) + queryCount (%u) is greater than the queryPool size (%u).",
                         func_name, report_data->FormatHandle(queryPool).c_str(), firstQuery, queryCount, available_query_count);
        }
    }
    return skip;
}
// Record-time validation for vkCmdResetQueryPool: not allowed inside a render pass, must be on
// a graphics/compute queue, and the (firstQuery, queryCount) range must fit the pool.
// Fix: error messages previously named the entry point "VkCmdResetQueryPool()" (capital V) in
// three of the four checks while the first used the correct "vkCmdResetQueryPool()"; all four
// now use the actual API name consistently.
bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                  uint32_t queryCount) const {
    if (disabled[query_validation]) return false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = InsideRenderPass(cb_state, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-renderpass");
    skip |= ValidateCmd(cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdResetQueryPool-commandBuffer-cmdpool");
    skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdResetQueryPool()",
                                   "VUID-vkCmdResetQueryPool-firstQuery-00796", "VUID-vkCmdResetQueryPool-firstQuery-00797");
    return skip;
}
// Classify what a result request (with |flags|) would yield for a query in |state|:
// whether it would wait, return some data, return no data, or is indeterminate.
static QueryResultType GetQueryResultType(QueryState state, VkQueryResultFlags flags) {
    const bool wait_requested = (flags & VK_QUERY_RESULT_WAIT_BIT) != 0;
    const bool partial_or_availability =
        (flags & (VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) != 0;
    switch (state) {
        case QUERYSTATE_UNKNOWN:
            return QUERYRESULT_UNKNOWN;
        case QUERYSTATE_RESET:
            // Waiting on a never-run query hangs; partial/availability requests still yield data.
            if (wait_requested) return QUERYRESULT_WAIT_ON_RESET;
            return partial_or_availability ? QUERYRESULT_SOME_DATA : QUERYRESULT_NO_DATA;
        case QUERYSTATE_RUNNING:
            if (wait_requested) return QUERYRESULT_WAIT_ON_RUNNING;
            return partial_or_availability ? QUERYRESULT_SOME_DATA : QUERYRESULT_NO_DATA;
        case QUERYSTATE_ENDED:
            // Ended but not yet known-available: any of the three flags guarantees some data.
            return (wait_requested || partial_or_availability) ? QUERYRESULT_SOME_DATA : QUERYRESULT_UNKNOWN;
        case QUERYSTATE_AVAILABLE:
            return QUERYRESULT_SOME_DATA;
    }
    assert(false);
    return QUERYRESULT_UNKNOWN;
}
// Submit-time check for vkCmdCopyQueryPoolResults: warns when any query in the copied range is
// in a state where the requested flags would not produce data (or would wait). States yielding
// QUERYRESULT_UNKNOWN are tolerated since the outcome cannot be determined here.
bool CoreChecks::ValidateCopyQueryPoolResults(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
                                              VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, uint32_t perfPass,
                                              VkQueryResultFlags flags, QueryMap *localQueryToStateMap) {
    bool skip = false;
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryState state = state_data->GetQueryState(localQueryToStateMap, queryPool, firstQuery + i, perfPass);
        QueryResultType result_type = GetQueryResultType(state, flags);
        if (result_type != QUERYRESULT_SOME_DATA && result_type != QUERYRESULT_UNKNOWN) {
            skip |= state_data->LogError(
                commandBuffer, kVUID_Core_DrawState_InvalidQuery,
                "vkCmdCopyQueryPoolResults(): Requesting a copy from query to buffer on %s query %" PRIu32 ": %s",
                state_data->report_data->FormatHandle(queryPool).c_str(), firstQuery + i, string_QueryResultType(result_type));
        }
    }
    return skip;
}
// Validate vkCmdCopyQueryPoolResults(): destination buffer binding/usage, queue
// capabilities, stride/offset alignment, destination range, and restrictions that
// depend on the query pool's type (KHR performance / timestamp / Intel performance).
// Returns true if any validation error was logged (callers skip the call).
bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                        uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                        VkDeviceSize stride, VkQueryResultFlags flags) const {
    if (disabled[query_validation]) return false;
    const auto cb_state = GetCBState(commandBuffer);
    const auto dst_buff_state = GetBufferState(dstBuffer);
    assert(cb_state);
    assert(dst_buff_state);
    bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state, "vkCmdCopyQueryPoolResults()",
                                              "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826");
    // stride and dstOffset must satisfy the 4-byte (or 8-byte with VK_QUERY_RESULT_64_BIT) alignment rules
    skip |= ValidateQueryPoolStride("VUID-vkCmdCopyQueryPoolResults-flags-00822", "VUID-vkCmdCopyQueryPoolResults-flags-00823",
                                    stride, "dstOffset", dstOffset, flags);
    // Validate that DST buffer has correct usage flags set
    skip |= ValidateBufferUsageFlags(dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()",
                                     "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdCopyQueryPoolResults()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdCopyQueryPoolResults-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
    // This command must not be recorded inside a render pass instance
    skip |= InsideRenderPass(cb_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-renderpass");
    skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdCopyQueryPoolResults()",
                                   "VUID-vkCmdCopyQueryPoolResults-firstQuery-00820",
                                   "VUID-vkCmdCopyQueryPoolResults-firstQuery-00821");
    // Range checks on the destination: offset inside the buffer, and enough room for queryCount results.
    // NOTE(review): both checks use the buffer's memory-requirements size rather than its create-info
    // size — presumably these agree here; verify against how requirements.size is populated.
    if (dstOffset >= dst_buff_state->requirements.size) {
        skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstOffset-00819",
                         "vkCmdCopyQueryPoolResults() dstOffset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64
                         ") of buffer (%s).",
                         dstOffset, dst_buff_state->requirements.size, report_data->FormatHandle(dst_buff_state->buffer).c_str());
    } else if (dstOffset + (queryCount * stride) > dst_buff_state->requirements.size) {
        skip |=
            LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00824",
                     "vkCmdCopyQueryPoolResults() storage required (0x%" PRIxLEAST64
                     ") equal to dstOffset + (queryCount * stride) is greater than the size (0x%" PRIxLEAST64 ") of buffer (%s).",
                     dstOffset + (queryCount * stride), dst_buff_state->requirements.size,
                     report_data->FormatHandle(dst_buff_state->buffer).c_str());
    }
    // Query-pool-type-specific restrictions
    auto query_pool_state_iter = queryPoolMap.find(queryPool);
    if (query_pool_state_iter != queryPoolMap.end()) {
        auto query_pool_state = query_pool_state_iter->second.get();
        if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
            skip |= ValidatePerformanceQueryResults("vkCmdCopyQueryPoolResults", query_pool_state, firstQuery, queryCount, flags);
            // Copying KHR performance query results from a command buffer needs explicit device support
            if (!phys_dev_ext_props.performance_query_props.allowCommandBufferQueryCopies) {
                skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-03232",
                                 "vkCmdCopyQueryPoolResults called with query pool %s but "
                                 "VkPhysicalDevicePerformanceQueryPropertiesKHR::allowCommandBufferQueryCopies "
                                 "is not set.",
                                 report_data->FormatHandle(queryPool).c_str());
            }
        }
        // Timestamp queries are written atomically, so partial results are meaningless
        if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && ((flags & VK_QUERY_RESULT_PARTIAL_BIT) != 0)) {
            skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-00827",
                             "vkCmdCopyQueryPoolResults() query pool %s was created with VK_QUERY_TYPE_TIMESTAMP so flags must not "
                             "contain VK_QUERY_RESULT_PARTIAL_BIT.",
                             report_data->FormatHandle(queryPool).c_str());
        }
        // Intel performance query results cannot be copied via this command at all
        if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) {
            skip |= LogError(queryPool, "VUID-vkCmdCopyQueryPoolResults-queryType-02734",
                             "vkCmdCopyQueryPoolResults() called but QueryPool %s was created with queryType "
                             "VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL.",
                             report_data->FormatHandle(queryPool).c_str());
        }
    }
    return skip;
}
// Defer per-query state validation of vkCmdCopyQueryPoolResults to submit time,
// when the final query states for this command buffer are known.
void CoreChecks::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                      uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                      VkDeviceSize stride, VkQueryResultFlags flags) {
    if (disabled[query_validation]) return;
    auto cb_state = GetCBState(commandBuffer);
    auto validate_at_submit = [commandBuffer, queryPool, firstQuery, queryCount, flags](
                                  const ValidationStateTracker *state_tracker, bool do_validate, VkQueryPool &firstPerfQueryPool,
                                  uint32_t perfPass, QueryMap *localQueryToStateMap) {
        if (do_validate) {
            return ValidateCopyQueryPoolResults(state_tracker, commandBuffer, queryPool, firstQuery, queryCount, perfPass, flags,
                                                localQueryToStateMap);
        }
        return false;
    };
    cb_state->queryUpdates.emplace_back(validate_at_submit);
}
// Validate vkCmdPushConstants(): queue capability, command-buffer state, the
// [offset, offset + size) range against device limits, and that the range/stage
// combination exactly matches the pipeline layout's VkPushConstantRange entries.
bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                                 VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                                 const void *pValues) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdPushConstants-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
    skip |= ValidatePushConstantRange(offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip |= LogError(commandBuffer, "VUID-vkCmdPushConstants-stageFlags-requiredbitmask",
                         "vkCmdPushConstants() call has no stageFlags set.");
    }
    // Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command
    // stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
    // Only attempted when the earlier checks passed, so offset/size are known to be in-limits.
    if (!skip) {
        const auto &ranges = *GetPipelineLayout(layout)->push_constant_ranges;
        VkShaderStageFlags found_stages = 0;
        for (const auto &range : ranges) {
            // A range "overlaps" here only when it fully contains [offset, offset + size)
            if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
                VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
                // 01796: the command must name every stage declared by each containing range
                if (matching_stages != range.stageFlags) {
                    skip |=
                        LogError(commandBuffer, "VUID-vkCmdPushConstants-offset-01796",
                                 "vkCmdPushConstants(): stageFlags (%s, offset (%" PRIu32 "), and size (%" PRIu32
                                 "), must contain all stages in overlapping VkPushConstantRange stageFlags (%s), offset (%" PRIu32
                                 "), and size (%" PRIu32 ") in %s.",
                                 string_VkShaderStageFlags(stageFlags).c_str(), offset, size,
                                 string_VkShaderStageFlags(range.stageFlags).c_str(), range.offset, range.size,
                                 report_data->FormatHandle(layout).c_str());
                }
                // Accumulate all stages we've found
                found_stages = matching_stages | found_stages;
            }
        }
        // 01795: conversely, every stage named by the command must appear in some containing range
        if (found_stages != stageFlags) {
            uint32_t missing_stages = ~found_stages & stageFlags;
            skip |= LogError(
                commandBuffer, "VUID-vkCmdPushConstants-offset-01795",
                "vkCmdPushConstants(): %s, VkPushConstantRange in %s overlapping offset = %d and size = %d, do not contain %s.",
                string_VkShaderStageFlags(stageFlags).c_str(), report_data->FormatHandle(layout).c_str(), offset, size,
                string_VkShaderStageFlags(missing_stages).c_str());
        }
    }
    return skip;
}
// Validate vkCmdWriteTimestamp(): queue capability, command-buffer state, that the
// query pool is a timestamp pool, and that the command pool's queue family supports
// timestamps at all (timestampValidBits != 0).
bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                  VkQueryPool queryPool, uint32_t slot) const {
    if (disabled[query_validation]) return false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdWriteTimestamp()",
                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
                                      "VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
    const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
    if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp-queryPool-01416",
                         "vkCmdWriteTimestamp(): Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.",
                         report_data->FormatHandle(queryPool).c_str());
    }
    // timestampValidBits is a VkQueueFamilyProperties member, looked up via the queue
    // family the command buffer's pool was created for; zero means that family cannot
    // write timestamps.
    const uint32_t timestamp_valid_bits =
        GetPhysicalDeviceState()->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits;
    if (timestamp_valid_bits == 0) {
        // Message fix: the old text blamed the query pool, but timestampValidBits belongs
        // to the queue family, which is what VUID 00829 actually constrains.
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp-timestampValidBits-00829",
                         "vkCmdWriteTimestamp(): Timestamps are not supported by the queue family (index %" PRIu32
                         ") that the command pool of %s was created for (timestampValidBits is zero).",
                         cb_state->command_pool->queueFamilyIndex, report_data->FormatHandle(commandBuffer).c_str());
    }
    return skip;
}
void CoreChecks::PreCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                                VkQueryPool queryPool, uint32_t slot) {
    if (disabled[query_validation]) return;
    // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    const QueryObject query = {queryPool, slot};
    const char *func_name = "vkCmdWriteTimestamp()";
    auto verify_reset_at_submit = [commandBuffer, query, func_name](const ValidationStateTracker *state_tracker, bool do_validate,
                                                                    VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
                                                                    QueryMap *localQueryToStateMap) {
        if (do_validate) {
            return VerifyQueryIsReset(state_tracker, commandBuffer, query, func_name, firstPerfQueryPool, perfPass,
                                      localQueryToStateMap);
        }
        return false;
    };
    cb_state->queryUpdates.emplace_back(verify_reset_at_submit);
}
void CoreChecks::PreCallRecordCmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer,
                                                                          uint32_t accelerationStructureCount,
                                                                          const VkAccelerationStructureKHR *pAccelerationStructures,
                                                                          VkQueryType queryType, VkQueryPool queryPool,
                                                                          uint32_t firstQuery) {
    if (disabled[query_validation]) return;
    // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    const char *func_name = "vkCmdWriteAccelerationStructuresPropertiesKHR()";
    auto verify_queries_reset = [accelerationStructureCount, commandBuffer, firstQuery, func_name, queryPool](
                                    const ValidationStateTracker *state_tracker, bool do_validate, VkQueryPool &firstPerfQueryPool,
                                    uint32_t perfPass, QueryMap *localQueryToStateMap) {
        if (!do_validate) return false;
        bool skip = false;
        // One query slot is consumed per acceleration structure, starting at firstQuery
        for (uint32_t as_index = 0; as_index < accelerationStructureCount; ++as_index) {
            const QueryObject query = {{queryPool, firstQuery + as_index}, perfPass};
            skip |= VerifyQueryIsReset(state_tracker, commandBuffer, query, func_name, firstPerfQueryPool, perfPass,
                                       localQueryToStateMap);
        }
        return skip;
    };
    cb_state->queryUpdates.emplace_back(verify_queries_reset);
}
bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2 *attachments, const VkFramebufferCreateInfo *fbci,
VkImageUsageFlagBits usage_flag, const char *error_code) const {
bool skip = false;
if (attachments) {
for (uint32_t attach = 0; attach < count; attach++) {
if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
// Attachment counts are verified elsewhere, but prevent an invalid access
if (attachments[attach].attachment < fbci->attachmentCount) {
if ((fbci->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
auto view_state = GetImageViewState(*image_view);
if (view_state) {
const VkImageCreateInfo *ici = &GetImageState(view_state->create_info.image)->createInfo;
if (ici != nullptr) {
auto creation_usage = ici->usage;
const auto stencil_usage_info = LvlFindInChain<VkImageStencilUsageCreateInfo>(ici->pNext);
if (stencil_usage_info) {
creation_usage |= stencil_usage_info->stencilUsage;
}
if ((creation_usage & usage_flag) == 0) {
skip |= LogError(device, error_code,
"vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
} else {
const VkFramebufferAttachmentsCreateInfo *fbaci =
LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(fbci->pNext);
if (fbaci != nullptr && fbaci->pAttachmentImageInfos != nullptr &&
fbaci->attachmentImageInfoCount > attachments[attach].attachment) {
uint32_t image_usage = fbaci->pAttachmentImageInfos[attachments[attach].attachment].usage;
if ((image_usage & usage_flag) == 0) {
skip |=
LogError(device, error_code,
"vkCreateFramebuffer: Framebuffer attachment info (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
}
}
}
}
return skip;
}
bool CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) const {
bool skip = false;
const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info =
LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(pCreateInfo->pNext);
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) != 0) {
if (!enabled_features.core12.imagelessFramebuffer) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03189",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, "
"but the imagelessFramebuffer feature is not enabled.");
}
if (framebuffer_attachments_create_info == nullptr) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03190",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, "
"but no instance of VkFramebufferAttachmentsCreateInfo is present in the pNext chain.");
} else {
if (framebuffer_attachments_create_info->attachmentImageInfoCount != 0 &&
framebuffer_attachments_create_info->attachmentImageInfoCount != pCreateInfo->attachmentCount) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03191",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount is %u, but "
"VkFramebufferAttachmentsCreateInfo attachmentImageInfoCount is %u.",
pCreateInfo->attachmentCount, framebuffer_attachments_create_info->attachmentImageInfoCount);
}
}
}
auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
if (rp_state) {
const VkRenderPassCreateInfo2 *rpci = rp_state->createInfo.ptr();
if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
"of %u of %s being used to create Framebuffer.",
pCreateInfo->attachmentCount, rpci->attachmentCount,
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
} else {
// attachmentCounts match, so make sure corresponding attachment details line up
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
const VkImageView *image_views = pCreateInfo->pAttachments;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto view_state = GetImageViewState(image_views[i]);
if (view_state == nullptr) {
skip |= LogError(
image_views[i], "VUID-VkFramebufferCreateInfo-flags-02778",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is not a valid VkImageView.", i);
} else {
auto &ivci = view_state->create_info;
if (ivci.format != rpci->pAttachments[i].format) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00880",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
"match the format of %s used by the corresponding attachment for %s.",
i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
const VkImageCreateInfo *ici = &GetImageState(ivci.image)->createInfo;
if (ici->samples != rpci->pAttachments[i].samples) {
skip |=
LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00881",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not "
"match the %s "
"samples used by the corresponding attachment for %s.",
i, string_VkSampleCountFlagBits(ici->samples),
string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
// Verify that image memory is valid
auto image_data = GetImageState(ivci.image);
skip |= ValidateMemoryIsBoundToImage(image_data, "vkCreateFramebuffer()",
"UNASSIGNED-CoreValidation-BoundResourceFreedMemoryAccess");
// Verify that view only has a single mip level
if (ivci.subresourceRange.levelCount != 1) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-00883",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
"only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
i, ivci.subresourceRange.levelCount);
}
const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
bool used_as_input_color_resolve_depth_stencil_attachment = false;
bool used_as_fragment_shading_rate_attachment = false;
bool fsr_non_zero_viewmasks = false;
for (uint32_t j = 0; j < rpci->subpassCount; ++j) {
const VkSubpassDescription2 &subpass = rpci->pSubpasses[j];
uint32_t highest_view_bit = 0;
for (uint32_t k = 0; k < 32; ++k) {
if (((subpass.viewMask >> k) & 1) != 0) {
highest_view_bit = k;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) {
if (subpass.pInputAttachments[k].attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) {
if (subpass.pColorAttachments[k].attachment == i ||
(subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
}
if (used_as_input_color_resolve_depth_stencil_attachment) {
if (ivci.subresourceRange.layerCount <= highest_view_bit) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-renderPass-04536",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"less than or equal to the highest bit in the view mask (%u) of subpass %u.",
i, ivci.subresourceRange.layerCount, highest_view_bit, j);
}
}
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment;
fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext);
if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment->attachment == i) {
used_as_fragment_shading_rate_attachment = true;
if ((mip_width * fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04539",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level "
"%u is used as a "
"fragment shading rate attachment in subpass %u, but the product of its "
"width (%u) and the "
"specified shading rate texel width (%u) are smaller than the "
"corresponding framebuffer width (%u).",
i, ivci.subresourceRange.baseMipLevel, j, mip_width,
fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width);
}
if ((mip_height * fsr_attachment->shadingRateAttachmentTexelSize.height) <
pCreateInfo->height) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-04540",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u "
"is used as a "
"fragment shading rate attachment in subpass %u, but the product of its "
"height (%u) and the "
"specified shading rate texel height (%u) are smaller than the corresponding "
"framebuffer height (%u).",
i, ivci.subresourceRange.baseMipLevel, j, mip_height,
fsr_attachment->shadingRateAttachmentTexelSize.height, pCreateInfo->height);
}
if (highest_view_bit != 0) {
fsr_non_zero_viewmasks = true;
}
if (ivci.subresourceRange.layerCount <= highest_view_bit) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04537",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"less than or equal to the highest bit in the view mask (%u) of subpass %u.",
i, ivci.subresourceRange.layerCount, highest_view_bit, j);
}
}
}
}
if (enabled_features.fragment_density_map_features.fragmentDensityMap) {
const VkRenderPassFragmentDensityMapCreateInfoEXT *fdm_attachment;
fdm_attachment = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rpci->pNext);
if (fdm_attachment && fdm_attachment->fragmentDensityMapAttachment.attachment == i) {
uint32_t ceiling_width = static_cast<uint32_t>(ceil(
static_cast<float>(pCreateInfo->width) /
std::max(static_cast<float>(
phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.width),
1.0f)));
if (mip_width < ceiling_width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-02555",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has width "
"smaller than the corresponding the ceiling of framebuffer width / "
"maxFragmentDensityTexelSize.width "
"Here are the respective dimensions for attachment #%u, the ceiling value:\n "
"attachment #%u, framebuffer:\n"
"width: %u, the ceiling value: %u\n",
i, ivci.subresourceRange.baseMipLevel, i, i, mip_width, ceiling_width);
}
uint32_t ceiling_height = static_cast<uint32_t>(ceil(
static_cast<float>(pCreateInfo->height) /
std::max(static_cast<float>(
phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.height),
1.0f)));
if (mip_height < ceiling_height) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-02556",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has height "
"smaller than the corresponding the ceiling of framebuffer height / "
"maxFragmentDensityTexelSize.height "
"Here are the respective dimensions for attachment #%u, the ceiling value:\n "
"attachment #%u, framebuffer:\n"
"height: %u, the ceiling value: %u\n",
i, ivci.subresourceRange.baseMipLevel, i, i, mip_height, ceiling_height);
}
}
}
if (used_as_input_color_resolve_depth_stencil_attachment) {
if (mip_width < pCreateInfo->width) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-04533",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has width "
"smaller than the corresponding framebuffer width (%u).",
i, mip_width, pCreateInfo->width);
}
if (mip_height < pCreateInfo->height) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04534",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has height "
"smaller than the corresponding framebuffer height (%u).",
i, mip_height, pCreateInfo->height);
}
if (ivci.subresourceRange.layerCount < pCreateInfo->layers) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-04535",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"smaller than the corresponding framebuffer layer count (%u).",
i, ivci.subresourceRange.layerCount, pCreateInfo->layers);
}
}
if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) {
if (ivci.subresourceRange.layerCount != 1 && ivci.subresourceRange.layerCount < pCreateInfo->layers) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-04538",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"smaller than the corresponding framebuffer layer count (%u).",
i, ivci.subresourceRange.layerCount, pCreateInfo->layers);
}
}
if (IsIdentitySwizzle(ivci.components) == false) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-00884",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
}
if ((ivci.viewType == VK_IMAGE_VIEW_TYPE_2D) || (ivci.viewType == VK_IMAGE_VIEW_TYPE_2D)) {
const auto image_state = GetImageState(ivci.image);
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if (FormatIsDepthOrStencil(ivci.format)) {
LogObjectList objlist(device);
objlist.add(ivci.image);
skip |= LogError(
objlist, "VUID-VkFramebufferCreateInfo-pAttachments-00891",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type of "
"%s "
"which was taken from image %s of type VK_IMAGE_TYPE_3D, but the image view format is a "
"depth/stencil format %s",
i, string_VkImageViewType(ivci.viewType), report_data->FormatHandle(ivci.image).c_str(),
string_VkFormat(ivci.format));
}
}
}
if (ivci.viewType == VK_IMAGE_VIEW_TYPE_3D) {
LogObjectList objlist(device);
objlist.add(image_views[i]);
skip |= LogError(objlist, "VUID-VkFramebufferCreateInfo-flags-04113",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type "
"of VK_IMAGE_VIEW_TYPE_3D",
i);
}
}
}
} else if (framebuffer_attachments_create_info) {
// VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT is set
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto &aii = framebuffer_attachments_create_info->pAttachmentImageInfos[i];
bool format_found = false;
for (uint32_t j = 0; j < aii.viewFormatCount; ++j) {
if (aii.pViewFormats[j] == rpci->pAttachments[i].format) {
format_found = true;
}
}
if (!format_found) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-flags-03205",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u does not include "
"format %s used "
"by the corresponding attachment for renderPass (%s).",
i, string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
bool used_as_input_color_resolve_depth_stencil_attachment = false;
bool used_as_fragment_shading_rate_attachment = false;
bool fsr_non_zero_viewmasks = false;
for (uint32_t j = 0; j < rpci->subpassCount; ++j) {
const VkSubpassDescription2 &subpass = rpci->pSubpasses[j];
uint32_t highest_view_bit = 0;
for (int k = 0; k < 32; ++k) {
if (((subpass.viewMask >> k) & 1) != 0) {
highest_view_bit = k;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) {
if (subpass.pInputAttachments[k].attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) {
if (subpass.pColorAttachments[k].attachment == i ||
(subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) {
used_as_input_color_resolve_depth_stencil_attachment = true;
break;
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) {
used_as_input_color_resolve_depth_stencil_attachment = true;
}
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment;
fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext);
if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment->attachment == i) {
used_as_fragment_shading_rate_attachment = true;
if ((aii.width * fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04543",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a "
"fragment shading rate attachment in subpass %u, but the product of its width (%u) and the "
"specified shading rate texel width (%u) are smaller than the corresponding framebuffer "
"width (%u).",
i, j, aii.width, fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width);
}
if ((aii.height * fsr_attachment->shadingRateAttachmentTexelSize.height) < pCreateInfo->height) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04544",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a "
"fragment shading rate attachment in subpass %u, but the product of its "
"height (%u) and the "
"specified shading rate texel height (%u) are smaller than the corresponding "
"framebuffer height (%u).",
i, j, aii.height, fsr_attachment->shadingRateAttachmentTexelSize.height,
pCreateInfo->height);
}
if (highest_view_bit != 0) {
fsr_non_zero_viewmasks = true;
}
if (aii.layerCount != 1 && aii.layerCount <= highest_view_bit) {
skip |= LogError(
device, kVUIDUndefined,
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"less than or equal to the highest bit in the view mask (%u) of subpass %u.",
i, aii.layerCount, highest_view_bit, j);
}
}
}
}
if (used_as_input_color_resolve_depth_stencil_attachment) {
if (aii.width < pCreateInfo->width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04541",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a width of only #%u, "
"but framebuffer has a width of #%u.",
i, aii.width, pCreateInfo->width);
}
if (aii.height < pCreateInfo->height) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-04542",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a height of only #%u, "
"but framebuffer has a height of #%u.",
i, aii.height, pCreateInfo->height);
}
const char *mismatched_layers_no_multiview_vuid = device_extensions.vk_khr_multiview
? "VUID-VkFramebufferCreateInfo-renderPass-04546"
: "VUID-VkFramebufferCreateInfo-flags-04547";
if ((rpci->subpassCount == 0) || (rpci->pSubpasses[0].viewMask == 0)) {
if (aii.layerCount < pCreateInfo->layers) {
skip |= LogError(
device, mismatched_layers_no_multiview_vuid,
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has only #%u layers, "
"but framebuffer has #%u layers.",
i, aii.layerCount, pCreateInfo->layers);
}
}
}
if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) {
if (aii.layerCount != 1 && aii.layerCount < pCreateInfo->layers) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04545",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) "
"smaller than the corresponding framebuffer layer count (%u).",
i, aii.layerCount, pCreateInfo->layers);
}
}
}
// Validate image usage
uint32_t attachment_index = VK_ATTACHMENT_UNUSED;
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pColorAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |=
MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pResolveAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |= MatchUsage(1, rpci->pSubpasses[i].pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03202");
skip |= MatchUsage(rpci->pSubpasses[i].inputAttachmentCount, rpci->pSubpasses[i].pInputAttachments, pCreateInfo,
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03204");
const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext);
if (device_extensions.vk_khr_depth_stencil_resolve && depth_stencil_resolve != nullptr) {
skip |= MatchUsage(1, depth_stencil_resolve->pDepthStencilResolveAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03203");
}
const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info =
LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(rpci->pSubpasses[i].pNext);
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
fragment_shading_rate_attachment_info != nullptr) {
skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment, pCreateInfo,
VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
"VUID-VkFramebufferCreateInfo-flags-04549");
}
}
if (device_extensions.vk_khr_multiview) {
if ((rpci->subpassCount > 0) && (rpci->pSubpasses[0].viewMask != 0)) {
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext);
uint32_t view_bits = rpci->pSubpasses[i].viewMask;
uint32_t highest_view_bit = 0;
for (int j = 0; j < 32; ++j) {
if (((view_bits >> j) & 1) != 0) {
highest_view_bit = j;
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].colorAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pColorAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a color attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
if (rpci->pSubpasses[i].pResolveAttachments) {
attachment_index = rpci->pSubpasses[i].pResolveAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a resolve attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].inputAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pInputAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as an input attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
if (rpci->pSubpasses[i].pDepthStencilAttachment != nullptr) {
attachment_index = rpci->pSubpasses[i].pDepthStencilAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
if (device_extensions.vk_khr_depth_stencil_resolve && depth_stencil_resolve != nullptr &&
depth_stencil_resolve->pDepthStencilResolveAttachment != nullptr) {
attachment_index = depth_stencil_resolve->pDepthStencilResolveAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil resolve "
"attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
}
}
}
}
}
}
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
// Verify correct attachment usage flags
for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
const VkSubpassDescription2 &subpass_description = rpci->pSubpasses[subpass];
// Verify input attachments:
skip |= MatchUsage(subpass_description.inputAttachmentCount, subpass_description.pInputAttachments, pCreateInfo,
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879");
// Verify color attachments:
skip |= MatchUsage(subpass_description.colorAttachmentCount, subpass_description.pColorAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877");
// Verify depth/stencil attachments:
skip |=
MatchUsage(1, subpass_description.pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633");
            // Verify depth/stencil resolve
if (device_extensions.vk_khr_depth_stencil_resolve) {
const VkSubpassDescriptionDepthStencilResolve *ds_resolve =
LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_description.pNext);
if (ds_resolve) {
skip |= MatchUsage(1, ds_resolve->pDepthStencilResolveAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
"VUID-VkFramebufferCreateInfo-pAttachments-02634");
}
}
// Verify fragment shading rate attachments
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info =
LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass_description.pNext);
if (fragment_shading_rate_attachment_info) {
skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment,
pCreateInfo, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR,
"VUID-VkFramebufferCreateInfo-flags-04548");
}
}
}
}
bool b_has_non_zero_view_masks = false;
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
if (rpci->pSubpasses[i].viewMask != 0) {
b_has_non_zero_view_masks = true;
break;
}
}
if (b_has_non_zero_view_masks && pCreateInfo->layers != 1) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-02531",
"vkCreateFramebuffer(): VkFramebufferCreateInfo has #%u layers but "
"renderPass (%s) was specified with non-zero view masks\n",
pCreateInfo->layers, report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
}
}
// Verify FB dimensions are within physical device limits
if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00886",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
"width: %u, device max: %u\n",
pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth);
}
if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-height-00888",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
"height: %u, device max: %u\n",
pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight);
}
if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-layers-00890",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
"layers: %u, device max: %u\n",
pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers);
}
// Verify FB dimensions are greater than zero
if (pCreateInfo->width <= 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00885",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
}
if (pCreateInfo->height <= 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-height-00887",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
}
if (pCreateInfo->layers <= 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-layers-00889",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) const {
    // TODO : Verify that renderPass FB is created with is compatible with FB
    // Thin dispatch wrapper: all create-info checks live in ValidateFramebufferCreateInfo().
    return ValidateFramebufferCreateInfo(pCreateInfo);
}
// Depth-first search backwards through the subpass DAG: returns true when a dependency
// path from 'index' back to 'dependent' exists. 'processed_nodes' memoizes visited
// subpasses so shared predecessors are not re-explored.
static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // A node already visited yielded no path earlier, so it cannot yield one now.
    if (processed_nodes.count(index)) return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Direct predecessor edge found — dependency path exists.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) != node.prev.end()) {
        return true;
    }
    // Otherwise recurse through each predecessor looking for a transitive path.
    for (auto prev_pass : node.prev) {
        if (FindDependency(prev_pass, dependent, subpass_to_node, processed_nodes)) return true;
    }
    return false;
}
// Returns true for the image layouts that only permit read access
// (read-only depth/stencil variants and shader-read-only).
bool CoreChecks::IsImageLayoutReadOnly(VkImageLayout layout) const {
    switch (layout) {
        case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
        case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
        case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
        case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
            return true;
        default:
            return false;
    }
}
bool CoreChecks::CheckDependencyExists(const VkRenderPass renderpass, const uint32_t subpass, const VkImageLayout layout,
const std::vector<SubpassLayout> &dependent_subpasses,
const std::vector<DAGNode> &subpass_to_node, bool &skip) const {
bool result = true;
bool b_image_layout_read_only = IsImageLayoutReadOnly(layout);
// Loop through all subpasses that share the same attachment and make sure a dependency exists
for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
const SubpassLayout &sp = dependent_subpasses[k];
if (subpass == sp.index) continue;
if (b_image_layout_read_only && IsImageLayoutReadOnly(sp.layout)) continue;
const DAGNode &node = subpass_to_node[subpass];
// Check for a specified dependency between the two nodes. If one exists we are done.
auto prev_elem = std::find(node.prev.begin(), node.prev.end(), sp.index);
auto next_elem = std::find(node.next.begin(), node.next.end(), sp.index);
if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
// If no dependency exits an implicit dependency still might. If not, throw an error.
std::unordered_set<uint32_t> processed_nodes;
if (!(FindDependency(subpass, sp.index, subpass_to_node, processed_nodes) ||
FindDependency(sp.index, subpass, subpass_to_node, processed_nodes))) {
skip |=
LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass,
"A dependency between subpasses %d and %d must exist but one is not specified.", subpass, sp.index);
result = false;
}
}
}
return result;
}
// Recursively walks the subpass DAG backwards from subpass 'index' to decide whether 'attachment'
// must be preserved. Returns true when this subpass references the attachment (as a color, input,
// or depth/stencil attachment) or when any predecessor does. If a predecessor used the attachment
// and this call is not the starting subpass of the walk (depth > 0), this subpass must list the
// attachment in pPreserveAttachments; otherwise a validation error is accumulated into 'skip'.
bool CoreChecks::CheckPreserved(const VkRenderPass renderpass, const VkRenderPassCreateInfo2 *pCreateInfo, const int index,
                                const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth,
                                bool &skip) const {
    const DAGNode &node = subpass_to_node[index];
    // If this node uses the attachment return true as next nodes need to preserve the attachment.
    const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment) return true;
    }
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (attachment == subpass.pInputAttachments[j].attachment) return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(renderpass, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
    }
    // If the attachment was used by a previous node then this node needs to preserve it.
    // depth == 0 is the subpass that re-reads the attachment, which does not need to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip |= LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass,
                             "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}
// Returns true when the half-open ranges [offset1, offset1 + size1) and [offset2, offset2 + size2)
// share at least one element.
// Bug fix: the previous formulation only detected *partial* overlaps — two identical ranges, or one
// range fully containing the other, were reported as non-overlapping, so aliasing attachments with
// the same (or nested) subresource/memory ranges escaped dependency validation.
// NOTE(review): offset + size may wrap for VK_REMAINING_* sentinel counts — same caveat as before.
template <class T>
bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return ((offset1 + size1) > offset2) && (offset1 < (offset2 + size2));
}
// Two subresource regions alias only when both their mip-level ranges and their
// array-layer ranges intersect.
bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    const bool mips_overlap =
        IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount);
    const bool layers_overlap =
        IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount);
    return mips_overlap && layers_overlap;
}
// Validates that every inter-subpass data hazard within 'renderPass' is covered by a subpass
// dependency, taking attachment aliasing in 'framebuffer' into account. Three passes:
//   1. (non-imageless framebuffers only) find aliasing attachment pairs — same image view,
//      overlapping subresource ranges of the same image, or overlapping memory bindings;
//   2. record, per attachment, which subpasses read it (inputs) and write it (outputs),
//      mirroring each use onto all aliasing attachments;
//   3. require an explicit or transitive dependency between every conflicting pair of uses,
//      and check that attachments read via input attachments are preserved by intermediate
//      subpasses after being written.
// Returns true when at least one validation error was logged.
bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) const {
    bool skip = false;
    auto const framebuffer_info = framebuffer->createInfo.ptr();
    auto const create_info = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    // Per-attachment record of reading subpasses, writing subpasses, and aliasing attachments.
    struct Attachment {
        std::vector<SubpassLayout> outputs;
        std::vector<SubpassLayout> inputs;
        std::vector<uint32_t> overlapping;
    };
    std::vector<Attachment> attachments(create_info->attachmentCount);
    // Imageless framebuffers carry no VkImageView handles, so aliasing cannot be determined here.
    if (!(framebuffer_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT)) {
        // Find overlapping attachments
        for (uint32_t i = 0; i < create_info->attachmentCount; ++i) {
            for (uint32_t j = i + 1; j < create_info->attachmentCount; ++j) {
                VkImageView viewi = framebuffer_info->pAttachments[i];
                VkImageView viewj = framebuffer_info->pAttachments[j];
                // Same view handle: trivially aliasing.
                if (viewi == viewj) {
                    attachments[i].overlapping.emplace_back(j);
                    attachments[j].overlapping.emplace_back(i);
                    continue;
                }
                auto view_state_i = GetImageViewState(viewi);
                auto view_state_j = GetImageViewState(viewj);
                if (!view_state_i || !view_state_j) {
                    continue;
                }
                auto view_ci_i = view_state_i->create_info;
                auto view_ci_j = view_state_j->create_info;
                // Different views of the same image with intersecting mip/layer ranges.
                if (view_ci_i.image == view_ci_j.image &&
                    IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                    attachments[i].overlapping.emplace_back(j);
                    attachments[j].overlapping.emplace_back(i);
                    continue;
                }
                auto image_data_i = GetImageState(view_ci_i.image);
                auto image_data_j = GetImageState(view_ci_j.image);
                if (!image_data_i || !image_data_j) {
                    continue;
                }
                // Distinct images bound to overlapping regions of the same memory allocation.
                if (image_data_i->binding.mem_state == image_data_j->binding.mem_state &&
                    IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
                                       image_data_j->binding.size)) {
                    attachments[i].overlapping.emplace_back(j);
                    attachments[j].overlapping.emplace_back(i);
                }
            }
        }
    }
    // Find for each attachment the subpasses that use them.
    unordered_set<uint32_t> attachment_indices;
    for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
        const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
        attachment_indices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            SubpassLayout sp = {i, subpass.pInputAttachments[j].layout};
            attachments[attachment].inputs.emplace_back(sp);
            // An input read of this attachment is also a read of everything it aliases.
            for (auto overlapping_attachment : attachments[attachment].overlapping) {
                attachments[overlapping_attachment].inputs.emplace_back(sp);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            SubpassLayout sp = {i, subpass.pColorAttachments[j].layout};
            attachments[attachment].outputs.emplace_back(sp);
            for (auto overlapping_attachment : attachments[attachment].overlapping) {
                attachments[overlapping_attachment].outputs.emplace_back(sp);
            }
            attachment_indices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            SubpassLayout sp = {i, subpass.pDepthStencilAttachment->layout};
            attachments[attachment].outputs.emplace_back(sp);
            for (auto overlapping_attachment : attachments[attachment].overlapping) {
                attachments[overlapping_attachment].outputs.emplace_back(sp);
            }
            // attachment_indices holds this subpass's color attachments at this point.
            if (attachment_indices.count(attachment)) {
                skip |=
                    LogError(renderPass->renderPass, kVUID_Core_DrawState_InvalidRenderpass,
                             "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
            }
        }
    }
    // If there is a dependency needed make sure one exists
    for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
        const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(renderPass->renderPass, i, subpass.pInputAttachments[j].layout, attachments[attachment].outputs,
                                  subpass_to_node, skip);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(renderPass->renderPass, i, subpass.pColorAttachments[j].layout, attachments[attachment].outputs,
                                  subpass_to_node, skip);
            CheckDependencyExists(renderPass->renderPass, i, subpass.pColorAttachments[j].layout, attachments[attachment].inputs,
                                  subpass_to_node, skip);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(renderPass->renderPass, i, subpass.pDepthStencilAttachment->layout,
                                  attachments[attachment].outputs, subpass_to_node, skip);
            CheckDependencyExists(renderPass->renderPass, i, subpass.pDepthStencilAttachment->layout,
                                  attachments[attachment].inputs, subpass_to_node, skip);
        }
    }
    // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
    // written.
    for (uint32_t i = 0; i < create_info->subpassCount; ++i) {
        const VkSubpassDescription2 &subpass = create_info->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(renderPass->renderPass, create_info, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0,
                           skip);
        }
    }
    return skip;
}
// Validates the subpass-dependency list of a render pass create info (works for both
// vkCreateRenderPass and vkCreateRenderPass2 via 'rp_version'): VK_DEPENDENCY_VIEW_LOCAL_BIT /
// viewOffset consistency, external-dependency restrictions, forward-only subpass ordering, and
// the additional constraints on self-dependencies (view masks, logical stage ordering, and the
// VK_DEPENDENCY_BY_REGION_BIT requirement for framebuffer-space stages).
// Returns true when at least one validation error was logged.
// Fix: the VUID-VkSubpassDependency2-dependencyFlags-03092 message previously claimed the
// dependency "specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT", but that branch fires exactly when
// the bit is ABSENT; the message now matches the condition.
bool CoreChecks::ValidateRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo) const {
    bool skip = false;
    const char *vuid;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency2 &dependency = pCreateInfo->pDependencies[i];
        // Logical stage ordering is used below to detect backwards self-dependencies.
        auto latest_src_stage = sync_utils::GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask);
        auto earliest_dst_stage = sync_utils::GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask);
        // The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if
        // any are, which enables multiview.
        if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && (pCreateInfo->pSubpasses[0].viewMask == 0)) {
            skip |= LogError(
                device, "VUID-VkRenderPassCreateInfo2-viewMask-03059",
                "Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i);
        } else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) {
            // A non-view-local dependency must have a zero viewOffset.
            skip |= LogError(device, "VUID-VkSubpassDependency2-dependencyFlags-03092",
                             "Dependency %u does not specify the VK_DEPENDENCY_VIEW_LOCAL_BIT, but specifies a view offset of %u.",
                             i, dependency.viewOffset);
        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865";
                skip |= LogError(device, vuid, "The src and dst subpasses in dependency %u are both external.", i);
            } else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
                // External dependencies may not be view-local.
                if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
                    vuid = "VUID-VkSubpassDependency-dependencyFlags-02520";
                } else {  // dependency.dstSubpass == VK_SUBPASS_EXTERNAL
                    vuid = "VUID-VkSubpassDependency-dependencyFlags-02521";
                }
                if (use_rp2) {
                    // Create render pass 2 distinguishes between source and destination external dependencies.
                    if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
                        vuid = "VUID-VkSubpassDependency2-dependencyFlags-03090";
                    } else {
                        vuid = "VUID-VkSubpassDependency2-dependencyFlags-03091";
                    }
                }
                skip |=
                    LogError(device, vuid,
                             "Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i);
            }
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864";
            skip |= LogError(device, vuid,
                             "Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is "
                             "disallowed to prevent cyclic dependencies.",
                             i, dependency.srcSubpass, dependency.dstSubpass);
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            // Self-dependency: extra constraints apply.
            if (dependency.viewOffset != 0) {
                vuid = use_rp2 ? "VUID-VkSubpassDependency2-viewOffset-02530" : "VUID-VkRenderPassCreateInfo-pNext-01930";
                skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency but has a non-zero view offset of %u", i,
                                 dependency.viewOffset);
            } else if ((dependency.dependencyFlags | VK_DEPENDENCY_VIEW_LOCAL_BIT) != dependency.dependencyFlags &&
                       pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) {
                // Multiview self-dependencies must be declared view-local.
                vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872";
                skip |= LogError(device, vuid,
                                 "Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not "
                                 "specify VK_DEPENDENCY_VIEW_LOCAL_BIT.",
                                 i, dependency.srcSubpass);
            } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) ||
                        HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) &&
                       (sync_utils::GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) >
                        sync_utils::GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) {
                // Self-dependencies must flow forward through the logical pipeline.
                vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867";
                skip |= LogError(
                    device, vuid,
                    "Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).",
                    i, string_VkPipelineStageFlags(latest_src_stage).c_str(),
                    string_VkPipelineStageFlags(earliest_dst_stage).c_str());
            } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) == false) &&
                       (HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask) == false) &&
                       ((dependency.dependencyFlags & VK_DEPENDENCY_BY_REGION_BIT) == 0)) {
                // Framebuffer-space-only self-dependencies must be by-region.
                vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-02245" : "VUID-VkSubpassDependency-srcSubpass-02243";
                skip |= LogError(device, vuid,
                                 "Dependency %u specifies a self-dependency for subpass %u with both stages including a "
                                 "framebuffer-space stage, but does not specify VK_DEPENDENCY_BY_REGION_BIT in dependencyFlags.",
                                 i, dependency.srcSubpass);
            }
        }
    }
    return skip;
}
// Checks that an attachment reference index is within the render pass's attachment array.
// 'error_type' and 'function_name' are only used to build the error message.
// Returns true when a validation error was logged.
bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count,
                                         const char *error_type, const char *function_name) const {
    bool skip = false;
    // Callers are expected to have filtered out VK_ATTACHMENT_UNUSED already.
    assert(attachment != VK_ATTACHMENT_UNUSED);
    if (attachment >= attachment_count) {
        const char *vuid = (rp_version == RENDER_PASS_VERSION_2) ? "VUID-VkRenderPassCreateInfo2-attachment-03051"
                                                                 : "VUID-VkRenderPassCreateInfo-attachment-00834";
        skip |= LogError(device, vuid, "%s: %s attachment %d must be less than the total number of attachments %d.",
                         function_name, error_type, attachment, attachment_count);
    }
    return skip;
}
// Bitmask of the ways a single attachment can be referenced within one subpass;
// the values are distinct powers of two so simultaneous uses can be OR'd together.
enum AttachmentType {
    ATTACHMENT_COLOR = 1,
    ATTACHMENT_DEPTH = 2,
    ATTACHMENT_INPUT = 4,
    ATTACHMENT_PRESERVE = 8,
    ATTACHMENT_RESOLVE = 16,
};
// Human-readable name for a single AttachmentType bit; any value that is not exactly
// one bit (i.e. a combination of uses) yields "(multiple)".
char const *StringAttachmentType(uint8_t type) {
    if (type == ATTACHMENT_COLOR) return "color";
    if (type == ATTACHMENT_DEPTH) return "depth";
    if (type == ATTACHMENT_INPUT) return "input";
    if (type == ATTACHMENT_PRESERVE) return "preserve";
    if (type == ATTACHMENT_RESOLVE) return "resolve";
    return "(multiple)";
}
// Records that 'attachment' is used as 'new_use' with 'new_layout' in 'subpass' and validates the
// combination against the uses/layout already recorded for that attachment in the same subpass:
//  - a repeated use must keep the same image layout;
//  - combining uses is an error, except pairing an input use with a later color/depth use.
// 'attachment_uses'/'attachment_layouts' are the per-subpass accumulators (indexed by attachment)
// that this function updates on success. Returns true when a validation error was logged.
bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses,
                                  std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use,
                                  VkImageLayout new_layout) const {
    if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */
    bool skip = false;
    auto &uses = attachment_uses[attachment];
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;
    const char *const function_name = use_rp2 ? "vkCreateRenderPass2()" : "vkCreateRenderPass()";
    if (uses & new_use) {
        // Same use seen before: only the layout can conflict.
        if (attachment_layouts[attachment] != new_layout) {
            vuid = use_rp2 ? "VUID-VkSubpassDescription2-layout-02528" : "VUID-VkSubpassDescription-layout-02519";
            skip |= LogError(device, vuid, "%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).",
                             function_name, subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]),
                             string_VkImageLayout(new_layout));
        }
    } else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) {
        /* Note: input attachments are assumed to be done first. */
        vuid = use_rp2 ? "VUID-VkSubpassDescription2-pPreserveAttachments-03074"
                       : "VUID-VkSubpassDescription-pPreserveAttachments-00854";
        skip |= LogError(device, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass,
                         attachment, StringAttachmentType(uses), StringAttachmentType(new_use));
    } else {
        // Valid new use: record the layout and merge the use bit.
        attachment_layouts[attachment] = new_layout;
        uses |= new_use;
    }
    return skip;
}
// Handles attachment references regardless of type (input, color, depth, etc)
// Input attachments have extra VUs associated with them
// Validates the image layout of a single VkAttachmentReference(2). For render pass v1 only a
// small set of layouts is forbidden; for v2 the checks also cover the optional
// VkAttachmentReferenceStencilLayout chained struct and the separateDepthStencilLayouts feature.
// 'error_type'/'function_name' are only used to build error messages.
// Returns true when a validation error was logged.
bool CoreChecks::ValidateAttachmentReference(RenderPassCreateVersion rp_version, VkAttachmentReference2 reference, bool input,
                                             const char *error_type, const char *function_name) const {
    bool skip = false;
    // Currently all VUs require attachment to not be UNUSED
    assert(reference.attachment != VK_ATTACHMENT_UNUSED);
    // currently VkAttachmentReference and VkAttachmentReference2 have no overlapping VUs
    if (rp_version == RENDER_PASS_VERSION_1) {
        switch (reference.layout) {
            case VK_IMAGE_LAYOUT_UNDEFINED:
            case VK_IMAGE_LAYOUT_PREINITIALIZED:
            case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
            case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
            case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
                skip |= LogError(device, "VUID-VkAttachmentReference-layout-00857",
                                 "%s: Layout for %s is %s but must not be "
                                 "VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR|DEPTH_ATTACHMENT_OPTIMAL|DEPTH_READ_"
                                 "ONLY_OPTIMAL|STENCIL_ATTACHMENT_OPTIMAL|STENCIL_READ_ONLY_OPTIMAL].",
                                 function_name, error_type, string_VkImageLayout(reference.layout));
                break;
            default:
                break;
        }
    } else {
        const auto *attachment_reference_stencil_layout = LvlFindInChain<VkAttachmentReferenceStencilLayout>(reference.pNext);
        switch (reference.layout) {
            case VK_IMAGE_LAYOUT_UNDEFINED:
            case VK_IMAGE_LAYOUT_PREINITIALIZED:
            case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
                skip |=
                    LogError(device, "VUID-VkAttachmentReference2-layout-03077",
                             "%s: Layout for %s is %s but must not be VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR].",
                             function_name, error_type, string_VkImageLayout(reference.layout));
                break;
            case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
                // This check doesn't rely on the aspect mask value
                if (attachment_reference_stencil_layout) {
                    const VkImageLayout stencil_layout = attachment_reference_stencil_layout->stencilLayout;
                    // clang-format off
                    if (stencil_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
                        stencil_layout == VK_IMAGE_LAYOUT_PREINITIALIZED ||
                        stencil_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL ||
                        stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL ||
                        stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL ||
                        stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
                        stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
                        stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
                        stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
                        stencil_layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                        skip |= LogError(device, "VUID-VkAttachmentReferenceStencilLayout-stencilLayout-03318",
                                         "%s: In %s with pNext chain instance VkAttachmentReferenceStencilLayout, "
                                         "the stencilLayout (%s) must not be "
                                         "VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PREINITIALIZED, "
                                         "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, or "
                                         "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.",
                                         function_name, error_type, string_VkImageLayout(stencil_layout));
                    }
                }
                // clang-format on
                break;
            default:
                break;
        }
        // Extra case to check for all 4 seperate depth/stencil layout
        // This makes the above switch case much easier to read
        switch (reference.layout) {
            case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
            case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
                if (!enabled_features.core12.separateDepthStencilLayouts) {
                    skip |= LogError(device, "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313",
                                     "%s: Layout for %s is %s but without separateDepthStencilLayouts enabled the layout must not "
                                     "be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
                                     "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
                                     function_name, error_type, string_VkImageLayout(reference.layout));
                }
                // no break: falls through into the empty default case below
            default:
                break;
        }
    }
    return skip;
}
// Validate how every attachment is referenced across all subpasses of a render pass:
// load-op vs. initial-layout sanity, input-attachment aspect masks, format feature
// support, sample-count consistency, resolve rules, and the set of image layouts that
// are legal for each usage (input / color / depth-stencil).
// `rp_version` selects the VUID set (render pass 1 vs. 2); `function_name` is only
// used to prefix the logged messages. Returns true if anything was logged.
bool CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo,
                                                   const char *function_name) const {
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;

    // Best-practice warnings: LOAD_OP_LOAD from an UNDEFINED initial layout loads garbage.
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkFormat format = pCreateInfo->pAttachments[i].format;
        if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
            if ((FormatIsColor(format) || FormatHasDepth(format)) &&
                pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                skip |= LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass,
                                   "%s: Render pass pAttachment[%u] has loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout == "
                                   "VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
                                   "VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
                                   "render pass.",
                                   function_name, i);
            }
            if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
                skip |=
                    LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass,
                               "%s: Render pass pAttachment[%u] has stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout "
                               "== VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
                               "VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
                               "render pass.",
                               function_name, i);
            }
        }
    }

    // Track when we're observing the first use of an attachment
    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
    // Track if attachments are used as input as well as another type
    std::unordered_set<uint32_t> input_attachments;

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i];
        // Per-subpass usage/layout bookkeeping consumed by AddAttachmentUse to detect
        // conflicting uses of the same attachment within one subpass.
        std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount);
        std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount);

        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
            vuid = use_rp2 ? "VUID-VkSubpassDescription2-pipelineBindPoint-03062"
                           : "VUID-VkSubpassDescription-pipelineBindPoint-00844";
            skip |= LogError(device, vuid, "%s: Pipeline bind point for pSubpasses[%d] must be VK_PIPELINE_BIND_POINT_GRAPHICS.",
                             function_name, i);
        }

        // Check input attachments first
        // - so we can detect first-use-as-input for VU #00349
        // - if other color or depth/stencil is also input, it limits valid layouts
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            auto const &attachment_ref = subpass.pInputAttachments[j];
            const uint32_t attachment_index = attachment_ref.attachment;
            const VkImageAspectFlags aspect_mask = attachment_ref.aspectMask;
            if (attachment_index != VK_ATTACHMENT_UNUSED) {
                input_attachments.insert(attachment_index);
                std::string error_type = "pSubpasses[" + std::to_string(i) + "].pInputAttachments[" + std::to_string(j) + "]";
                skip |= ValidateAttachmentReference(rp_version, attachment_ref, true, error_type.c_str(), function_name);
                skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(),
                                                function_name);

                // METADATA and MEMORY_PLANE aspects are never valid for input attachments.
                if (aspect_mask & VK_IMAGE_ASPECT_METADATA_BIT) {
                    vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-02801"
                                   : "VUID-VkInputAttachmentAspectReference-aspectMask-01964";
                    skip |= LogError(
                        device, vuid,
                        "%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.",
                        function_name, j, i);
                } else if (aspect_mask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
                                          VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
                    vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-04563"
                                   : "VUID-VkInputAttachmentAspectReference-aspectMask-02250";
                    skip |= LogError(device, vuid,
                                     "%s: Aspect mask for input attachment reference %d in subpass %d includes "
                                     "VK_IMAGE_ASPECT_MEMORY_PLANE_*_BIT_EXT bit.",
                                     function_name, j, i);
                }

                // Checks below this guard are safe to index pAttachments.
                if (attachment_index < pCreateInfo->attachmentCount) {
                    skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_INPUT,
                                             attachment_ref.layout);

                    vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-attachment-02525" : "VUID-VkRenderPassCreateInfo-pNext-01963";
                    skip |= ValidateImageAspectMask(VK_NULL_HANDLE, pCreateInfo->pAttachments[attachment_index].format, aspect_mask,
                                                    function_name, vuid);

                    if (attach_first_use[attachment_index]) {
                        skip |=
                            ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pInputAttachments[j].layout,
                                                                  attachment_index, pCreateInfo->pAttachments[attachment_index]);

                        // If the attachment is *only* used as input in this subpass, LOAD_OP_CLEAR
                        // is illegal on its first use (the clear would have nowhere to land).
                        bool used_as_depth = (subpass.pDepthStencilAttachment != NULL &&
                                              subpass.pDepthStencilAttachment->attachment == attachment_index);
                        bool used_as_color = false;
                        for (uint32_t k = 0; !used_as_depth && !used_as_color && k < subpass.colorAttachmentCount; ++k) {
                            used_as_color = (subpass.pColorAttachments[k].attachment == attachment_index);
                        }
                        if (!used_as_depth && !used_as_color &&
                            pCreateInfo->pAttachments[attachment_index].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
                            vuid = use_rp2 ? "VUID-VkSubpassDescription2-loadOp-03064" : "VUID-VkSubpassDescription-loadOp-00846";
                            skip |= LogError(device, vuid,
                                             "%s: attachment %u is first used as an input attachment in %s with loadOp set to "
                                             "VK_ATTACHMENT_LOAD_OP_CLEAR.",
                                             function_name, attachment_index, error_type.c_str());
                        }
                    }
                    attach_first_use[attachment_index] = false;
                }

                if (rp_version == RENDER_PASS_VERSION_2) {
                    // These are validated automatically as part of parameter validation for create renderpass 1
                    // as they are in a struct that only applies to input attachments - not so for v2.

                    // Check for 0
                    if (aspect_mask == 0) {
                        skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02800",
                                         "%s: Input attachment %s aspect mask must not be 0.", function_name, error_type.c_str());
                    } else {
                        const VkImageAspectFlags valid_bits =
                            (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
                             VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT |
                             VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT |
                             VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT |
                             VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT);

                        // Check for valid aspect mask bits
                        if (aspect_mask & ~valid_bits) {
                            skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02799",
                                             "%s: Input attachment %s aspect mask (0x%" PRIx32 ")is invalid.", function_name,
                                             error_type.c_str(), aspect_mask);
                        }
                    }
                }

                // NOTE(review): pAttachments is indexed here outside the attachment_index <
                // attachmentCount guard above; a non-UNUSED out-of-range index was already
                // reported by ValidateAttachmentIndex but would still be dereferenced here -- confirm.
                const VkFormatFeatureFlags valid_flags =
                    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
                const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format;
                const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
                if ((format_features & valid_flags) == 0) {
                    vuid = use_rp2 ? "VUID-VkSubpassDescription2-pInputAttachments-02897"
                                   : "VUID-VkSubpassDescription-pInputAttachments-02647";
                    skip |= LogError(device, vuid,
                                     "%s: Input attachment %s format (%s) does not contain VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT "
                                     "| VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
                                     function_name, error_type.c_str(), string_VkFormat(attachment_format));
                }

                // Validate layout
                vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
                switch (attachment_ref.layout) {
                    case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
                    case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
                    case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
                    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
                    case VK_IMAGE_LAYOUT_GENERAL:
                        break;  // valid layouts
                    default:
                        skip |= LogError(device, vuid,
                                         "%s: %s layout is %s but input attachments must be "
                                         "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, "
                                         "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, or VK_IMAGE_LAYOUT_GENERAL.",
                                         function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
                        break;
                }
            }
        }

        // Preserve attachments: must be a real (non-UNUSED) index; they carry no layout.
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            std::string error_type = "pSubpasses[" + std::to_string(i) + "].pPreserveAttachments[" + std::to_string(j) + "]";
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853";
                skip |= LogError(device, vuid, "%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j);
            } else {
                skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(),
                                                function_name);
                if (attachment < pCreateInfo->attachmentCount) {
                    skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE,
                                             VkImageLayout(0) /* preserve doesn't have any layout */);
                }
            }
        }

        // Resolve attachments: must be single-sampled targets with a color-capable format.
        bool subpass_performs_resolve = false;
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            if (subpass.pResolveAttachments) {
                std::string error_type = "pSubpasses[" + std::to_string(i) + "].pResolveAttachments[" + std::to_string(j) + "]";
                auto const &attachment_ref = subpass.pResolveAttachments[j];
                if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
                    skip |= ValidateAttachmentReference(rp_version, attachment_ref, false, error_type.c_str(), function_name);
                    skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount,
                                                    error_type.c_str(), function_name);

                    if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
                        skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
                                                 ATTACHMENT_RESOLVE, attachment_ref.layout);

                        subpass_performs_resolve = true;

                        if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
                            vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03067"
                                           : "VUID-VkSubpassDescription-pResolveAttachments-00849";
                            skip |= LogError(
                                device, vuid,
                                "%s: Subpass %u requests multisample resolve into attachment %u, which must "
                                "have VK_SAMPLE_COUNT_1_BIT but has %s.",
                                function_name, i, attachment_ref.attachment,
                                string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples));
                        }
                    }

                    // NOTE(review): this read of pAttachments is outside the bounds guard just
                    // above -- an out-of-range (non-UNUSED) resolve index would be dereferenced
                    // here even though ValidateAttachmentIndex already flagged it; confirm.
                    const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_ref.attachment].format;
                    const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
                    if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) {
                        vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-02899"
                                       : "VUID-VkSubpassDescription-pResolveAttachments-02649";
                        skip |= LogError(device, vuid,
                                         "%s: Resolve attachment %s format (%s) does not contain "
                                         "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.",
                                         function_name, error_type.c_str(), string_VkFormat(attachment_format));
                    }
                }
            }
        }

        // Depth/stencil attachment: format features and legal layouts (layout rules tighten
        // further when the same attachment is also used as an input attachment).
        if (subpass.pDepthStencilAttachment) {
            std::string error_type = "pSubpasses[" + std::to_string(i) + "].pDepthStencilAttachment";
            const uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            const VkImageLayout image_layout = subpass.pDepthStencilAttachment->layout;
            if (attachment != VK_ATTACHMENT_UNUSED) {
                skip |= ValidateAttachmentReference(rp_version, *subpass.pDepthStencilAttachment, false, error_type.c_str(),
                                                    function_name);
                skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(),
                                                function_name);
                if (attachment < pCreateInfo->attachmentCount) {
                    skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_DEPTH,
                                             image_layout);

                    if (attach_first_use[attachment]) {
                        skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, image_layout, attachment,
                                                                      pCreateInfo->pAttachments[attachment]);
                    }
                    attach_first_use[attachment] = false;
                }

                // NOTE(review): pAttachments indexed outside the bounds guard above (same
                // pattern as the input/resolve format checks) -- confirm intended.
                const VkFormat attachment_format = pCreateInfo->pAttachments[attachment].format;
                const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
                if ((format_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
                    vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-02900"
                                   : "VUID-VkSubpassDescription-pDepthStencilAttachment-02650";
                    skip |= LogError(device, vuid,
                                     "%s: Depth Stencil %s format (%s) does not contain "
                                     "VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
                                     function_name, error_type.c_str(), string_VkFormat(attachment_format));
                }

                // Check for valid imageLayout
                vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
                switch (image_layout) {
                    case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
                    case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
                    case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
                    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                    case VK_IMAGE_LAYOUT_GENERAL:
                        break;  // valid layouts
                    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
                    case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
                    case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
                    case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
                    case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
                        // These layouts are fine for a pure depth/stencil use, but not when the
                        // attachment doubles as an input attachment in the same render pass.
                        if (input_attachments.find(attachment) != input_attachments.end()) {
                            skip |= LogError(
                                device, vuid,
                                "%s: %s is also an input attachment so the layout (%s) must not be "
                                "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
                                "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or "
                                "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
                                function_name, error_type.c_str(), string_VkImageLayout(image_layout));
                        }
                        break;
                    default:
                        skip |= LogError(device, vuid,
                                         "%s: %s layout is %s but depth/stencil attachments must be "
                                         "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, "
                                         "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, or "
                                         "VK_IMAGE_LAYOUT_GENERAL.",
                                         function_name, error_type.c_str(), string_VkImageLayout(image_layout));
                        break;
                }
            }
        }

        // Color attachments: sample counts must match each other (and, without the AMD/NV
        // mixed-sample extensions, the depth/stencil attachment); resolve sources must be
        // multisampled; formats must support color attachment usage; layouts are restricted.
        uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED;
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            std::string error_type = "pSubpasses[" + std::to_string(i) + "].pColorAttachments[" + std::to_string(j) + "]";
            auto const &attachment_ref = subpass.pColorAttachments[j];
            const uint32_t attachment_index = attachment_ref.attachment;
            if (attachment_index != VK_ATTACHMENT_UNUSED) {
                skip |= ValidateAttachmentReference(rp_version, attachment_ref, false, error_type.c_str(), function_name);
                skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(),
                                                function_name);

                if (attachment_index < pCreateInfo->attachmentCount) {
                    skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_COLOR,
                                             attachment_ref.layout);

                    VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_index].samples;
                    // Note: holds the *color reference index* j of the last used color
                    // attachment, not an attachment index (see the lookup below).
                    if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) {
                        VkSampleCountFlagBits last_sample_count =
                            pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples;
                        if (current_sample_count != last_sample_count) {
                            vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03069"
                                           : "VUID-VkSubpassDescription-pColorAttachments-01417";
                            skip |= LogError(
                                device, vuid,
                                "%s: Subpass %u attempts to render to color attachments with inconsistent sample counts."
                                "Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has "
                                "sample count %s.",
                                function_name, i, j, string_VkSampleCountFlagBits(current_sample_count),
                                last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count));
                        }
                    }
                    last_sample_count_attachment = j;

                    // A resolve source must itself be multisampled.
                    if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) {
                        vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03066"
                                       : "VUID-VkSubpassDescription-pResolveAttachments-00848";
                        skip |= LogError(device, vuid,
                                         "%s: Subpass %u requests multisample resolve from attachment %u which has "
                                         "VK_SAMPLE_COUNT_1_BIT.",
                                         function_name, i, attachment_index);
                    }

                    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED &&
                        subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) {
                        const auto depth_stencil_sample_count =
                            pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;

                        // With VK_AMD_mixed_attachment_samples, color samples may be <= depth/stencil samples.
                        if (device_extensions.vk_amd_mixed_attachment_samples) {
                            if (pCreateInfo->pAttachments[attachment_index].samples > depth_stencil_sample_count) {
                                vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03070"
                                               : "VUID-VkSubpassDescription-pColorAttachments-01506";
                                skip |= LogError(device, vuid, "%s: %s has %s which is larger than depth/stencil attachment %s.",
                                                 function_name, error_type.c_str(),
                                                 string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_index].samples),
                                                 string_VkSampleCountFlagBits(depth_stencil_sample_count));
                                // Stops checking the remaining color attachments of this subpass.
                                break;
                            }
                        }

                        // Without either mixed-samples extension, color and depth/stencil must match exactly.
                        if (!device_extensions.vk_amd_mixed_attachment_samples &&
                            !device_extensions.vk_nv_framebuffer_mixed_samples &&
                            current_sample_count != depth_stencil_sample_count) {
                            vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-03071"
                                           : "VUID-VkSubpassDescription-pDepthStencilAttachment-01418";
                            skip |= LogError(device, vuid,
                                             "%s: Subpass %u attempts to render to use a depth/stencil attachment with sample "
                                             "count that differs "
                                             "from color attachment %u."
                                             "The depth attachment ref has sample count %s, whereas color attachment ref %u has "
                                             "sample count %s.",
                                             function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j,
                                             string_VkSampleCountFlagBits(current_sample_count));
                            // Stops checking the remaining color attachments of this subpass.
                            break;
                        }
                    }

                    const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format;
                    const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
                    if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) {
                        vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-02898"
                                       : "VUID-VkSubpassDescription-pColorAttachments-02648";
                        skip |= LogError(device, vuid,
                                         "%s: Color attachment %s format (%s) does not contain "
                                         "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.",
                                         function_name, error_type.c_str(), string_VkFormat(attachment_format));
                    }

                    if (attach_first_use[attachment_index]) {
                        skip |=
                            ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pColorAttachments[j].layout,
                                                                  attachment_index, pCreateInfo->pAttachments[attachment_index]);
                    }
                    attach_first_use[attachment_index] = false;
                }

                // Check for valid imageLayout
                vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
                switch (attachment_ref.layout) {
                    case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
                    case VK_IMAGE_LAYOUT_GENERAL:
                        break;  // valid layouts
                    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
                        // COLOR_ATTACHMENT_OPTIMAL is disallowed when the attachment is also used as input.
                        if (input_attachments.find(attachment_index) != input_attachments.end()) {
                            skip |= LogError(device, vuid,
                                             "%s: %s is also an input attachment so the layout (%s) must not be "
                                             "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL.",
                                             function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
                        }
                        break;
                    default:
                        skip |= LogError(device, vuid,
                                         "%s: %s layout is %s but color attachments must be "
                                         "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "
                                         "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, or "
                                         "VK_IMAGE_LAYOUT_GENERAL.",
                                         function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
                        break;
                }
            }

            // Pairwise color/resolve checks: the resolve target must have a used color source
            // at the same index, and the two formats must match.
            if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
                subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) {
                if (attachment_index == VK_ATTACHMENT_UNUSED) {
                    vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03065"
                                   : "VUID-VkSubpassDescription-pResolveAttachments-00847";
                    skip |= LogError(device, vuid,
                                     "%s: Subpass %u requests multisample resolve from attachment %u which has "
                                     "attachment=VK_ATTACHMENT_UNUSED.",
                                     function_name, i, attachment_index);
                } else {
                    const auto &color_desc = pCreateInfo->pAttachments[attachment_index];
                    const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
                    if (color_desc.format != resolve_desc.format) {
                        vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03068"
                                       : "VUID-VkSubpassDescription-pResolveAttachments-00850";
                        skip |= LogError(device, vuid,
                                         "%s: %s resolves to an attachment with a "
                                         "different format. color format: %u, resolve format: %u.",
                                         function_name, error_type.c_str(), color_desc.format, resolve_desc.format);
                    }
                }
            }
        }
    }
    return skip;
}
// Top-level render-pass validation shared by the v1 and v2 create paths:
// attachment usage, the subpass dependency DAG, multiview view masks,
// correlated view masks, and per-dependency stage/access mask support.
// `rp_version` picks the VUID set; returns true if anything was logged.
bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version,
                                          const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const {
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;

    skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo, function_name);
    skip |= ValidateRenderPassDAG(rp_version, pCreateInfo);

    // Validate multiview correlation and view masks: either every subpass has a
    // non-zero viewMask or none of them do.
    bool saw_zero_view_mask = false;
    bool saw_nonzero_view_mask = false;
    for (uint32_t subpass_idx = 0; subpass_idx < pCreateInfo->subpassCount; ++subpass_idx) {
        const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[subpass_idx];
        if (subpass.viewMask == 0) {
            saw_zero_view_mask = true;
        } else {
            saw_nonzero_view_mask = true;
        }

        // PER_VIEW_POSITION_X_ONLY is only meaningful together with PER_VIEW_ATTRIBUTES.
        const bool pos_x_only = (subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0;
        const bool per_view_attributes = (subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) != 0;
        if (pos_x_only && !per_view_attributes) {
            vuid = use_rp2 ? "VUID-VkSubpassDescription2-flags-03076" : "VUID-VkSubpassDescription-flags-00856";
            skip |= LogError(device, vuid,
                             "%s: The flags parameter of subpass description %u includes "
                             "VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include "
                             "VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.",
                             function_name, subpass_idx);
        }
    }

    if (use_rp2) {
        if (saw_nonzero_view_mask && saw_zero_view_mask) {
            skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03058",
                             "%s: Some view masks are non-zero whilst others are zero.", function_name);
        }
        if (saw_zero_view_mask && pCreateInfo->correlatedViewMaskCount != 0) {
            skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03057",
                             "%s: Multiview is not enabled but correlation masks are still provided", function_name);
        }
    }

    // Each view bit may appear in at most one correlated view mask.
    uint32_t seen_view_bits = 0;
    for (uint32_t mask_idx = 0; mask_idx < pCreateInfo->correlatedViewMaskCount; ++mask_idx) {
        const uint32_t cvm = pCreateInfo->pCorrelatedViewMasks[mask_idx];
        if (seen_view_bits & cvm) {
            vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pCorrelatedViewMasks-03056"
                           : "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841";
            skip |= LogError(device, vuid, "%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name,
                             mask_idx);
        }
        seen_view_bits |= cvm;
    }

    for (uint32_t dep_idx = 0; dep_idx < pCreateInfo->dependencyCount; ++dep_idx) {
        auto const &dependency = pCreateInfo->pDependencies[dep_idx];

        // Geometry/tessellation stages require the matching device features; VUIDs differ per RP version.
        if (use_rp2) {
            skip |= ValidateStageMaskGsTsEnables(
                dependency.srcStageMask, function_name, "VUID-VkSubpassDependency2-srcStageMask-03080",
                "VUID-VkSubpassDependency2-srcStageMask-03082", "VUID-VkSubpassDependency2-srcStageMask-02103",
                "VUID-VkSubpassDependency2-srcStageMask-02104");
            skip |= ValidateStageMaskGsTsEnables(
                dependency.dstStageMask, function_name, "VUID-VkSubpassDependency2-dstStageMask-03081",
                "VUID-VkSubpassDependency2-dstStageMask-03083", "VUID-VkSubpassDependency2-dstStageMask-02105",
                "VUID-VkSubpassDependency2-dstStageMask-02106");
        } else {
            skip |= ValidateStageMaskGsTsEnables(
                dependency.srcStageMask, function_name, "VUID-VkSubpassDependency-srcStageMask-00860",
                "VUID-VkSubpassDependency-srcStageMask-00862", "VUID-VkSubpassDependency-srcStageMask-02099",
                "VUID-VkSubpassDependency-srcStageMask-02100");
            skip |= ValidateStageMaskGsTsEnables(
                dependency.dstStageMask, function_name, "VUID-VkSubpassDependency-dstStageMask-00861",
                "VUID-VkSubpassDependency-dstStageMask-00863", "VUID-VkSubpassDependency-dstStageMask-02101",
                "VUID-VkSubpassDependency-dstStageMask-02102");
        }

        // Every access bit must be supported by some stage in the corresponding stage mask.
        if (!ValidateAccessMaskPipelineStage(enabled_features, sync_utils::kAllQueueTypes, dependency.srcAccessMask,
                                             dependency.srcStageMask)) {
            vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcAccessMask-03088" : "VUID-VkSubpassDependency-srcAccessMask-00868";
            skip |=
                LogError(device, vuid,
                         "%s: pDependencies[%u].srcAccessMask (0x%" PRIx32 ") is not supported by srcStageMask (0x%" PRIx32 ").",
                         function_name, dep_idx, dependency.srcAccessMask, dependency.srcStageMask);
        }
        if (!ValidateAccessMaskPipelineStage(enabled_features, sync_utils::kAllQueueTypes, dependency.dstAccessMask,
                                             dependency.dstStageMask)) {
            vuid = use_rp2 ? "VUID-VkSubpassDependency2-dstAccessMask-03089" : "VUID-VkSubpassDependency-dstAccessMask-00869";
            skip |=
                LogError(device, vuid,
                         "%s: pDependencies[%u].dstAccessMask (0x%" PRIx32 ") is not supported by dstStageMask (0x%" PRIx32 ").",
                         function_name, dep_idx, dependency.dstAccessMask, dependency.dstStageMask);
        }
    }
    return skip;
}
// vkCreateRenderPass (v1) entry point. First validates the pNext-chain structs that
// only exist on the v1 path (KHR_multiview counts, KHR_maintenance2 input-attachment
// aspects, EXT_fragment_density_map), then converts the create info to the v2
// representation and runs the shared ValidateCreateRenderPass path.
bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
    bool skip = false;

    // Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds)
    const auto *multiview = LvlFindInChain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext);
    if (multiview) {
        // A count of zero means "not provided" and is always acceptable.
        if (multiview->subpassCount && multiview->subpassCount != pCreateInfo->subpassCount) {
            skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01928",
                             "vkCreateRenderPass(): Subpass count is %u but multiview info has a subpass count of %u.",
                             pCreateInfo->subpassCount, multiview->subpassCount);
        } else if (multiview->dependencyCount && multiview->dependencyCount != pCreateInfo->dependencyCount) {
            skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01929",
                             "vkCreateRenderPass(): Dependency count is %u but multiview info has a dependency count of %u.",
                             pCreateInfo->dependencyCount, multiview->dependencyCount);
        }
    }

    // Each aspect reference must point at an existing subpass and input attachment slot.
    const auto *aspect_info = LvlFindInChain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext);
    if (aspect_info) {
        for (uint32_t ref_idx = 0; ref_idx < aspect_info->aspectReferenceCount; ++ref_idx) {
            const uint32_t subpass = aspect_info->pAspectReferences[ref_idx].subpass;
            const uint32_t attachment = aspect_info->pAspectReferences[ref_idx].inputAttachmentIndex;
            if (subpass >= pCreateInfo->subpassCount) {
                skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01926",
                                 "vkCreateRenderPass(): Subpass index %u specified by input attachment aspect info %u is greater "
                                 "than the subpass "
                                 "count of %u for this render pass.",
                                 subpass, ref_idx, pCreateInfo->subpassCount);
            } else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) {
                skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01927",
                                 "vkCreateRenderPass(): Input attachment index %u specified by input attachment aspect info %u is "
                                 "greater than the "
                                 "input attachment count of %u for this subpass.",
                                 attachment, ref_idx, pCreateInfo->pSubpasses[subpass].inputAttachmentCount);
            }
        }
    }

    // Fragment density map attachment: index range, layout, and load/store op constraints.
    const auto *fdm_info = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(pCreateInfo->pNext);
    if (fdm_info) {
        const auto &fdm_ref = fdm_info->fragmentDensityMapAttachment;
        if (fdm_ref.attachment != VK_ATTACHMENT_UNUSED) {
            if (fdm_ref.attachment >= pCreateInfo->attachmentCount) {
                skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02547",
                                 "vkCreateRenderPass(): fragmentDensityMapAttachment %u must be less than attachmentCount %u of "
                                 "for this render pass.",
                                 fdm_ref.attachment, pCreateInfo->attachmentCount);
            } else {
                const bool layout_ok = (fdm_ref.layout == VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT) ||
                                       (fdm_ref.layout == VK_IMAGE_LAYOUT_GENERAL);
                if (!layout_ok) {
                    skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02549",
                                     "vkCreateRenderPass(): Layout of fragmentDensityMapAttachment %u' must be equal to "
                                     "VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT, or VK_IMAGE_LAYOUT_GENERAL.",
                                     fdm_ref.attachment);
                }
                const auto &fdm_desc = pCreateInfo->pAttachments[fdm_ref.attachment];
                const bool load_op_ok =
                    (fdm_desc.loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) || (fdm_desc.loadOp == VK_ATTACHMENT_LOAD_OP_DONT_CARE);
                if (!load_op_ok) {
                    skip |= LogError(
                        device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02550",
                        "vkCreateRenderPass(): FragmentDensityMapAttachment %u' must reference an attachment with a loadOp "
                        "equal to VK_ATTACHMENT_LOAD_OP_LOAD or VK_ATTACHMENT_LOAD_OP_DONT_CARE.",
                        fdm_ref.attachment);
                }
                if (fdm_desc.storeOp != VK_ATTACHMENT_STORE_OP_DONT_CARE) {
                    skip |= LogError(
                        device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02551",
                        "vkCreateRenderPass(): FragmentDensityMapAttachment %u' must reference an attachment with a storeOp "
                        "equal to VK_ATTACHMENT_STORE_OP_DONT_CARE.",
                        fdm_ref.attachment);
                }
            }
        }
    }

    // Only run the (more expensive) shared validation when the RP1-only checks passed.
    if (!skip) {
        safe_VkRenderPassCreateInfo2 create_info_2;
        ConvertVkRenderPassCreateInfoToV2KHR(*pCreateInfo, &create_info_2);
        skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, create_info_2.ptr(), "vkCreateRenderPass()");
    }

    return skip;
}
// Validates every VkSubpassDescriptionDepthStencilResolve chained to a subpass of the given
// render pass create info (VK_KHR_depth_stencil_resolve / Vulkan 1.2 core).
// core12_props supplies the device's supported resolve modes and independent-resolve capabilities;
// function_name tags the error messages. Returns true if any violation was logged.
bool CoreChecks::ValidateDepthStencilResolve(const VkPhysicalDeviceVulkan12Properties &core12_props,
                                             const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const {
    bool skip = false;
    // If the pNext list of VkSubpassDescription2 includes a VkSubpassDescriptionDepthStencilResolve structure,
    // then that structure describes depth/stencil resolve operations for the subpass.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
        const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i];
        const auto *resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass.pNext);
        if (resolve == nullptr) {
            continue;
        }
        // Presence/in-range flags, combined in the checks below so that out-of-range attachment
        // indices are never used to index pAttachments.
        const bool resolve_attachment_not_unused = (resolve->pDepthStencilResolveAttachment != nullptr &&
                                                    resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED);
        const bool valid_resolve_attachment_index =
            (resolve_attachment_not_unused && resolve->pDepthStencilResolveAttachment->attachment < pCreateInfo->attachmentCount);
        const bool ds_attachment_not_unused =
            (subpass.pDepthStencilAttachment != nullptr && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED);
        const bool valid_ds_attachment_index =
            (ds_attachment_not_unused && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount);
        // 03177: a resolve attachment requires a used depth/stencil attachment to resolve from.
        if (resolve_attachment_not_unused && subpass.pDepthStencilAttachment != nullptr &&
            subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03177",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure with resolve attachment %u, but pDepthStencilAttachment=VK_ATTACHMENT_UNUSED.",
                             function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
        }
        // 03178: with a resolve attachment present, at least one resolve mode must be enabled.
        if (resolve_attachment_not_unused && resolve->depthResolveMode == VK_RESOLVE_MODE_NONE &&
            resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03178",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure with resolve attachment %u, but both depth and stencil resolve modes are "
                             "VK_RESOLVE_MODE_NONE.",
                             function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
        }
        // 03179: the resolve source (depth/stencil attachment) must be multisampled.
        if (resolve_attachment_not_unused && valid_ds_attachment_index &&
            pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
            skip |= LogError(
                device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03179",
                "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                "structure with resolve attachment %u. However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.",
                function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
        }
        // 03180: the resolve destination must be single-sampled.
        if (valid_resolve_attachment_index &&
            pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03180",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.",
                             function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
        }
        // VK_FORMAT_UNDEFINED stands in for "no valid attachment" so the format checks below pass trivially.
        VkFormat depth_stencil_attachment_format =
            (valid_ds_attachment_index ? pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format
                                       : VK_FORMAT_UNDEFINED);
        VkFormat depth_stencil_resolve_attachment_format =
            (valid_resolve_attachment_index ? pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format
                                            : VK_FORMAT_UNDEFINED);
        // 03181/03182: the depth (resp. stencil) component of source and destination must match in
        // bit count and numerical type, whenever the destination actually has that component.
        if (valid_ds_attachment_index && valid_resolve_attachment_index) {
            const auto resolve_depth_size = FormatDepthSize(depth_stencil_resolve_attachment_format);
            const auto resolve_stencil_size = FormatStencilSize(depth_stencil_resolve_attachment_format);
            if (resolve_depth_size > 0 && ((FormatDepthSize(depth_stencil_attachment_format) != resolve_depth_size) ||
                                           (FormatDepthNumericalType(depth_stencil_attachment_format) !=
                                            FormatDepthNumericalType(depth_stencil_resolve_attachment_format)))) {
                skip |= LogError(
                    device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03181",
                    "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                    "structure with resolve attachment %u which has a depth component (size %u). The depth component "
                    "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
                    function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_depth_size,
                    FormatDepthSize(depth_stencil_attachment_format));
            }
            if (resolve_stencil_size > 0 && ((FormatStencilSize(depth_stencil_attachment_format) != resolve_stencil_size) ||
                                             (FormatStencilNumericalType(depth_stencil_attachment_format) !=
                                              FormatStencilNumericalType(depth_stencil_resolve_attachment_format)))) {
                skip |= LogError(
                    device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03182",
                    "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                    "structure with resolve attachment %u which has a stencil component (size %u). The stencil component "
                    "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
                    function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_stencil_size,
                    FormatStencilSize(depth_stencil_attachment_format));
            }
        }
        // 03183/03184: each requested resolve mode must be NONE or advertised by the device.
        if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE ||
              resolve->depthResolveMode & core12_props.supportedDepthResolveModes)) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-depthResolveMode-03183",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure with invalid depthResolveMode=%u.",
                             function_name, i, resolve->depthResolveMode);
        }
        if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE ||
              resolve->stencilResolveMode & core12_props.supportedStencilResolveModes)) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-stencilResolveMode-03184",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure with invalid stencilResolveMode=%u.",
                             function_name, i, resolve->stencilResolveMode);
        }
        // 03185: without independentResolve or independentResolveNone, both modes must be identical.
        if (valid_resolve_attachment_index && FormatIsDepthAndStencil(depth_stencil_resolve_attachment_format) &&
            core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_FALSE &&
            !(resolve->depthResolveMode == resolve->stencilResolveMode)) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03185",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.",
                             function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode);
        }
        // 03186: with only independentResolveNone, the modes must match or one must be NONE.
        if (valid_resolve_attachment_index && FormatIsDepthAndStencil(depth_stencil_resolve_attachment_format) &&
            core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_TRUE &&
            !(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE ||
              resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE)) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03186",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or "
                             "one of them must be %u.",
                             function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE);
        }
    }
    return skip;
}
// Shared implementation behind vkCreateRenderPass2 / vkCreateRenderPass2KHR validation.
// Runs the extension-specific checks first, then the version-agnostic render pass validation.
// function_name tags all messages with the calling entry point's name.
bool CoreChecks::ValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
                                           const char *function_name) const {
    // Depth/stencil resolve rules only apply when VK_KHR_depth_stencil_resolve is enabled.
    const bool check_ds_resolve = device_extensions.vk_khr_depth_stencil_resolve;
    bool skip = check_ds_resolve ? ValidateDepthStencilResolve(phys_dev_props_core12, pCreateInfo, function_name) : false;
    skip |= ValidateFragmentShadingRateAttachments(device, pCreateInfo);
    // Hand a safe copy of the create info to the version-shared validator.
    safe_VkRenderPassCreateInfo2 create_info_2(pCreateInfo);
    skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, create_info_2.ptr(), function_name);
    return skip;
}
// Validates fragment shading rate attachment usage in a render pass (VK_KHR_fragment_shading_rate).
// For every attachment description, finds the subpasses that use it as a fragment shading rate
// attachment, validates each such use (format features, layout, texel size limits), and then
// flags any subpass that also uses the same attachment for any other purpose (04585).
// Returns true if any violation was logged.
bool CoreChecks::ValidateFragmentShadingRateAttachments(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo) const {
    bool skip = false;
    if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
        for (uint32_t attachment_description = 0; attachment_description < pCreateInfo->attachmentCount; ++attachment_description) {
            std::vector<uint32_t> used_as_fragment_shading_rate_attachment;
            // Prepass to find any use as a fragment shading rate attachment structures and validate them independently
            for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
                const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment =
                    LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(pCreateInfo->pSubpasses[subpass].pNext);
                if (fragment_shading_rate_attachment && fragment_shading_rate_attachment->pFragmentShadingRateAttachment) {
                    const VkAttachmentReference2 &attachment_reference =
                        *(fragment_shading_rate_attachment->pFragmentShadingRateAttachment);
                    if (attachment_reference.attachment == attachment_description) {
                        used_as_fragment_shading_rate_attachment.push_back(subpass);
                    }
                    // 04521: no fragment shading rate attachments with QCOM render pass transform.
                    if (((pCreateInfo->flags & VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM) != 0) &&
                        (attachment_reference.attachment != VK_ATTACHMENT_UNUSED)) {
                        skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-flags-04521",
                                         "vkCreateRenderPass2: Render pass includes VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM but "
                                         "a fragment shading rate attachment is specified in subpass %u.",
                                         subpass);
                    }
                    if (attachment_reference.attachment != VK_ATTACHMENT_UNUSED) {
                        // 04586: the attachment's format must support FSR attachment usage.
                        const VkFormatFeatureFlags potential_format_features =
                            GetPotentialFormatFeatures(pCreateInfo->pAttachments[attachment_reference.attachment].format);
                        if (!(potential_format_features & VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR)) {
                            skip |=
                                LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-04586",
                                         "vkCreateRenderPass2: Attachment description %u is used in subpass %u as a fragment "
                                         "shading rate attachment, but specifies format %s, which does not support "
                                         "VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR.",
                                         attachment_reference.attachment, subpass,
                                         string_VkFormat(pCreateInfo->pAttachments[attachment_reference.attachment].format));
                        }
                        // 04524: layout must be GENERAL or FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL.
                        if (attachment_reference.layout != VK_IMAGE_LAYOUT_GENERAL &&
                            attachment_reference.layout != VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR) {
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04524",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u specifies a layout of %s.",
                                subpass, string_VkImageLayout(attachment_reference.layout));
                        }
                        // 04525-04532: texel size must be power-of-two, within device limits, and
                        // within the maximum aspect ratio in both orientations.
                        const VkExtent2D &texel_size = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize;
                        if (!IsPowerOfTwo(texel_size.width)) {
                            skip |=
                                LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04525",
                                         "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a "
                                         "non-power-of-two texel width of %u.",
                                         subpass, texel_size.width);
                        }
                        if (texel_size.width <
                            phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width) {
                            // BUGFIX: the LogError result was previously discarded; fold it into |skip|
                            // like every other check so validation failure is reported to the caller.
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04526",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which "
                                "is lower than the advertised minimum width %u.",
                                subpass, texel_size.width,
                                phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width);
                        }
                        if (texel_size.width >
                            phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width) {
                            // BUGFIX: was a bare LogError call (result discarded).
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04527",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which "
                                "is higher than the advertised maximum width %u.",
                                subpass, texel_size.width,
                                phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width);
                        }
                        if (!IsPowerOfTwo(texel_size.height)) {
                            skip |=
                                LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04528",
                                         "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a "
                                         "non-power-of-two texel height of %u.",
                                         subpass, texel_size.height);
                        }
                        if (texel_size.height <
                            phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height) {
                            // BUGFIX: was a bare LogError call (result discarded).
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04529",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u "
                                "which is lower than the advertised minimum height %u.",
                                subpass, texel_size.height,
                                phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height);
                        }
                        if (texel_size.height >
                            phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height) {
                            // BUGFIX: was a bare LogError call (result discarded).
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04530",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u "
                                "which is higher than the advertised maximum height %u.",
                                subpass, texel_size.height,
                                phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height);
                        }
                        // Guard against integer division by zero; a zero texel dimension is already
                        // flagged by the power-of-two / minimum-size checks above.
                        if (texel_size.width != 0 && texel_size.height != 0) {
                            const uint32_t aspect_ratio = texel_size.width / texel_size.height;
                            const uint32_t inverse_aspect_ratio = texel_size.height / texel_size.width;
                            if (aspect_ratio >
                                phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) {
                                // BUGFIX: was a bare LogError call (result discarded).
                                skip |= LogError(
                                    device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04531",
                                    "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by %u, "
                                    "which has an aspect ratio %u, which is higher than the advertised maximum aspect ratio %u.",
                                    subpass, texel_size.width, texel_size.height, aspect_ratio,
                                    phys_dev_ext_props.fragment_shading_rate_props
                                        .maxFragmentShadingRateAttachmentTexelSizeAspectRatio);
                            }
                            if (inverse_aspect_ratio >
                                phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) {
                                // BUGFIX: was a bare LogError call (result discarded).
                                skip |= LogError(
                                    device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04532",
                                    "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by %u, "
                                    "which has an inverse aspect ratio of %u, which is higher than the advertised maximum aspect ratio "
                                    "%u.",
                                    subpass, texel_size.width, texel_size.height, inverse_aspect_ratio,
                                    phys_dev_ext_props.fragment_shading_rate_props
                                        .maxFragmentShadingRateAttachmentTexelSizeAspectRatio);
                            }
                        }
                    }
                }
            }
            // Lambda turning a vector of subpass indices into a readable list ("0", "0 and 1", "0, 1, and 2").
            auto vector_to_string = [](const std::vector<uint32_t> &vector) {
                std::stringstream ss;
                size_t size = vector.size();
                // BUGFIX: iterate over the parameter's size; the original looped over the captured
                // outer vector, which only worked because the lambda happened to be called with it.
                for (size_t i = 0; i < size; i++) {
                    if (size == 2 && i == 1) {
                        ss << " and ";
                    } else if (size > 2 && i == size - 2) {
                        ss << ", and ";
                    } else if (i != 0) {
                        ss << ", ";
                    }
                    ss << vector[i];
                }
                return ss.str();
            };
            // Search for other uses of the same attachment
            if (!used_as_fragment_shading_rate_attachment.empty()) {
                // The subpass list is invariant below; stringify it once instead of once per subpass.
                const std::string fsr_attachment_subpasses_string = vector_to_string(used_as_fragment_shading_rate_attachment);
                for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
                    const VkSubpassDescription2 &subpass_info = pCreateInfo->pSubpasses[subpass];
                    const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve_attachment =
                        LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_info.pNext);
                    for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) {
                        if (subpass_info.pColorAttachments[attachment].attachment == attachment_description) {
                            skip |= LogError(
                                device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
                                "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
                                "subpass(es) %s but also as color attachment %u in subpass %u",
                                attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
                        }
                    }
                    for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) {
                        if (subpass_info.pResolveAttachments &&
                            subpass_info.pResolveAttachments[attachment].attachment == attachment_description) {
                            skip |= LogError(
                                device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
                                "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
                                "subpass(es) %s but also as color resolve attachment %u in subpass %u",
                                attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
                        }
                    }
                    for (uint32_t attachment = 0; attachment < subpass_info.inputAttachmentCount; ++attachment) {
                        if (subpass_info.pInputAttachments[attachment].attachment == attachment_description) {
                            skip |= LogError(
                                device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
                                "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
                                "subpass(es) %s but also as input attachment %u in subpass %u",
                                attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
                        }
                    }
                    if (subpass_info.pDepthStencilAttachment) {
                        if (subpass_info.pDepthStencilAttachment->attachment == attachment_description) {
                            skip |= LogError(
                                device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
                                "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
                                "subpass(es) %s but also as the depth/stencil attachment in subpass %u",
                                attachment_description, fsr_attachment_subpasses_string.c_str(), subpass);
                        }
                    }
                    if (depth_stencil_resolve_attachment && depth_stencil_resolve_attachment->pDepthStencilResolveAttachment) {
                        if (depth_stencil_resolve_attachment->pDepthStencilResolveAttachment->attachment ==
                            attachment_description) {
                            skip |= LogError(
                                device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
                                "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment in "
                                "subpass(es) %s but also as the depth/stencil resolve attachment in subpass %u",
                                attachment_description, fsr_attachment_subpasses_string.c_str(), subpass);
                        }
                    }
                }
            }
        }
    }
    return skip;
}
// Entry point for vkCreateRenderPass2KHR: delegate to the shared implementation,
// tagging validation messages with the KHR entry point name.
bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
    static const char *api_name = "vkCreateRenderPass2KHR()";
    return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, api_name);
}
// Entry point for the core (Vulkan 1.2) vkCreateRenderPass2: delegate to the shared
// implementation, tagging validation messages with the core entry point name.
bool CoreChecks::PreCallValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
    static const char *api_name = "vkCreateRenderPass2()";
    return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, api_name);
}
// Logs |error_code| if |pCB| is not a primary command buffer; |cmd_name| names the offending
// command in the message. Returns true if the error was logged.
bool CoreChecks::ValidatePrimaryCommandBuffer(const CMD_BUFFER_STATE *pCB, char const *cmd_name, const char *error_code) const {
    // Primary-level buffers pass through without error.
    if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        return false;
    }
    return LogError(pCB->commandBuffer, error_code, "Cannot execute command %s on a secondary command buffer.", cmd_name);
}
bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin) const {
bool skip = false;
const safe_VkFramebufferCreateInfo *framebuffer_info = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo;
if (pRenderPassBegin->renderArea.offset.x < 0 ||
(pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > framebuffer_info->width ||
pRenderPassBegin->renderArea.offset.y < 0 ||
(pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > framebuffer_info->height) {
skip |= static_cast<bool>(LogError(
pRenderPassBegin->renderPass, kVUID_Core_DrawState_InvalidRenderArea,
"Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width "
"%d, height %d. Framebuffer: width %d, height %d.",
pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
pRenderPassBegin->renderArea.extent.height, framebuffer_info->width, framebuffer_info->height));
}
return skip;
}
// Validates the image views supplied through VkRenderPassAttachmentBeginInfo (imageless
// framebuffers, VK_KHR_imageless_framebuffer / Vulkan 1.2) against both the framebuffer's
// VkFramebufferAttachmentImageInfo entries and the render pass attachment descriptions.
// Returns true if any violation was logged; func_name tags the messages.
bool CoreChecks::VerifyFramebufferAndRenderPassImageViews(const VkRenderPassBeginInfo *pRenderPassBeginInfo,
                                                          const char *func_name) const {
    bool skip = false;
    const VkRenderPassAttachmentBeginInfo *render_pass_attachment_begin_info =
        LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBeginInfo->pNext);
    if (render_pass_attachment_begin_info && render_pass_attachment_begin_info->attachmentCount != 0) {
        const safe_VkFramebufferCreateInfo *framebuffer_create_info =
            &GetFramebufferState(pRenderPassBeginInfo->framebuffer)->createInfo;
        const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info =
            LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(framebuffer_create_info->pNext);
        if ((framebuffer_create_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) {
            // 03207: attachments may only be supplied at begin time for an imageless framebuffer.
            skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03207",
                             "%s: Image views specified at render pass begin, but framebuffer not created with "
                             "VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT",
                             func_name);
        } else if (framebuffer_attachments_create_info) {
            if (framebuffer_attachments_create_info->attachmentImageInfoCount !=
                render_pass_attachment_begin_info->attachmentCount) {
                // 03208: the view count must match the count baked into the framebuffer.
                skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03208",
                                 "%s: %u image views specified at render pass begin, but framebuffer "
                                 "created expecting %u attachments",
                                 func_name, render_pass_attachment_begin_info->attachmentCount,
                                 framebuffer_attachments_create_info->attachmentImageInfoCount);
            } else {
                const safe_VkRenderPassCreateInfo2 *render_pass_create_info =
                    &GetRenderPassState(pRenderPassBeginInfo->renderPass)->createInfo;
                for (uint32_t i = 0; i < render_pass_attachment_begin_info->attachmentCount; ++i) {
                    const auto image_view_state = GetImageViewState(render_pass_attachment_begin_info->pAttachments[i]);
                    const VkImageViewCreateInfo *image_view_create_info = &image_view_state->create_info;
                    const VkFramebufferAttachmentImageInfo *framebuffer_attachment_image_info =
                        &framebuffer_attachments_create_info->pAttachmentImageInfos[i];
                    const VkImageCreateInfo *image_create_info = &GetImageState(image_view_create_info->image)->createInfo;
                    // 03209: creation flags of the view's image must match what the framebuffer promised.
                    if (framebuffer_attachment_image_info->flags != image_create_info->flags) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03209",
                                         "%s: Image view #%u created from an image with flags set as 0x%X, "
                                         "but image info #%u used to create the framebuffer had flags set as 0x%X",
                                         func_name, i, image_create_info->flags, i, framebuffer_attachment_image_info->flags);
                    }
                    // 04627: usage must match, accounting for VkImageViewUsageCreateInfo restricting the
                    // view to a subset of the image's usage (the "inherited" usage).
                    if (framebuffer_attachment_image_info->usage != image_view_state->inherited_usage) {
                        // Give clearer message if this error is due to the "inherited" part or not
                        if (image_create_info->usage == image_view_state->inherited_usage) {
                            skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627",
                                             "%s: Image view #%u created from an image with usage set as 0x%X, "
                                             "but image info #%u used to create the framebuffer had usage set as 0x%X",
                                             func_name, i, image_create_info->usage, i, framebuffer_attachment_image_info->usage);
                        } else {
                            skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627",
                                             "%s: Image view #%u created from an image with usage set as 0x%X but using "
                                             "VkImageViewUsageCreateInfo the inherited usage is the subset 0x%X "
                                             "and the image info #%u used to create the framebuffer had usage set as 0x%X",
                                             func_name, i, image_create_info->usage, image_view_state->inherited_usage, i,
                                             framebuffer_attachment_image_info->usage);
                        }
                    }
                    // 03211/03212: dimensions are compared at the view's base mip level.
                    // NOTE(review): no max(1, ...) clamp is applied, so the shift can reach zero for high
                    // mip levels of small images — confirm this matches the VU's intended mip-extent rule.
                    uint32_t view_width = image_create_info->extent.width >> image_view_create_info->subresourceRange.baseMipLevel;
                    if (framebuffer_attachment_image_info->width != view_width) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03211",
                                         "%s: Image view #%u created from an image subresource with width set as %u, "
                                         "but image info #%u used to create the framebuffer had width set as %u",
                                         func_name, i, view_width, i, framebuffer_attachment_image_info->width);
                    }
                    // BUGFIX: derive the height from extent.height — this previously read extent.width
                    // (copy-paste from the width check above), so the 03212 check compared the
                    // framebuffer's height against the image's mip-level *width*.
                    uint32_t view_height = image_create_info->extent.height >> image_view_create_info->subresourceRange.baseMipLevel;
                    if (framebuffer_attachment_image_info->height != view_height) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03212",
                                         "%s: Image view #%u created from an image subresource with height set as %u, "
                                         "but image info #%u used to create the framebuffer had height set as %u",
                                         func_name, i, view_height, i, framebuffer_attachment_image_info->height);
                    }
                    // 03213: layer counts must match.
                    if (framebuffer_attachment_image_info->layerCount != image_view_create_info->subresourceRange.layerCount) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03213",
                                         "%s: Image view #%u created with a subresource range with a layerCount of %u, "
                                         "but image info #%u used to create the framebuffer had layerCount set as %u",
                                         func_name, i, image_view_create_info->subresourceRange.layerCount, i,
                                         framebuffer_attachment_image_info->layerCount);
                    }
                    // 03214/03215: if the image declared a view-format list, it must agree with the
                    // framebuffer's declared list (same count, and every format present).
                    const VkImageFormatListCreateInfo *image_format_list_create_info =
                        LvlFindInChain<VkImageFormatListCreateInfo>(image_create_info->pNext);
                    if (image_format_list_create_info) {
                        if (image_format_list_create_info->viewFormatCount != framebuffer_attachment_image_info->viewFormatCount) {
                            skip |= LogError(
                                pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03214",
                                "VkRenderPassBeginInfo: Image view #%u created with an image with a viewFormatCount of %u, "
                                "but image info #%u used to create the framebuffer had viewFormatCount set as %u",
                                i, image_format_list_create_info->viewFormatCount, i,
                                framebuffer_attachment_image_info->viewFormatCount);
                        }
                        for (uint32_t j = 0; j < image_format_list_create_info->viewFormatCount; ++j) {
                            bool format_found = false;
                            for (uint32_t k = 0; k < framebuffer_attachment_image_info->viewFormatCount; ++k) {
                                if (image_format_list_create_info->pViewFormats[j] ==
                                    framebuffer_attachment_image_info->pViewFormats[k]) {
                                    format_found = true;
                                }
                            }
                            if (!format_found) {
                                skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03215",
                                                 "VkRenderPassBeginInfo: Image view #%u created with an image including the format "
                                                 "%s in its view format list, "
                                                 "but image info #%u used to create the framebuffer does not include this format",
                                                 i, string_VkFormat(image_format_list_create_info->pViewFormats[j]), i);
                            }
                        }
                    }
                    // 03216/03217: view format and image sample count must match the render pass
                    // attachment description.
                    if (render_pass_create_info->pAttachments[i].format != image_view_create_info->format) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03216",
                                         "%s: Image view #%u created with a format of %s, "
                                         "but render pass attachment description #%u created with a format of %s",
                                         func_name, i, string_VkFormat(image_view_create_info->format), i,
                                         string_VkFormat(render_pass_create_info->pAttachments[i].format));
                    }
                    if (render_pass_create_info->pAttachments[i].samples != image_create_info->samples) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03217",
                                         "%s: Image view #%u created with an image with %s samples, "
                                         "but render pass attachment description #%u created with %s samples",
                                         func_name, i, string_VkSampleCountFlagBits(image_create_info->samples), i,
                                         string_VkSampleCountFlagBits(render_pass_create_info->pAttachments[i].samples));
                    }
                    // 03218: views must cover exactly one mip level.
                    if (image_view_create_info->subresourceRange.levelCount != 1) {
                        skip |= LogError(render_pass_attachment_begin_info->pAttachments[i],
                                         "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03218",
                                         "%s: Image view #%u created with multiple (%u) mip levels.", func_name, i,
                                         image_view_create_info->subresourceRange.levelCount);
                    }
                    // 03219: views must use the identity component swizzle.
                    if (IsIdentitySwizzle(image_view_create_info->components) == false) {
                        skip |= LogError(
                            render_pass_attachment_begin_info->pAttachments[i],
                            "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03219",
                            "%s: Image view #%u created with non-identity swizzle. All "
                            "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
                            "swizzle values:\n"
                            "r swizzle = %s\n"
                            "g swizzle = %s\n"
                            "b swizzle = %s\n"
                            "a swizzle = %s\n",
                            func_name, i, string_VkComponentSwizzle(image_view_create_info->components.r),
                            string_VkComponentSwizzle(image_view_create_info->components.g),
                            string_VkComponentSwizzle(image_view_create_info->components.b),
                            string_VkComponentSwizzle(image_view_create_info->components.a));
                    }
                    // 04114: 3D views cannot be used as framebuffer attachments here.
                    if (image_view_create_info->viewType == VK_IMAGE_VIEW_TYPE_3D) {
                        skip |= LogError(render_pass_attachment_begin_info->pAttachments[i],
                                         "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-04114",
                                         "%s: Image view #%u created with type VK_IMAGE_VIEW_TYPE_3D", func_name, i);
                    }
                }
            }
        }
    }
    return skip;
}
// Decides whether |op| applies to |format| given the attachment's color/depth op and stencil op:
// for stencil-only formats only the stencil op is considered, for combined depth/stencil formats
// both are, and for color or depth-only formats only the color/depth op is.
// TODO: The memory valid flag in DEVICE_MEMORY_STATE should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    const bool stencil_only = FormatIsStencilOnly(format);
    const bool has_stencil_aspect = stencil_only || FormatIsDepthAndStencil(format);
    // The op matches when the relevant aspect's op equals it.
    const bool color_depth_matches = !stencil_only && (color_depth_op == op);
    const bool stencil_matches = has_stencil_aspect && (stencil_op == op);
    return color_depth_matches || stencil_matches;
}
// Shared validation for vkCmdBeginRenderPass()/vkCmdBeginRenderPass2[KHR]().
// Checks the EXT_sample_locations chained structs, pClearValues coverage of
// every LOAD_OP_CLEAR attachment, framebuffer/render-pass compatibility,
// command-buffer state, and VkDeviceGroupRenderPassBeginInfo device masks.
// Fix: corrected the "invaild" typo in the 00908 error message.
bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version,
                                            const VkRenderPassBeginInfo *pRenderPassBegin) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;
    const char *const function_name = use_rp2 ? "vkCmdBeginRenderPass2()" : "vkCmdBeginRenderPass()";
    if (render_pass_state) {
        uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
        // Handle extension struct from EXT_sample_locations
        const VkRenderPassSampleLocationsBeginInfoEXT *sample_locations_begin_info =
            LvlFindInChain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext);
        if (sample_locations_begin_info) {
            // Attachment indices in the sample-locations struct must be in range.
            for (uint32_t i = 0; i < sample_locations_begin_info->attachmentInitialSampleLocationsCount; ++i) {
                const VkAttachmentSampleLocationsEXT &sample_location =
                    sample_locations_begin_info->pAttachmentInitialSampleLocations[i];
                skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
                if (sample_location.attachmentIndex >= render_pass_state->createInfo.attachmentCount) {
                    skip |=
                        LogError(device, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531",
                                 "%s: Attachment index %u specified by attachment sample locations %u is greater than the "
                                 "attachment count of %u for the render pass being begun.",
                                 function_name, sample_location.attachmentIndex, i, render_pass_state->createInfo.attachmentCount);
                }
            }
            // Likewise for per-subpass sample locations.
            for (uint32_t i = 0; i < sample_locations_begin_info->postSubpassSampleLocationsCount; ++i) {
                const VkSubpassSampleLocationsEXT &sample_location = sample_locations_begin_info->pPostSubpassSampleLocations[i];
                skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
                if (sample_location.subpassIndex >= render_pass_state->createInfo.subpassCount) {
                    skip |=
                        LogError(device, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532",
                                 "%s: Subpass index %u specified by subpass sample locations %u is greater than the subpass count "
                                 "of %u for the render pass being begun.",
                                 function_name, sample_location.subpassIndex, i, render_pass_state->createInfo.subpassCount);
                }
            }
        }
        // Find the highest attachment index that uses LOAD_OP_CLEAR; pClearValues
        // must have at least that many entries.
        for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
            auto attachment = &render_pass_state->createInfo.pAttachments[i];
            if (FormatSpecificLoadAndStoreOpSettings(attachment->format, attachment->loadOp, attachment->stencilLoadOp,
                                                     VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                clear_op_size = static_cast<uint32_t>(i) + 1;
                if (FormatHasDepth(attachment->format)) {
                    skip |= ValidateClearDepthStencilValue(commandBuffer, pRenderPassBegin->pClearValues[i].depthStencil,
                                                          function_name);
                }
            }
        }
        if (clear_op_size > pRenderPassBegin->clearValueCount) {
            skip |= LogError(render_pass_state->renderPass, "VUID-VkRenderPassBeginInfo-clearValueCount-00902",
                             "In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
                             "must be at least %u entries in pClearValues array to account for the highest index attachment in "
                             "%s that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by "
                             "attachment number so even if some pClearValues entries between 0 and %u correspond to attachments "
                             "that aren't cleared they will be ignored.",
                             function_name, pRenderPassBegin->clearValueCount, clear_op_size,
                             report_data->FormatHandle(render_pass_state->renderPass).c_str(), clear_op_size, clear_op_size - 1);
        }
        skip |= VerifyFramebufferAndRenderPassImageViews(pRenderPassBegin, function_name);
        skip |= VerifyRenderAreaBounds(pRenderPassBegin);
        skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state, pRenderPassBegin,
                                                      GetFramebufferState(pRenderPassBegin->framebuffer));
        // NOTE(review): framebuffer is dereferenced without a null check here;
        // presumably an invalid framebuffer handle is rejected earlier -- confirm.
        if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
            skip |= ValidateRenderPassCompatibility("render pass", render_pass_state, "framebuffer", framebuffer->rp_state.get(),
                                                    function_name, "VUID-VkRenderPassBeginInfo-renderPass-00904");
        }
        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-renderpass" : "VUID-vkCmdBeginRenderPass-renderpass";
        skip |= InsideRenderPass(cb_state, function_name, vuid);
        skip |= ValidateDependencies(framebuffer, render_pass_state);
        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-bufferlevel" : "VUID-vkCmdBeginRenderPass-bufferlevel";
        skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);
        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-commandBuffer-cmdpool" : "VUID-vkCmdBeginRenderPass-commandBuffer-cmdpool";
        skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
        const CMD_TYPE cmd_type = use_rp2 ? CMD_BEGINRENDERPASS2 : CMD_BEGINRENDERPASS;
        skip |= ValidateCmd(cb_state, cmd_type, function_name);
    }
    auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext);
    if (chained_device_group_struct) {
        skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
                                                        "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
        skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
                                         "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
        skip |= ValidateDeviceMaskToCommandBuffer(cb_state, chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
                                                  "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");
        if (chained_device_group_struct->deviceRenderAreaCount != 0 &&
            chained_device_group_struct->deviceRenderAreaCount != physical_device_count) {
            skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908",
                             "%s: deviceRenderAreaCount[%" PRIu32 "] is invalid. Physical device count is %" PRIu32 ".",
                             function_name, chained_device_group_struct->deviceRenderAreaCount, physical_device_count);
        }
    }
    return skip;
}
// vkCmdBeginRenderPass entry point: forward to the shared version-aware check.
bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                   VkSubpassContents contents) const {
    return ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin);
}
// vkCmdBeginRenderPass2KHR entry point: forward to the shared version-aware check.
bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                       const VkSubpassBeginInfo *pSubpassBeginInfo) const {
    return ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
}
// vkCmdBeginRenderPass2 entry point: forward to the shared version-aware check.
bool CoreChecks::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                    const VkSubpassBeginInfo *pSubpassBeginInfo) const {
    return ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
}
// Apply the image-layout transitions implied by starting a render pass
// instance (initial layouts for the render pass and its first subpass).
void CoreChecks::RecordCmdBeginRenderPassLayouts(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                 const VkSubpassContents contents) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    if (!pRenderPassBegin) return;
    auto rp_state = GetRenderPassState(pRenderPassBegin->renderPass);
    if (!rp_state) return;  // Nothing to transition for an unknown render pass
    TransitionBeginRenderPassLayouts(cb_state, rp_state, GetFramebufferState(pRenderPassBegin->framebuffer));
}
// Record state for vkCmdBeginRenderPass: update the base state tracker first,
// then record the attachment layout transitions for the new render pass instance.
void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                 VkSubpassContents contents) {
    StateTracker::PreCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, contents);
}
// Record state for vkCmdBeginRenderPass2KHR: update the base state tracker first,
// then record the attachment layout transitions for the new render pass instance.
void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                     const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PreCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
// Record state for vkCmdBeginRenderPass2: update the base state tracker first,
// then record the attachment layout transitions for the new render pass instance.
void CoreChecks::PreCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                  const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PreCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
// Shared validation for vkCmdNextSubpass()/vkCmdNextSubpass2[KHR](): command
// buffer level/pool checks and the "don't advance past the final subpass" rule.
bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *const function_name = use_rp2 ? "vkCmdNextSubpass2()" : "vkCmdNextSubpass()";
    // Select the per-version VUIDs up front for the shared checks below.
    const char *const buffer_level_vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-bufferlevel" : "VUID-vkCmdNextSubpass-bufferlevel";
    const char *const cmdpool_vuid =
        use_rp2 ? "VUID-vkCmdNextSubpass2-commandBuffer-cmdpool" : "VUID-vkCmdNextSubpass-commandBuffer-cmdpool";
    const char *const renderpass_vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-renderpass" : "VUID-vkCmdNextSubpass-renderpass";
    skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, buffer_level_vuid);
    skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, cmdpool_vuid);
    skip |= ValidateCmd(cb_state, use_rp2 ? CMD_NEXTSUBPASS2 : CMD_NEXTSUBPASS, function_name);
    skip |= OutsideRenderPass(cb_state, function_name, renderpass_vuid);
    // It is invalid to advance when already in the last subpass.
    const auto subpass_count = cb_state->activeRenderPass->createInfo.subpassCount;
    if (cb_state->activeSubpass == subpass_count - 1) {
        const char *const none_vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-None-03102" : "VUID-vkCmdNextSubpass-None-00909";
        skip |= LogError(commandBuffer, none_vuid, "%s: Attempted to advance beyond final subpass.", function_name);
    }
    return skip;
}
// vkCmdNextSubpass entry point: forward to the shared version-aware check.
bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
    bool skip = ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer);
    return skip;
}
// vkCmdNextSubpass2KHR entry point: forward to the shared version-aware check.
bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                   const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer);
    return skip;
}
// vkCmdNextSubpass2 entry point: forward to the shared version-aware check.
bool CoreChecks::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                const VkSubpassEndInfo *pSubpassEndInfo) const {
    bool skip = ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer);
    return skip;
}
// Apply the layout transitions for the subpass the command buffer just advanced to.
void CoreChecks::RecordCmdNextSubpassLayouts(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    auto framebuffer = Get<FRAMEBUFFER_STATE>(cb_state->activeRenderPassBeginInfo.framebuffer);
    TransitionSubpassLayouts(cb_state, cb_state->activeRenderPass.get(), cb_state->activeSubpass, framebuffer);
}
// vkCmdNextSubpass: let the state tracker advance activeSubpass first, then
// record the layout transitions for the newly active subpass.
void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
    RecordCmdNextSubpassLayouts(commandBuffer, contents);
}
// vkCmdNextSubpass2KHR: let the state tracker advance activeSubpass first, then
// record the layout transitions for the newly active subpass.
void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                                  const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents);
}
// vkCmdNextSubpass2: let the state tracker advance activeSubpass first, then
// record the layout transitions for the newly active subpass.
void CoreChecks::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo,
                                               const VkSubpassEndInfo *pSubpassEndInfo) {
    StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
    RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents);
}
// Shared validation for vkCmdEndRenderPass()/vkCmdEndRenderPass2[KHR]():
// the render pass may only be ended from its final subpass, and the usual
// command-buffer level/pool/state checks apply.
bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *const function_name = use_rp2 ? "vkCmdEndRenderPass2KHR()" : "vkCmdEndRenderPass()";
    const RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass.get();
    if (rp_state && (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1)) {
        const char *const subpass_vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-None-03103" : "VUID-vkCmdEndRenderPass-None-00910";
        skip |= LogError(commandBuffer, subpass_vuid, "%s: Called before reaching final subpass.", function_name);
    }
    skip |= OutsideRenderPass(cb_state, function_name,
                              use_rp2 ? "VUID-vkCmdEndRenderPass2-renderpass" : "VUID-vkCmdEndRenderPass-renderpass");
    skip |= ValidatePrimaryCommandBuffer(
        cb_state, function_name, use_rp2 ? "VUID-vkCmdEndRenderPass2-bufferlevel" : "VUID-vkCmdEndRenderPass-bufferlevel");
    skip |= ValidateCmdQueueFlags(
        cb_state, function_name, VK_QUEUE_GRAPHICS_BIT,
        use_rp2 ? "VUID-vkCmdEndRenderPass2-commandBuffer-cmdpool" : "VUID-vkCmdEndRenderPass-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, use_rp2 ? CMD_ENDRENDERPASS2 : CMD_ENDRENDERPASS, function_name);
    return skip;
}
// vkCmdEndRenderPass entry point: forward to the shared version-aware check.
bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
    return ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer);
}
// vkCmdEndRenderPass2KHR entry point: forward to the shared version-aware check.
bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
    return ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer);
}
// vkCmdEndRenderPass2 entry point: forward to the shared version-aware check.
bool CoreChecks::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const {
    return ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer);
}
// Apply the finalLayout transitions for all attachments of the ending render pass.
void CoreChecks::RecordCmdEndRenderPassLayouts(VkCommandBuffer commandBuffer) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    auto begin_info = cb_state->activeRenderPassBeginInfo.ptr();
    auto framebuffer = cb_state->activeFramebuffer.get();
    TransitionFinalSubpassLayouts(cb_state, begin_info, framebuffer);
}
// vkCmdEndRenderPass: final-layout transitions must be recorded BEFORE the
// state tracker clears the active render pass state they depend on.
void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    // Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
    RecordCmdEndRenderPassLayouts(commandBuffer);
    StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
}
// vkCmdEndRenderPass2KHR: final-layout transitions must be recorded BEFORE the
// state tracker clears the active render pass state they depend on.
void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    // Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
    RecordCmdEndRenderPassLayouts(commandBuffer);
    StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
}
// vkCmdEndRenderPass2: final-layout transitions must be recorded BEFORE the
// state tracker clears the active render pass state they depend on.
void CoreChecks::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) {
    RecordCmdEndRenderPassLayouts(commandBuffer);
    StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
}
// Verify that a secondary command buffer's inherited framebuffer (if any)
// matches the primary's currently active framebuffer and refers to a live
// framebuffer object. Used from vkCmdExecuteCommands() validation.
bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const CMD_BUFFER_STATE *pCB, VkCommandBuffer secondaryBuffer,
                                     const CMD_BUFFER_STATE *pSubCB, const char *caller) const {
    bool skip = false;
    const auto *inheritance_info = pSubCB->beginInfo.pInheritanceInfo;
    if (!inheritance_info) {
        return skip;  // Nothing inherited, nothing to check
    }
    const VkFramebuffer secondary_fb = inheritance_info->framebuffer;
    if (secondary_fb == VK_NULL_HANDLE) {
        return skip;  // A null inherited framebuffer is always compatible
    }
    const VkFramebuffer primary_fb = pCB->activeFramebuffer ? pCB->activeFramebuffer->framebuffer : VK_NULL_HANDLE;
    if (primary_fb != secondary_fb) {
        LogObjectList objlist(primaryBuffer);
        objlist.add(secondaryBuffer);
        objlist.add(secondary_fb);
        objlist.add(primary_fb);
        skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00099",
                         "vkCmdExecuteCommands() called w/ invalid secondary %s which has a %s"
                         " that is not the same as the primary command buffer's current active %s.",
                         report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(),
                         report_data->FormatHandle(primary_fb).c_str());
    }
    if (!GetFramebufferState(secondary_fb)) {
        // The inherited handle does not name a known framebuffer
        LogObjectList objlist(primaryBuffer);
        objlist.add(secondaryBuffer);
        objlist.add(secondary_fb);
        skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
                         "vkCmdExecuteCommands() called w/ invalid %s which has invalid %s.",
                         report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str());
    }
    return skip;
}
// Cross-validate a secondary command buffer (pSubCB) against its primary (pCB)
// for vkCmdExecuteCommands(): query inheritance/overlap rules and matching
// command-pool queue families.
bool CoreChecks::ValidateSecondaryCommandBufferState(const CMD_BUFFER_STATE *pCB, const CMD_BUFFER_STATE *pSubCB) const {
    bool skip = false;
    unordered_set<int> active_types;
    if (!disabled[query_validation]) {
        // For every query active on the primary: a pipeline-statistics query
        // requires the secondary's inherited pipelineStatistics to cover all of
        // the query pool's statistics bits.
        for (auto query_object : pCB->activeQueries) {
            auto query_pool_state = GetQueryPoolState(query_object.pool);
            if (query_pool_state) {
                if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                    pSubCB->beginInfo.pInheritanceInfo) {
                    VkQueryPipelineStatisticFlags cmd_buf_statistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                    if ((cmd_buf_statistics & query_pool_state->createInfo.pipelineStatistics) != cmd_buf_statistics) {
                        LogObjectList objlist(pCB->commandBuffer);
                        objlist.add(query_object.pool);
                        skip |= LogError(
                            objlist, "VUID-vkCmdExecuteCommands-commandBuffer-00104",
                            "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
                            ". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
                            report_data->FormatHandle(pCB->commandBuffer).c_str(),
                            report_data->FormatHandle(query_object.pool).c_str());
                    }
                }
                // Remember the query types active on the primary for the check below
                active_types.insert(query_pool_state->createInfo.queryType);
            }
        }
        // The secondary must not start a query of a type already active on the primary.
        for (auto query_object : pSubCB->startedQueries) {
            auto query_pool_state = GetQueryPoolState(query_object.pool);
            if (query_pool_state && active_types.count(query_pool_state->createInfo.queryType)) {
                LogObjectList objlist(pCB->commandBuffer);
                objlist.add(query_object.pool);
                skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
                                 "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
                                 " of type %d but a query of that type has been started on secondary %s.",
                                 report_data->FormatHandle(pCB->commandBuffer).c_str(),
                                 report_data->FormatHandle(query_object.pool).c_str(), query_pool_state->createInfo.queryType,
                                 report_data->FormatHandle(pSubCB->commandBuffer).c_str());
            }
        }
    }
    // Primary and secondary must be allocated from pools with the same queue family.
    auto primary_pool = pCB->command_pool.get();
    auto secondary_pool = pSubCB->command_pool.get();
    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
        LogObjectList objlist(pSubCB->commandBuffer);
        objlist.add(pCB->commandBuffer);
        skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00094",
                         "vkCmdExecuteCommands(): Primary %s created in queue family %d has secondary "
                         "%s created in queue family %d.",
                         report_data->FormatHandle(pCB->commandBuffer).c_str(), primary_pool->queueFamilyIndex,
                         report_data->FormatHandle(pSubCB->commandBuffer).c_str(), secondary_pool->queueFamilyIndex);
    }
    return skip;
}
// Validate vkCmdExecuteCommands(): every element of pCommandBuffers must be a
// secondary command buffer compatible with the primary's current state
// (render pass continuation, simultaneous-use rules, query inheritance,
// image layout expectations, and matching protected-ness).
bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
                                                   const VkCommandBuffer *pCommandBuffers) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    const CMD_BUFFER_STATE *sub_cb_state = NULL;
    // Tracks secondaries already seen in this call to catch duplicates (00093)
    std::unordered_set<const CMD_BUFFER_STATE *> linked_command_buffers;
    for (uint32_t i = 0; i < commandBuffersCount; i++) {
        sub_cb_state = GetCBState(pCommandBuffers[i]);
        assert(sub_cb_state);
        if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) {
            skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00088",
                             "vkCmdExecuteCommands() called w/ Primary %s in element %u of pCommandBuffers array. All "
                             "cmd buffers in pCommandBuffers array must be secondary.",
                             report_data->FormatHandle(pCommandBuffers[i]).c_str(), i);
        } else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) {
            // RENDER_PASS_CONTINUE_BIT must match whether the primary is inside a render pass
            if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) {
                const auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass);
                if (cb_state->activeRenderPass &&
                    !(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    LogObjectList objlist(pCommandBuffers[i]);
                    objlist.add(cb_state->activeRenderPass->renderPass);
                    skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00096",
                                     "vkCmdExecuteCommands(): Secondary %s is executed within a %s "
                                     "instance scope, but the Secondary Command Buffer does not have the "
                                     "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
                                     "the vkBeginCommandBuffer() was called.",
                                     report_data->FormatHandle(pCommandBuffers[i]).c_str(),
                                     report_data->FormatHandle(cb_state->activeRenderPass->renderPass).c_str());
                } else if (!cb_state->activeRenderPass &&
                           (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00100",
                                     "vkCmdExecuteCommands(): Secondary %s is executed outside a render pass "
                                     "instance scope, but the Secondary Command Buffer does have the "
                                     "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
                                     "the vkBeginCommandBuffer() was called.",
                                     report_data->FormatHandle(pCommandBuffers[i]).c_str());
                } else if (cb_state->activeRenderPass &&
                           (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    // Make sure render pass is compatible with parent command buffer pass if has continue
                    if (cb_state->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
                        skip |= ValidateRenderPassCompatibility(
                            "primary command buffer", cb_state->activeRenderPass.get(), "secondary command buffer",
                            secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098");
                    }
                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
                    skip |=
                        ValidateFramebuffer(commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state, "vkCmdExecuteCommands()");
                    if (!sub_cb_state->cmd_execute_commands_functions.empty()) {
                        //  Inherit primary's activeFramebuffer and while running validate functions
                        for (auto &function : sub_cb_state->cmd_execute_commands_functions) {
                            skip |= function(cb_state, cb_state->activeFramebuffer.get());
                        }
                    }
                }
            }
        }
        // TODO(mlentine): Move more logic into this method
        skip |= ValidateSecondaryCommandBufferState(cb_state, sub_cb_state);
        skip |= ValidateCommandBufferState(sub_cb_state, "vkCmdExecuteCommands()", 0,
                                           "VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
        // SIMULTANEOUS_USE rules: a non-simultaneous secondary may not be pending,
        // already linked into this primary, or duplicated in this call.
        if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            if (sub_cb_state->in_use.load()) {
                skip |= LogError(
                    cb_state->commandBuffer, "VUID-vkCmdExecuteCommands-pCommandBuffers-00091",
                    "vkCmdExecuteCommands(): Cannot execute pending %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                    report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
            }
            // We use an const_cast, because one cannot query a container keyed on a non-const pointer using a const pointer
            if (cb_state->linkedCommandBuffers.count(const_cast<CMD_BUFFER_STATE *>(sub_cb_state))) {
                LogObjectList objlist(cb_state->commandBuffer);
                objlist.add(sub_cb_state->commandBuffer);
                skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00092",
                                 "vkCmdExecuteCommands(): Cannot execute %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                                 "set if previously executed in %s",
                                 report_data->FormatHandle(sub_cb_state->commandBuffer).c_str(),
                                 report_data->FormatHandle(cb_state->commandBuffer).c_str());
            }
            const auto insert_pair = linked_command_buffers.insert(sub_cb_state);
            if (!insert_pair.second) {
                skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdExecuteCommands-pCommandBuffers-00093",
                                 "vkCmdExecuteCommands(): Cannot duplicate %s in pCommandBuffers without "
                                 "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                                 report_data->FormatHandle(cb_state->commandBuffer).c_str());
            }
            if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                LogObjectList objlist(pCommandBuffers[i]);
                objlist.add(cb_state->commandBuffer);
                skip |= LogWarning(objlist, kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
                                   "vkCmdExecuteCommands(): Secondary %s does not have "
                                   "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
                                   "%s to be treated as if it does not have "
                                   "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.",
                                   report_data->FormatHandle(pCommandBuffers[i]).c_str(),
                                   report_data->FormatHandle(cb_state->commandBuffer).c_str());
            }
        }
        if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) {
            skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-commandBuffer-00101",
                             "vkCmdExecuteCommands(): Secondary %s cannot be submitted with a query in flight and "
                             "inherited queries not supported on this device.",
                             report_data->FormatHandle(pCommandBuffers[i]).c_str());
        }
        // Validate initial layout uses vs. the primary cmd buffer state
        // Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
        // initial layout usage of secondary command buffers resources must match parent command buffer
        const auto *const_cb_state = static_cast<const CMD_BUFFER_STATE *>(cb_state);
        for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
            const auto image = sub_layout_map_entry.first;
            const auto *image_state = GetImageState(image);
            if (!image_state) continue;  // Can't set layouts of a dead image
            const auto *cb_subres_map = GetImageSubresourceLayoutMap(const_cb_state, image);
            // Const getter can be null in which case we have nothing to check against for this image...
            if (!cb_subres_map) continue;
            const auto &sub_cb_subres_map = sub_layout_map_entry.second;
            // Validate the initial_uses, that they match the current state of the primary cb, or absent a current state,
            // that the match any initial_layout.
            for (const auto &subres_layout : *sub_cb_subres_map) {
                const auto &sub_layout = subres_layout.initial_layout;
                const auto &subresource = subres_layout.subresource;
                if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue;  // secondary doesn't care about current or initial
                // Look up the layout to compared to the intial layout of the sub command buffer (current else initial)
                auto cb_layouts = cb_subres_map->GetSubresourceLayouts(subresource);
                auto cb_layout = cb_layouts.current_layout;
                const char *layout_type = "current";
                if (cb_layouts.current_layout == kInvalidLayout) {
                    cb_layout = cb_layouts.initial_layout;
                    layout_type = "initial";
                }
                if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) {
                    // NOTE(review): the first format argument already ends with ':', so the
                    // emitted message reads "vkCmdExecuteCommands():: ..." -- confirm intended.
                    skip |= LogError(pCommandBuffers[i], "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
                                     "%s: Executed secondary command buffer using %s (subresource: aspectMask 0x%X array layer %u, "
                                     "mip level %u) which expects layout %s--instead, image %s layout is %s.",
                                     "vkCmdExecuteCommands():", report_data->FormatHandle(image).c_str(), subresource.aspectMask,
                                     subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type,
                                     string_VkImageLayout(cb_layout));
                }
            }
        }
        // All commands buffers involved must be protected or unprotected
        if ((cb_state->unprotected == false) && (sub_cb_state->unprotected == true)) {
            LogObjectList objlist(cb_state->commandBuffer);
            objlist.add(sub_cb_state->commandBuffer);
            skip |= LogError(
                objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01820",
                "vkCmdExecuteCommands(): command buffer %s is protected while secondary command buffer %s is a unprotected",
                report_data->FormatHandle(cb_state->commandBuffer).c_str(),
                report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
        } else if ((cb_state->unprotected == true) && (sub_cb_state->unprotected == false)) {
            LogObjectList objlist(cb_state->commandBuffer);
            objlist.add(sub_cb_state->commandBuffer);
            skip |= LogError(
                objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01821",
                "vkCmdExecuteCommands(): command buffer %s is unprotected while secondary command buffer %s is a protected",
                report_data->FormatHandle(cb_state->commandBuffer).c_str(),
                report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
        }
    }
    skip |= ValidatePrimaryCommandBuffer(cb_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-bufferlevel");
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdExecuteCommands()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdExecuteCommands-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    return skip;
}
// vkMapMemory: the memory must be host-visible, must not be a multi-instance
// allocation, and the requested [offset, size) range must fit the mapping rules.
// Fix: accumulate results with |= -- a plain assignment could clear a previously
// recorded error when a later LogError returns false (e.g. message filtered).
bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                          VkFlags flags, void **ppData) const {
    bool skip = false;
    const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
    if (mem_info) {
        if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip |= LogError(mem, "VUID-vkMapMemory-memory-00682",
                             "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: %s.",
                             report_data->FormatHandle(mem).c_str());
        }
        if (mem_info->multi_instance) {
            skip |= LogError(mem, "VUID-vkMapMemory-memory-00683",
                             "Memory (%s) must not have been allocated with multiple instances -- either by supplying a deviceMask "
                             "with more than one bit set, or by allocation from a heap with the MULTI_INSTANCE heap flag set.",
                             report_data->FormatHandle(mem).c_str());
        }
        skip |= ValidateMapMemRange(mem_info, offset, size);
    }
    return skip;
}
// vkUnmapMemory: the memory object must currently be host mapped.
bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) const {
    const auto mem_info = GetDevMemState(mem);
    if (!mem_info || mem_info->mapped_range.size) {
        return false;  // Unknown handle, or a mapping is active -- nothing to report
    }
    // Valid Usage: memory must currently be mapped
    return LogError(mem, "VUID-vkUnmapMemory-memory-00689", "Unmapping Memory without memory being mapped: %s.",
                    report_data->FormatHandle(mem).c_str());
}
// Validate that each VkMappedMemoryRange lies within a currently mapped region
// of its memory object (shared by flush and invalidate).
// Fix: use |= for the 00684 check -- a plain assignment could clear an error
// recorded for an earlier range if this LogError returns false (filtered).
bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const {
    bool skip = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_info = GetDevMemState(pMemRanges[i].memory);
        if (mem_info) {
            // Makes sure the memory is already mapped
            if (mem_info->mapped_range.size == 0) {
                skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-memory-00684",
                                 "%s: Attempting to use memory (%s) that is not currently host mapped.", funcName,
                                 report_data->FormatHandle(pMemRanges[i].memory).c_str());
            }
            if (pMemRanges[i].size == VK_WHOLE_SIZE) {
                // VK_WHOLE_SIZE extends to the end of the mapping, so only the start can be out of bounds
                if (mem_info->mapped_range.offset > pMemRanges[i].offset) {
                    skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00686",
                                     "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
                                     ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
                                     funcName, static_cast<size_t>(pMemRanges[i].offset),
                                     static_cast<size_t>(mem_info->mapped_range.offset));
                }
            } else {
                // End of the mapped region: whole allocation if mapped with VK_WHOLE_SIZE
                const uint64_t data_end = (mem_info->mapped_range.size == VK_WHOLE_SIZE)
                                              ? mem_info->alloc_info.allocationSize
                                              : (mem_info->mapped_range.offset + mem_info->mapped_range.size);
                if ((mem_info->mapped_range.offset > pMemRanges[i].offset) ||
                    (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
                    skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00685",
                                     "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
                                     ") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
                                     funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                     static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
                }
            }
        }
    }
    return skip;
}
// Check flush/invalidate ranges against VkPhysicalDeviceLimits::nonCoherentAtomSize:
// offset must be a multiple of the atom size, and size must be a multiple unless it
// is VK_WHOLE_SIZE or extends exactly to the end of the allocation.
// Fix: errors now log the offending range's memory handle (mem_ranges[i].memory);
// previously mem_ranges->memory always named element 0.
bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count,
                                                       const VkMappedMemoryRange *mem_ranges) const {
    bool skip = false;
    for (uint32_t i = 0; i < mem_range_count; ++i) {
        const uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize;
        const VkDeviceSize offset = mem_ranges[i].offset;
        const VkDeviceSize size = mem_ranges[i].size;
        if (SafeModulo(offset, atom_size) != 0) {
            skip |= LogError(mem_ranges[i].memory, "VUID-VkMappedMemoryRange-offset-00687",
                             "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
                             ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
                             func_name, i, offset, atom_size);
        }
        auto mem_info = GetDevMemState(mem_ranges[i].memory);
        if (mem_info) {
            const VkDeviceSize allocation_size = mem_info->alloc_info.allocationSize;
            if ((size != VK_WHOLE_SIZE) && (size + offset != allocation_size) && (SafeModulo(size, atom_size) != 0)) {
                skip |= LogError(mem_ranges[i].memory, "VUID-VkMappedMemoryRange-size-01390",
                                 "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
                                 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
                                 func_name, i, size, atom_size);
            } else if ((size == VK_WHOLE_SIZE) && SafeModulo(allocation_size - offset, atom_size) != 0) {
                skip |= LogError(mem_ranges[i].memory, "VUID-VkMappedMemoryRange-size-01389",
                                 "%s: Size in pMemRanges[%d] is VK_WHOLE_SIZE and allocationSize minus offset (0x%" PRIxLEAST64
                                 " - 0x%" PRIxLEAST64
                                 ") is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
                                 func_name, i, allocation_size, offset, atom_size);
            }
        }
    }
    return skip;
}
// vkFlushMappedMemoryRanges: check atom-size alignment, then that every range
// lies within a currently mapped region.
bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                        const VkMappedMemoryRange *pMemRanges) const {
    bool skip = ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    return skip;
}
// vkInvalidateMappedMemoryRanges: check nonCoherentAtomSize limits and that each range's memory is mapped.
bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
                                                             const VkMappedMemoryRange *pMemRanges) const {
    const char *api_name = "vkInvalidateMappedMemoryRanges";
    bool skip = ValidateMappedMemoryRangeDeviceLimits(api_name, memRangeCount, pMemRanges);
    skip |= ValidateMemoryIsMapped(api_name, memRangeCount, pMemRanges);
    return skip;
}
// vkGetDeviceMemoryCommitment: querying commitment is only meaningful for lazily-allocated memory (VUID 00690).
bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) const {
    bool skip = false;
    const auto mem_info = GetDevMemState(mem);
    if (!mem_info) {
        return skip;
    }
    const VkMemoryPropertyFlags property_flags =
        phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags;
    if (!(property_flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)) {
        skip = LogError(mem, "VUID-vkGetDeviceMemoryCommitment-memory-00690",
                        "vkGetDeviceMemoryCommitment(): Querying commitment for memory without "
                        "VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: %s.",
                        report_data->FormatHandle(mem).c_str());
    }
    return skip;
}
// Shared validation for vkBindImageMemory and vkBindImageMemory2[KHR].
// For each VkBindImageMemoryInfo it checks: memory binding state, alignment/size/memory-type
// requirements (both non-disjoint and per-plane disjoint paths), duplicate bindings, bound
// memory ranges, dedicated allocations, external (export/import) handle compatibility,
// protected-memory consistency, swapchain binding, and plane-aspect validity. Finally it
// verifies that every plane of each disjoint image was bound.
// Returns true if any error was logged.
bool CoreChecks::ValidateBindImageMemory(uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos,
                                         const char *api_name) const {
    bool skip = false;
    // The "2" entry points produce per-element error prefixes; detect them by name.
    bool bind_image_mem_2 = strcmp(api_name, "vkBindImageMemory()") != 0;
    char error_prefix[128];
    // snprintf (not strcpy/sprintf) so a long api_name can never overflow the fixed buffer.
    snprintf(error_prefix, sizeof(error_prefix), "%s", api_name);
    // Track all image sub resources if they are bound for bind_image_mem_2
    // uint32_t[3] is which index in pBindInfos for max 3 planes
    // Non disjoint images act as a single plane
    std::unordered_map<VkImage, std::array<uint32_t, 3>> resources_bound;
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        if (bind_image_mem_2 == true) {
            snprintf(error_prefix, sizeof(error_prefix), "%s pBindInfos[%u]", api_name, i);
        }
        const VkBindImageMemoryInfo &bind_info = pBindInfos[i];
        const IMAGE_STATE *image_state = GetImageState(bind_info.image);
        if (image_state) {
            // Track objects tied to memory
            skip |=
                ValidateSetMemBinding(bind_info.memory, VulkanTypedHandle(bind_info.image, kVulkanObjectTypeImage), error_prefix);
            const auto plane_info = LvlFindInChain<VkBindImagePlaneMemoryInfo>(bind_info.pNext);
            const auto mem_info = GetDevMemState(bind_info.memory);
            // Need extra check for disjoint flag incase called without bindImage2 and don't want false positive errors
            // no 'else' case as if that happens another VUID is already being triggered for it being invalid
            if ((plane_info == nullptr) && (image_state->disjoint == false)) {
                // Check non-disjoint images VkMemoryRequirements
                // All validation using the image_state->requirements for external AHB is check in android only section
                if (image_state->external_ahb == false) {
                    const VkMemoryRequirements mem_req = image_state->requirements;
                    // Validate memory requirements alignment
                    if (SafeModulo(bind_info.memoryOffset, mem_req.alignment) != 0) {
                        const char *validation_error;
                        if (bind_image_mem_2 == false) {
                            validation_error = "VUID-vkBindImageMemory-memoryOffset-01048";
                        } else if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
                            validation_error = "VUID-VkBindImageMemoryInfo-pNext-01616";
                        } else {
                            validation_error = "VUID-VkBindImageMemoryInfo-memoryOffset-01613";
                        }
                        skip |=
                            LogError(bind_info.image, validation_error,
                                     "%s: memoryOffset is 0x%" PRIxLEAST64
                                     " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                                     ", returned from a call to vkGetImageMemoryRequirements with image.",
                                     error_prefix, bind_info.memoryOffset, mem_req.alignment);
                    }
                    if (mem_info) {
                        safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info;
                        // Validate memory requirements size
                        if (mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) {
                            const char *validation_error;
                            if (bind_image_mem_2 == false) {
                                validation_error = "VUID-vkBindImageMemory-size-01049";
                            } else if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
                                validation_error = "VUID-VkBindImageMemoryInfo-pNext-01617";
                            } else {
                                validation_error = "VUID-VkBindImageMemoryInfo-memory-01614";
                            }
                            skip |= LogError(bind_info.image, validation_error,
                                             "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                             " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                             ", returned from a call to vkGetImageMemoryRequirements with image.",
                                             error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, mem_req.size);
                        }
                        // Validate memory type used
                        {
                            const char *validation_error;
                            if (bind_image_mem_2 == false) {
                                validation_error = "VUID-vkBindImageMemory-memory-01047";
                            } else if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
                                validation_error = "VUID-VkBindImageMemoryInfo-pNext-01615";
                            } else {
                                validation_error = "VUID-VkBindImageMemoryInfo-memory-01612";
                            }
                            skip |= ValidateMemoryTypes(mem_info, mem_req.memoryTypeBits, error_prefix, validation_error);
                        }
                    }
                }

                if (bind_image_mem_2 == true) {
                    // since its a non-disjoint image, finding VkImage in map is a duplicate
                    auto it = resources_bound.find(image_state->image);
                    if (it == resources_bound.end()) {
                        std::array<uint32_t, 3> bound_index = {i, UINT32_MAX, UINT32_MAX};
                        resources_bound.emplace(image_state->image, bound_index);
                    } else {
                        skip |= LogError(
                            bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006",
                            "%s: The same non-disjoint image resource is being bound twice at pBindInfos[%d] and pBindInfos[%d]",
                            error_prefix, it->second[0], i);
                    }
                }
            } else if ((plane_info != nullptr) && (image_state->disjoint == true)) {
                // Check disjoint images VkMemoryRequirements for given plane
                int plane = 0;

                // All validation using the image_state->plane*_requirements for external AHB is check in android only section
                if (image_state->external_ahb == false) {
                    VkMemoryRequirements disjoint_mem_req = {};
                    const VkImageAspectFlagBits aspect = plane_info->planeAspect;
                    switch (aspect) {
                        case VK_IMAGE_ASPECT_PLANE_0_BIT:
                            plane = 0;
                            disjoint_mem_req = image_state->plane0_requirements;
                            break;
                        case VK_IMAGE_ASPECT_PLANE_1_BIT:
                            plane = 1;
                            disjoint_mem_req = image_state->plane1_requirements;
                            break;
                        case VK_IMAGE_ASPECT_PLANE_2_BIT:
                            plane = 2;
                            disjoint_mem_req = image_state->plane2_requirements;
                            break;
                        default:
                            assert(false);  // parameter validation should have caught this
                            break;
                    }

                    // Validate memory requirements alignment
                    if (SafeModulo(bind_info.memoryOffset, disjoint_mem_req.alignment) != 0) {
                        skip |= LogError(
                            bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01620",
                            "%s: memoryOffset is 0x%" PRIxLEAST64
                            " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                            ", returned from a call to vkGetImageMemoryRequirements2 with disjoint image for aspect plane %s.",
                            error_prefix, bind_info.memoryOffset, disjoint_mem_req.alignment, string_VkImageAspectFlagBits(aspect));
                    }

                    if (mem_info) {
                        safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info;

                        // Validate memory requirements size
                        if (disjoint_mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) {
                            skip |= LogError(
                                bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01621",
                                "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
                                " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
                                ", returned from a call to vkGetImageMemoryRequirements with disjoint image for aspect plane %s.",
                                error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, disjoint_mem_req.size,
                                string_VkImageAspectFlagBits(aspect));
                        }

                        // Validate memory type used
                        {
                            skip |= ValidateMemoryTypes(mem_info, disjoint_mem_req.memoryTypeBits, error_prefix,
                                                        "VUID-VkBindImageMemoryInfo-pNext-01619");
                        }
                    }
                }

                auto it = resources_bound.find(image_state->image);
                if (it == resources_bound.end()) {
                    std::array<uint32_t, 3> bound_index = {UINT32_MAX, UINT32_MAX, UINT32_MAX};
                    bound_index[plane] = i;
                    resources_bound.emplace(image_state->image, bound_index);
                } else {
                    if (it->second[plane] == UINT32_MAX) {
                        it->second[plane] = i;
                    } else {
                        skip |= LogError(bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006",
                                         "%s: The same disjoint image sub-resource for plane %d is being bound twice at "
                                         "pBindInfos[%d] and pBindInfos[%d]",
                                         error_prefix, plane, it->second[plane], i);
                    }
                }
            }

            if (mem_info) {
                // Validate bound memory range information
                // if memory is exported to an AHB then the mem_info->allocationSize must be zero and this check is not needed
                if ((mem_info->is_export == false) || ((mem_info->export_handle_type_flags &
                                                        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0)) {
                    skip |= ValidateInsertImageMemoryRange(bind_info.image, mem_info, bind_info.memoryOffset, error_prefix);
                }

                // Validate dedicated allocation
                if (mem_info->is_dedicated) {
                    if (enabled_features.dedicated_allocation_image_aliasing_features.dedicatedAllocationImageAliasing) {
                        const auto orig_image_state = GetImageState(mem_info->dedicated_image);
                        const auto current_image_state = GetImageState(bind_info.image);
                        if ((bind_info.memoryOffset != 0) || !orig_image_state || !current_image_state ||
                            !current_image_state->IsCreateInfoDedicatedAllocationImageAliasingCompatible(
                                orig_image_state->createInfo)) {
                            const char *validation_error;
                            if (bind_image_mem_2 == false) {
                                validation_error = "VUID-vkBindImageMemory-memory-02629";
                            } else {
                                validation_error = "VUID-VkBindImageMemoryInfo-memory-02629";
                            }
                            LogObjectList objlist(bind_info.image);
                            objlist.add(bind_info.memory);
                            objlist.add(mem_info->dedicated_image);
                            skip |= LogError(
                                objlist, validation_error,
                                "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must be compatible "
                                "with %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
                                error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
                                report_data->FormatHandle(mem_info->dedicated_image).c_str(),
                                report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset);
                        }
                    } else {
                        if ((mem_info->dedicated_image != VK_NULL_HANDLE) &&
                            ((bind_info.memoryOffset != 0) || (mem_info->dedicated_image != bind_info.image))) {
                            const char *validation_error;
                            if (bind_image_mem_2 == false) {
                                validation_error = "VUID-vkBindImageMemory-memory-01509";
                            } else {
                                validation_error = "VUID-VkBindImageMemoryInfo-memory-01509";
                            }
                            LogObjectList objlist(bind_info.image);
                            objlist.add(bind_info.memory);
                            objlist.add(mem_info->dedicated_image);
                            skip |=
                                LogError(objlist, validation_error,
                                         "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must be equal "
                                         "to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
                                         error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
                                         report_data->FormatHandle(mem_info->dedicated_image).c_str(),
                                         report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset);
                        }
                    }
                }

                // Validate export memory handles
                if ((mem_info->export_handle_type_flags != 0) &&
                    ((mem_info->export_handle_type_flags & image_state->external_memory_handle) == 0)) {
                    const char *vuid =
                        bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-memory-02728" : "VUID-vkBindImageMemory-memory-02728";
                    LogObjectList objlist(bind_info.image);
                    objlist.add(bind_info.memory);
                    skip |= LogError(objlist, vuid,
                                     "%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least "
                                     "one handle from VkImage (%s) handleType %s.",
                                     error_prefix, report_data->FormatHandle(bind_info.memory).c_str(),
                                     string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(),
                                     report_data->FormatHandle(bind_info.image).c_str(),
                                     string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str());
                }

                // Validate import memory handles
                if (mem_info->is_import_ahb == true) {
                    skip |= ValidateImageImportedHandleANDROID(api_name, image_state->external_memory_handle, bind_info.memory,
                                                               bind_info.image);
                } else if (mem_info->is_import == true) {
                    if ((mem_info->import_handle_type_flags & image_state->external_memory_handle) == 0) {
                        const char *vuid = nullptr;
                        if ((bind_image_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
                            vuid = "VUID-VkBindImageMemoryInfo-memory-02989";
                        } else if ((!bind_image_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
                            vuid = "VUID-vkBindImageMemory-memory-02989";
                        } else if ((bind_image_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
                            vuid = "VUID-VkBindImageMemoryInfo-memory-02729";
                        } else if ((!bind_image_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
                            vuid = "VUID-vkBindImageMemory-memory-02729";
                        }
                        LogObjectList objlist(bind_info.image);
                        objlist.add(bind_info.memory);
                        skip |= LogError(objlist, vuid,
                                         "%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s "
                                         "which is not set in the VkImage (%s) VkExternalMemoryImageCreateInfo::handleType (%s)",
                                         api_name, report_data->FormatHandle(bind_info.memory).c_str(),
                                         string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(),
                                         report_data->FormatHandle(bind_info.image).c_str(),
                                         string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str());
                    }
                }

                // Validate mix of protected buffer and memory
                if ((image_state->unprotected == false) && (mem_info->unprotected == true)) {
                    const char *vuid =
                        bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01901" : "VUID-vkBindImageMemory-None-01901";
                    LogObjectList objlist(bind_info.image);
                    objlist.add(bind_info.memory);
                    skip |= LogError(objlist, vuid,
                                     "%s: The VkDeviceMemory (%s) was not created with protected memory but the VkImage (%s) was "
                                     "set to use protected memory.",
                                     api_name, report_data->FormatHandle(bind_info.memory).c_str(),
                                     report_data->FormatHandle(bind_info.image).c_str());
                } else if ((image_state->unprotected == true) && (mem_info->unprotected == false)) {
                    const char *vuid =
                        bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01902" : "VUID-vkBindImageMemory-None-01902";
                    LogObjectList objlist(bind_info.image);
                    objlist.add(bind_info.memory);
                    skip |= LogError(objlist, vuid,
                                     "%s: The VkDeviceMemory (%s) was created with protected memory but the VkImage (%s) was not "
                                     "set to use protected memory.",
                                     api_name, report_data->FormatHandle(bind_info.memory).c_str(),
                                     report_data->FormatHandle(bind_info.image).c_str());
                }
            }

            // Swapchain-bound images must not supply a VkDeviceMemory; non-swapchain binds need valid memory.
            const auto swapchain_info = LvlFindInChain<VkBindImageMemorySwapchainInfoKHR>(bind_info.pNext);
            if (swapchain_info) {
                if (bind_info.memory != VK_NULL_HANDLE) {
                    skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01631", "%s: %s is not VK_NULL_HANDLE.",
                                     error_prefix, report_data->FormatHandle(bind_info.memory).c_str());
                }
                if (image_state->create_from_swapchain != swapchain_info->swapchain) {
                    LogObjectList objlist(image_state->image);
                    objlist.add(image_state->create_from_swapchain);
                    objlist.add(swapchain_info->swapchain);
                    skip |= LogError(
                        objlist, kVUID_Core_BindImageMemory_Swapchain,
                        "%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same "
                        "swapchain",
                        error_prefix, report_data->FormatHandle(image_state->image).c_str(),
                        report_data->FormatHandle(image_state->create_from_swapchain).c_str(),
                        report_data->FormatHandle(swapchain_info->swapchain).c_str());
                }
                const auto swapchain_state = GetSwapchainState(swapchain_info->swapchain);
                if (swapchain_state && swapchain_state->images.size() <= swapchain_info->imageIndex) {
                    skip |= LogError(bind_info.image, "VUID-VkBindImageMemorySwapchainInfoKHR-imageIndex-01644",
                                     "%s: imageIndex (%i) is out of bounds of %s images (size: %i)", error_prefix,
                                     swapchain_info->imageIndex, report_data->FormatHandle(swapchain_info->swapchain).c_str(),
                                     static_cast<int>(swapchain_state->images.size()));
                }
            } else {
                if (image_state->create_from_swapchain) {
                    skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-image-01630",
                                     "%s: pNext of VkBindImageMemoryInfo doesn't include VkBindImageMemorySwapchainInfoKHR.",
                                     error_prefix);
                }
                if (!mem_info) {
                    skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01632", "%s: %s is invalid.", error_prefix,
                                     report_data->FormatHandle(bind_info.memory).c_str());
                }
            }

            if (plane_info) {
                // Checks for disjoint bit in image
                if (image_state->disjoint == false) {
                    skip |= LogError(
                        bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01618",
                        "%s: pNext of VkBindImageMemoryInfo contains VkBindImagePlaneMemoryInfo and %s is not created with "
                        "VK_IMAGE_CREATE_DISJOINT_BIT.",
                        error_prefix, report_data->FormatHandle(image_state->image).c_str());
                }

                // Make sure planeAspect is only a single, valid plane
                uint32_t planes = FormatPlaneCount(image_state->createInfo.format);
                VkImageAspectFlags aspect = plane_info->planeAspect;
                if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
                    skip |= LogError(
                        bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283",
                        "%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
                        "or VK_IMAGE_ASPECT_PLANE_1_BIT.",
                        error_prefix, report_data->FormatHandle(image_state->image).c_str(),
                        string_VkImageAspectFlags(aspect).c_str());
                }
                if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
                    (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
                    skip |= LogError(
                        bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283",
                        "%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
                        "or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.",
                        error_prefix, report_data->FormatHandle(image_state->image).c_str(),
                        string_VkImageAspectFlags(aspect).c_str());
                }
            }
        }
    }

    // Check to make sure all disjoint planes were bound
    for (std::pair<const VkImage, std::array<uint32_t, 3>> &resource : resources_bound) {
        const IMAGE_STATE *image_state = GetImageState(resource.first);
        if (image_state->disjoint == true) {
            uint32_t total_planes = FormatPlaneCount(image_state->createInfo.format);
            for (uint32_t i = 0; i < total_planes; i++) {
                if (resource.second[i] == UINT32_MAX) {
                    skip |= LogError(resource.first, "VUID-vkBindImageMemory2-pBindInfos-02858",
                                     "%s: Plane %u of the disjoint image was not bound. All %d planes need to be bound individually "
                                     "in separate pBindInfos in a single call.",
                                     api_name, i, total_planes);
                }
            }
        }
    }
    return skip;
}
// vkBindImageMemory: disjoint images require the "2" entry point (VUID 01608); otherwise
// forward the single binding through the shared ValidateBindImageMemory path.
bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem,
                                                VkDeviceSize memoryOffset) const {
    bool skip = false;
    const IMAGE_STATE *image_state = GetImageState(image);
    if (image_state && (image_state->disjoint == true)) {
        skip |=
            LogError(image, "VUID-vkBindImageMemory-image-01608",
                     "%s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT (need to use vkBindImageMemory2).",
                     report_data->FormatHandle(image).c_str());
    }

    // Wrap the legacy parameters into a VkBindImageMemoryInfo for the shared validator.
    VkBindImageMemoryInfo bind_info = {};
    bind_info.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
    bind_info.pNext = nullptr;
    bind_info.image = image;
    bind_info.memory = mem;
    bind_info.memoryOffset = memoryOffset;
    skip |= ValidateBindImageMemory(1, &bind_info, "vkBindImageMemory()");
    return skip;
}
// vkBindImageMemory2: thin wrapper over the shared bind-image validation.
bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
                                                 const VkBindImageMemoryInfo *pBindInfos) const {
    const bool skip = ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2()");
    return skip;
}
// vkBindImageMemory2KHR: thin wrapper over the shared bind-image validation.
bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
                                                    const VkBindImageMemoryInfo *pBindInfos) const {
    const bool skip = ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2KHR()");
    return skip;
}
// vkSetEvent: warn if the host sets an event that a queued command buffer will also write,
// which can break queue forward progress.
bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) const {
    bool skip = false;
    const auto event_state = GetEventState(event);
    if (event_state && event_state->write_in_use) {
        skip |= LogError(event, kVUID_Core_DrawState_QueueForwardProgress,
                         "vkSetEvent(): %s that is already in use by a command buffer.",
                         report_data->FormatHandle(event).c_str());
    }
    return skip;
}
// vkQueueBindSparse: validates the fence, queue capability, binary/timeline semaphore wait and
// signal legality (including VkTimelineSemaphoreSubmitInfo consistency), sparse-residency image
// requirements, and finally maxTimelineSemaphoreValueDifference for all timeline payload values.
// Returns true if any error was logged.
bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
                                                VkFence fence) const {
    const auto queue_data = GetQueueState(queue);
    const auto fence_state = GetFenceState(fence);
    bool skip = ValidateFenceForSubmit(fence_state, "VUID-vkQueueBindSparse-fence-01114", "VUID-vkQueueBindSparse-fence-01113",
                                       "VkQueueBindSparse()");
    if (skip) {
        return true;
    }

    const auto queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_data->queueFamilyIndex].queueFlags;
    if (!(queue_flags & VK_QUEUE_SPARSE_BINDING_BIT)) {
        skip |= LogError(queue, "VUID-vkQueueBindSparse-queuetype",
                         "vkQueueBindSparse(): a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set.");
    }

    // Track binary semaphore state across all bind infos to detect waits that can never be
    // satisfied and double signals.
    unordered_set<VkSemaphore> signaled_semaphores;
    unordered_set<VkSemaphore> unsignaled_semaphores;
    unordered_set<VkSemaphore> internal_semaphores;
    auto *vuid_error = device_extensions.vk_khr_timeline_semaphore ? "VUID-vkQueueBindSparse-pWaitSemaphores-03245"
                                                                   : kVUID_Core_DrawState_QueueForwardProgress;
    for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) {
        const VkBindSparseInfo &bind_info = pBindInfo[bind_idx];

        // Look in this element's pNext chain (bind_info), not pBindInfo[0]'s.
        auto timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(bind_info.pNext);
        std::vector<SEMAPHORE_WAIT> semaphore_waits;
        std::vector<VkSemaphore> semaphore_signals;
        for (uint32_t i = 0; i < bind_info.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bind_info.pWaitSemaphores[i];
            const auto semaphore_state = GetSemaphoreState(semaphore);
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) {
                skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
                                 "VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, but "
                                 "pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo",
                                 bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx);
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
                bind_info.waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
                skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03247",
                                 "VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, it contains "
                                 "an instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different "
                                 "than pBindInfo[%u].waitSemaphoreCount (%u)",
                                 bind_idx, i, report_data->FormatHandle(semaphore).c_str(),
                                 timeline_semaphore_submit_info->waitSemaphoreValueCount, bind_idx, bind_info.waitSemaphoreCount);
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
                (semaphore_state->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                // A binary wait needs a prior (or pending) signal to ever complete.
                if (unsignaled_semaphores.count(semaphore) ||
                    (!(signaled_semaphores.count(semaphore)) && !(semaphore_state->signaled) && !SemaphoreWasSignaled(semaphore))) {
                    LogObjectList objlist(semaphore);
                    objlist.add(queue);
                    skip |= LogError(
                        objlist,
                        semaphore_state->scope == kSyncScopeInternal ? vuid_error : kVUID_Core_DrawState_QueueForwardProgress,
                        "vkQueueBindSparse(): Queue %s is waiting on pBindInfo[%u].pWaitSemaphores[%u] (%s) that has no way to be "
                        "signaled.",
                        report_data->FormatHandle(queue).c_str(), bind_idx, i, report_data->FormatHandle(semaphore).c_str());
                } else {
                    signaled_semaphores.erase(semaphore);
                    unsignaled_semaphores.insert(semaphore);
                }
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
                semaphore_state->scope == kSyncScopeExternalTemporary) {
                internal_semaphores.insert(semaphore);
            }
        }

        for (uint32_t i = 0; i < bind_info.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bind_info.pSignalSemaphores[i];
            const auto semaphore_state = GetSemaphoreState(semaphore);
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) {
                skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
                                 "VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, but "
                                 "pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo",
                                 bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx);
            }
            // NOTE(review): pSignalSemaphoreValues[i] is read before the signalSemaphoreValueCount
            // consistency check below; a too-short value array would be read out of bounds here.
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
                timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= semaphore_state->payload) {
                LogObjectList objlist(semaphore);
                objlist.add(queue);
                // Arguments ordered to match the format string: signal value first, current payload last.
                skip |= LogError(objlist, "VUID-VkBindSparseInfo-pSignalSemaphores-03249",
                                 "VkQueueBindSparse: signal value (0x%" PRIx64
                                 ") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64
                                 ") in pBindInfo[%u].pSignalSemaphores[%u]",
                                 timeline_semaphore_submit_info->pSignalSemaphoreValues[i],
                                 report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(),
                                 semaphore_state->payload, bind_idx, i);
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
                bind_info.signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
                skip |=
                    LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03248",
                             "VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, it contains "
                             "an instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different "
                             "than pBindInfo[%u].signalSemaphoreCount (%u)",
                             bind_idx, i, report_data->FormatHandle(semaphore).c_str(),
                             timeline_semaphore_submit_info->signalSemaphoreValueCount, bind_idx, bind_info.signalSemaphoreCount);
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
                semaphore_state->scope == kSyncScopeInternal) {
                // Signaling an already-signaled binary semaphore stalls forward progress.
                if (signaled_semaphores.count(semaphore) ||
                    (!(unsignaled_semaphores.count(semaphore)) && semaphore_state->signaled)) {
                    LogObjectList objlist(semaphore);
                    objlist.add(queue);
                    objlist.add(semaphore_state->signaler.first);
                    skip |= LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
                                     "vkQueueBindSparse(): %s is signaling pBindInfo[%u].pSignalSemaphores[%u] (%s) that was "
                                     "previously signaled by %s but has not since been waited on by any queue.",
                                     report_data->FormatHandle(queue).c_str(), bind_idx, i,
                                     report_data->FormatHandle(semaphore).c_str(),
                                     report_data->FormatHandle(semaphore_state->signaler.first).c_str());
                } else {
                    unsignaled_semaphores.erase(semaphore);
                    signaled_semaphores.insert(semaphore);
                }
            }
        }

        for (uint32_t image_idx = 0; image_idx < bind_info.imageBindCount; ++image_idx) {
            const VkSparseImageMemoryBindInfo &image_bind = bind_info.pImageBinds[image_idx];
            const auto image_state = GetImageState(image_bind.image);

            if (image_state && !(image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)) {
                skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-image-02901",
                                 "vkQueueBindSparse(): pBindInfo[%u].pImageBinds[%u]: image must have been created with "
                                 "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT set",
                                 bind_idx, image_idx);
            }
        }
    }

    if (skip) return skip;

    // Now verify maxTimelineSemaphoreValueDifference
    for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) {
        const VkBindSparseInfo *bind_info = &pBindInfo[bind_idx];
        auto *info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(bind_info->pNext);
        if (info) {
            // If there are any timeline semaphores, this condition gets checked before the early return above
            if (info->waitSemaphoreValueCount) {
                for (uint32_t i = 0; i < bind_info->waitSemaphoreCount; ++i) {
                    VkSemaphore semaphore = bind_info->pWaitSemaphores[i];
                    skip |=
                        ValidateMaxTimelineSemaphoreValueDifference(semaphore, info->pWaitSemaphoreValues[i], "VkQueueBindSparse",
                                                                    "VUID-VkBindSparseInfo-pWaitSemaphores-03250");
                }
            }
            // If there are any timeline semaphores, this condition gets checked before the early return above
            if (info->signalSemaphoreValueCount) {
                for (uint32_t i = 0; i < bind_info->signalSemaphoreCount; ++i) {
                    VkSemaphore semaphore = bind_info->pSignalSemaphores[i];
                    skip |=
                        ValidateMaxTimelineSemaphoreValueDifference(semaphore, info->pSignalSemaphoreValues[i], "VkQueueBindSparse",
                                                                    "VUID-VkBindSparseInfo-pSignalSemaphores-03251");
                }
            }
        }
    }
    return skip;
}
// Shared validation for vkSignalSemaphore / vkSignalSemaphoreKHR: the semaphore must be a
// timeline semaphore, the new value must exceed both the current payload and every pending
// signal operation's payload, and the value must respect maxTimelineSemaphoreValueDifference.
// api_name is the calling entry point's name, used in all logged messages.
// Returns true if any error was logged.
bool CoreChecks::ValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo, const char *api_name) const {
    bool skip = false;
    const auto semaphore_state = GetSemaphoreState(pSignalInfo->semaphore);
    if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) {
        skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-semaphore-03257",
                         "%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", api_name,
                         report_data->FormatHandle(pSignalInfo->semaphore).c_str());
        // Remaining checks only make sense for timeline semaphores.
        return skip;
    }
    if (semaphore_state && semaphore_state->payload >= pSignalInfo->value) {
        skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03258",
                         "%s(): value must be greater than current semaphore %s value", api_name,
                         report_data->FormatHandle(pSignalInfo->semaphore).c_str());
    }
    // The host signal value must also exceed every signal operation already queued on any queue.
    for (auto &pair : queueMap) {
        const QUEUE_STATE &queue_state = pair.second;
        for (const auto &submission : queue_state.submissions) {
            for (const auto &signal_semaphore : submission.signalSemaphores) {
                if (signal_semaphore.semaphore == pSignalInfo->semaphore && pSignalInfo->value >= signal_semaphore.payload) {
                    skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03259",
                                     "%s(): value must be greater than value of pending signal operation "
                                     "for semaphore %s",
                                     api_name, report_data->FormatHandle(pSignalInfo->semaphore).c_str());
                }
            }
        }
    }

    if (!skip) {
        // Pass the caller's api_name through so the message names the actual entry point
        // (previously hard-coded to "VkSignalSemaphoreKHR").
        skip |= ValidateMaxTimelineSemaphoreValueDifference(pSignalInfo->semaphore, pSignalInfo->value, api_name,
                                                            "VUID-VkSemaphoreSignalInfo-value-03260");
    }

    return skip;
}
// vkSignalSemaphore: thin wrapper over the shared signal-semaphore validation.
bool CoreChecks::PreCallValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const {
    const bool skip = ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphore");
    return skip;
}
// vkSignalSemaphoreKHR: thin wrapper over the shared signal-semaphore validation.
bool CoreChecks::PreCallValidateSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const {
    const bool skip = ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphoreKHR");
    return skip;
}
// Shared validation for semaphore import entry points: the semaphore must not be in use.
bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) const {
    bool skip = false;
    const SEMAPHORE_STATE *semaphore_state = GetSemaphoreState(semaphore);
    if (semaphore_state != nullptr) {
        const VulkanTypedHandle typed_handle(semaphore, kVulkanObjectTypeSemaphore);
        skip |= ValidateObjectNotInUse(semaphore_state, typed_handle, caller_name, kVUIDUndefined);
    }
    return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
// vkImportSemaphoreWin32HandleKHR: delegate to the shared semaphore import check.
bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR(
    VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) const {
    const bool skip = ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
    return skip;
}
#endif  // VK_USE_PLATFORM_WIN32_KHR
// vkImportSemaphoreFdKHR: delegate to the shared semaphore import check.
bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device,
                                                     const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) const {
    const bool skip = ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
    return skip;
}
// Shared validation for fence import entry points: an internally-scoped fence must not be in flight.
bool CoreChecks::ValidateImportFence(VkFence fence, const char *vuid, const char *caller_name) const {
    bool skip = false;
    const FENCE_STATE *fence_state = GetFenceState(fence);
    const bool in_flight =
        fence_state && (fence_state->scope == kSyncScopeInternal) && (fence_state->state == FENCE_INFLIGHT);
    if (in_flight) {
        skip |= LogError(fence, vuid, "%s: Fence %s that is currently in use.", caller_name,
                         report_data->FormatHandle(fence).c_str());
    }
    return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
// vkImportFenceWin32HandleKHR: delegate to the shared fence import check.
bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(
    VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) const {
    const bool skip = ValidateImportFence(pImportFenceWin32HandleInfo->fence, "VUID-vkImportFenceWin32HandleKHR-fence-04448",
                                          "vkImportFenceWin32HandleKHR()");
    return skip;
}
#endif  // VK_USE_PLATFORM_WIN32_KHR
// vkImportFenceFdKHR: delegate to the shared fence import check.
bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) const {
    const bool skip = ValidateImportFence(pImportFenceFdInfo->fence, "VUID-vkImportFenceFdKHR-fence-01463", "vkImportFenceFdKHR()");
    return skip;
}
// Build the VkImageCreateInfo that a swapchain with the given create info implies for its
// presentable images (mapping swapchain create flags to the corresponding image create flags).
static VkImageCreateInfo GetSwapchainImpliedImageCreateInfo(VkSwapchainCreateInfoKHR const *pCreateInfo) {
    // Translate swapchain flags to their image-flag equivalents first.
    VkImageCreateFlags implied_flags = 0;
    if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) {
        implied_flags |= VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT;
    }
    if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) {
        implied_flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
        implied_flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;
    }

    VkImageCreateInfo result = {};
    result.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    result.pNext = nullptr;
    result.flags = implied_flags;
    // Presentable images are always single-sample, single-mip, optimally-tiled 2D images.
    result.imageType = VK_IMAGE_TYPE_2D;
    result.format = pCreateInfo->imageFormat;
    result.extent.width = pCreateInfo->imageExtent.width;
    result.extent.height = pCreateInfo->imageExtent.height;
    result.extent.depth = 1;
    result.mipLevels = 1;
    result.arrayLayers = pCreateInfo->imageArrayLayers;
    result.samples = VK_SAMPLE_COUNT_1_BIT;
    result.tiling = VK_IMAGE_TILING_OPTIMAL;
    result.usage = pCreateInfo->imageUsage;
    result.sharingMode = pCreateInfo->imageSharingMode;
    result.queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount;
    result.pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices;
    result.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    return result;
}
bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo,
const SURFACE_STATE *surface_state, const SWAPCHAIN_NODE *old_swapchain_state) const {
// All physical devices and queue families are required to be able to present to any native window on Android; require the
// application to have established support on any other platform.
if (!instance_extensions.vk_khr_android_surface) {
auto support_predicate = [this](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
// TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
return (qs.first.gpu == physical_device) && qs.second;
};
const auto &support = surface_state->gpu_queue_support;
bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
if (!is_supported) {
if (LogError(
device, "VUID-VkSwapchainCreateInfoKHR-surface-01270",
"%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
"vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
"this surface for at least one queue family of this device.",
func_name)) {
return true;
}
}
}
if (old_swapchain_state) {
if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name)) {
return true;
}
}
if (old_swapchain_state->retired) {
if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain is retired", func_name)) {
return true;
}
}
}
if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689",
"%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width,
pCreateInfo->imageExtent.height)) {
return true;
}
}
auto physical_device_state = GetPhysicalDeviceState();
bool skip = false;
VkSurfaceTransformFlagBitsKHR current_transform = physical_device_state->surfaceCapabilities.currentTransform;
if ((pCreateInfo->preTransform & current_transform) != pCreateInfo->preTransform) {
skip |= LogPerformanceWarning(physical_device, kVUID_Core_Swapchain_PreTransform,
"%s: pCreateInfo->preTransform (%s) doesn't match the currentTransform (%s) returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR, the presentation engine will transform the image "
"content as part of the presentation operation.",
func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform),
string_VkSurfaceTransformFlagBitsKHR(current_transform));
}
VkSurfaceCapabilitiesKHR capabilities{};
DispatchGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_state->phys_device, pCreateInfo->surface, &capabilities);
// Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
if (pCreateInfo->minImageCount < capabilities.minImageCount) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) {
return true;
}
}
if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) {
return true;
}
}
// Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
(pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
(pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
(pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274",
"%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
"maxImageExtent = (%d,%d).",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width,
capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height,
capabilities.maxImageExtent.width, capabilities.maxImageExtent.height)) {
return true;
}
}
// pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedTransforms.
if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
!(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string error_string = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
error_string += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedTransforms) {
const char *new_str = string_VkSurfaceTransformFlagBitsKHR(static_cast<VkSurfaceTransformFlagBitsKHR>(1 << i));
sprintf(str, " %s\n", new_str);
error_string += str;
}
}
// Log the message that we've built up:
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", error_string.c_str())) return true;
}
// pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
!((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string error_string = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n", func_name,
string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
error_string += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedCompositeAlpha) {
const char *new_str = string_VkCompositeAlphaFlagBitsKHR(static_cast<VkCompositeAlphaFlagBitsKHR>(1 << i));
sprintf(str, " %s\n", new_str);
error_string += str;
}
}
// Log the message that we've built up:
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", error_string.c_str())) return true;
}
// Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275",
"%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name,
pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers)) {
return true;
}
}
// Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
const char *validation_error = "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276";
if ((IsExtEnabled(device_extensions.vk_khr_shared_presentable_image) == true) &&
((pCreateInfo->presentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) ||
(pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR) || (pCreateInfo->presentMode == VK_PRESENT_MODE_FIFO_KHR) ||
(pCreateInfo->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR))) {
validation_error = "VUID-VkSwapchainCreateInfoKHR-presentMode-01427";
}
if (LogError(device, validation_error,
"%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.",
func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags)) {
return true;
}
}
if (device_extensions.vk_khr_surface_protected_capabilities && (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) {
VkPhysicalDeviceSurfaceInfo2KHR surface_info = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR};
surface_info.surface = pCreateInfo->surface;
VkSurfaceProtectedCapabilitiesKHR surface_protected_capabilities = {VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR};
VkSurfaceCapabilities2KHR surface_capabilities = {VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR};
surface_capabilities.pNext = &surface_protected_capabilities;
DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->phys_device, &surface_info, &surface_capabilities);
if (!surface_protected_capabilities.supportsProtected) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03187",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface "
"capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.",
func_name)) {
return true;
}
}
}
std::vector<VkSurfaceFormatKHR> surface_formats;
const auto *surface_formats_ref = &surface_formats;
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
if (physical_device_state->surface_formats.empty()) {
uint32_t surface_format_count = 0;
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, nullptr);
surface_formats.resize(surface_format_count);
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count,
&surface_formats[0]);
} else {
surface_formats_ref = &physical_device_state->surface_formats;
}
{
// Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
bool found_format = false;
bool found_color_space = false;
bool found_match = false;
for (auto const &format : *surface_formats_ref) {
if (pCreateInfo->imageFormat == format.format) {
// Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
found_format = true;
if (pCreateInfo->imageColorSpace == format.colorSpace) {
found_match = true;
break;
}
} else {
if (pCreateInfo->imageColorSpace == format.colorSpace) {
found_color_space = true;
}
}
}
if (!found_match) {
if (!found_format) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageFormat (%s).", func_name,
string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
if (!found_color_space) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageColorSpace (%s).", func_name,
string_VkColorSpaceKHR(pCreateInfo->imageColorSpace))) {
return true;
}
}
}
}
std::vector<VkPresentModeKHR> present_modes;
const auto *present_modes_ref = &present_modes;
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
if (physical_device_state->present_modes.empty()) {
uint32_t present_mode_count = 0;
DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface,
&present_mode_count, nullptr);
present_modes.resize(present_mode_count);
DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface,
&present_mode_count, &present_modes[0]);
} else {
present_modes_ref = &physical_device_state->present_modes;
}
// Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
bool found_match =
std::find(present_modes_ref->begin(), present_modes_ref->end(), pCreateInfo->presentMode) != present_modes_ref->end();
if (!found_match) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-presentMode-01281",
"%s called with a non-supported presentMode (i.e. %s).", func_name,
string_VkPresentModeKHR(pCreateInfo->presentMode))) {
return true;
}
}
// Validate state for shared presentable case
if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
if (!device_extensions.vk_khr_shared_presentable_image) {
if (LogError(
device, kVUID_Core_DrawState_ExtensionNotEnabled,
"%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
"been enabled.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode))) {
return true;
}
} else if (pCreateInfo->minImageCount != 1) {
if (LogError(
device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383",
"%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
"must be 1.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount)) {
return true;
}
}
}
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
if (!device_extensions.vk_khr_swapchain_mutable_format) {
if (LogError(device, kVUID_Core_DrawState_ExtensionNotEnabled,
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR which requires the "
"VK_KHR_swapchain_mutable_format extension, which has not been enabled.",
func_name)) {
return true;
}
} else {
const auto *image_format_list = LvlFindInChain<VkImageFormatListCreateInfo>(pCreateInfo->pNext);
if (image_format_list == nullptr) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the pNext chain of "
"pCreateInfo does not contain an instance of VkImageFormatListCreateInfo.",
func_name)) {
return true;
}
} else if (image_format_list->viewFormatCount == 0) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the viewFormatCount "
"member of VkImageFormatListCreateInfo in the pNext chain is zero.",
func_name)) {
return true;
}
} else {
bool found_base_format = false;
for (uint32_t i = 0; i < image_format_list->viewFormatCount; ++i) {
if (image_format_list->pViewFormats[i] == pCreateInfo->imageFormat) {
found_base_format = true;
break;
}
}
if (!found_base_format) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but none of the "
"elements of the pViewFormats member of VkImageFormatListCreateInfo match "
"pCreateInfo->imageFormat.",
func_name)) {
return true;
}
}
}
}
}
if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) {
bool skip1 = ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices",
"VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428");
if (skip1) return true;
}
// Validate pCreateInfo->imageUsage against GetPhysicalDeviceFormatProperties
const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->imageFormat);
const VkFormatFeatureFlags tiling_features = format_properties.optimalTilingFeatures;
if (tiling_features == 0) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL has no supported format features on this "
"physical device.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_SAMPLED_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_STORAGE_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) &&
!(tiling_features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
const VkImageCreateInfo image_create_info = GetSwapchainImpliedImageCreateInfo(pCreateInfo);
VkImageFormatProperties image_properties = {};
const VkResult image_properties_result = DispatchGetPhysicalDeviceImageFormatProperties(
physical_device, image_create_info.format, image_create_info.imageType, image_create_info.tiling, image_create_info.usage,
image_create_info.flags, &image_properties);
if (image_properties_result != VK_SUCCESS) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"vkGetPhysicalDeviceImageFormatProperties() unexpectedly failed, "
"when called for %s validation with following params: "
"format: %s, imageType: %s, "
"tiling: %s, usage: %s, "
"flags: %s.",
func_name, string_VkFormat(image_create_info.format), string_VkImageType(image_create_info.imageType),
string_VkImageTiling(image_create_info.tiling), string_VkImageUsageFlags(image_create_info.usage).c_str(),
string_VkImageCreateFlags(image_create_info.flags).c_str())) {
return true;
}
}
// Validate pCreateInfo->imageArrayLayers against VkImageFormatProperties::maxArrayLayers
if (pCreateInfo->imageArrayLayers > image_properties.maxArrayLayers) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s called with a non-supported imageArrayLayers (i.e. %d). "
"Maximum value returned by vkGetPhysicalDeviceImageFormatProperties() is %d "
"for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL",
func_name, pCreateInfo->imageArrayLayers, image_properties.maxArrayLayers,
string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
// Validate pCreateInfo->imageExtent against VkImageFormatProperties::maxExtent
if ((pCreateInfo->imageExtent.width > image_properties.maxExtent.width) ||
(pCreateInfo->imageExtent.height > image_properties.maxExtent.height)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s called with imageExtent = (%d,%d), which is bigger than max extent (%d,%d)"
"returned by vkGetPhysicalDeviceImageFormatProperties(): "
"for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, image_properties.maxExtent.width,
image_properties.maxExtent.height, string_VkFormat(pCreateInfo->imageFormat))) {
return true;
}
}
if ((pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) &&
device_group_create_info.physicalDeviceCount == 1) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-physicalDeviceCount-01429",
"%s called with flags containing VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR"
"but logical device was created with VkDeviceGroupDeviceCreateInfo::physicalDeviceCount equal to 1",
func_name)) {
return true;
}
}
return skip;
}
// Entry point: vkCreateSwapchainKHR — resolve tracked surface/old-swapchain state and
// hand off to the shared ValidateCreateSwapchain() helper.
bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) const {
    const auto surface = GetSurfaceState(pCreateInfo->surface);
    const auto old_swapchain = GetSwapchainState(pCreateInfo->oldSwapchain);
    return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface, old_swapchain);
}
void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain,
                                                  const VkAllocationCallbacks *pAllocator) {
    // Drop this layer's per-image bookkeeping (layout-map entries and queued QFO release
    // barriers) before the state tracker destroys the swapchain's image state.
    auto swapchain_data = swapchain ? GetSwapchainState(swapchain) : nullptr;
    if (swapchain_data) {
        for (const auto &swapchain_image : swapchain_data->images) {
            imageLayoutMap.erase(swapchain_image.image);
            EraseQFOImageRelaseBarriers(swapchain_image.image);
        }
    }
    StateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator);
}
bool CoreChecks::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                      VkImage *pSwapchainImages) const {
    // Only the second (array-filling) call can request too many images; the count-query
    // call (pSwapchainImages == NULL) has nothing to validate.
    const auto swapchain_state = GetSwapchainState(swapchain);
    if (!swapchain_state || !pSwapchainImages) {
        return false;
    }
    if (*pSwapchainImageCount <= swapchain_state->get_swapchain_image_count) {
        return false;
    }
    return LogError(device, kVUID_Core_Swapchain_InvalidCount,
                    "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, and with pSwapchainImageCount set to a "
                    "value (%d) that is greater than the value (%d) that was returned when pSwapchainImages was NULL.",
                    *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
}
void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages, VkResult result) {
    // This entry point runs twice: the first call only returns the image count, the second
    // fills pSwapchainImages. StateTracker::PostCallRecordGetSwapchainImagesKHR creates the
    // IMAGE_STATE objects only on the second call, so the layout-map seeding below must run
    // after the state tracker, and only for images that were newly created by this call.
    const bool filled_images = ((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && (pSwapchainImages != nullptr);
    // Scan (before the state tracker mutates anything) for the first slot that is not yet
    // backed by a tracked image; everything from that index onward is new this call.
    uint32_t first_new_index = 0;
    if (filled_images) {
        auto swapchain_state = GetSwapchainState(swapchain);
        const auto tracked_count = swapchain_state->images.size();
        while (first_new_index < *pSwapchainImageCount) {
            const bool already_tracked =
                (first_new_index < tracked_count) && (swapchain_state->images[first_new_index].image != VK_NULL_HANDLE);
            if (!already_tracked) {
                break;
            }
            ++first_new_index;
        }
    }
    StateTracker::PostCallRecordGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, result);
    if (filled_images) {
        for (uint32_t i = first_new_index; i < *pSwapchainImageCount; ++i) {
            auto image_state = Get<IMAGE_STATE>(pSwapchainImages[i]);
            AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap);
        }
    }
}
// Validate vkQueuePresentKHR: wait-semaphore types and signal state, per-swapchain image
// index/acquisition/layout, presentation support for the presenting queue, and the
// VkPresentRegionsKHR / VkPresentTimesInfoGOOGLE pNext extensions.
// Accumulates findings in `skip` (never early-returns) so all errors are reported.
bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) const {
    bool skip = false;
    const auto queue_state = GetQueueState(queue);

    // Present may only wait on binary semaphores, and each must have a pending/possible signal.
    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        const auto semaphore_state = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]);
        if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) {
            skip |= LogError(pPresentInfo->pWaitSemaphores[i], "VUID-vkQueuePresentKHR-pWaitSemaphores-03267",
                             "vkQueuePresentKHR: pWaitSemaphores[%u] (%s) is not a VK_SEMAPHORE_TYPE_BINARY", i,
                             report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
        }
        // Not signaled now and no queued submission will signal it => the wait can never complete.
        if (semaphore_state && !semaphore_state->signaled && !SemaphoreWasSignaled(pPresentInfo->pWaitSemaphores[i])) {
            LogObjectList objlist(queue);
            objlist.add(pPresentInfo->pWaitSemaphores[i]);
            skip |= LogError(objlist, "VUID-vkQueuePresentKHR-pWaitSemaphores-03268",
                             "vkQueuePresentKHR: Queue %s is waiting on pWaitSemaphores[%u] (%s) that has no way to be signaled.",
                             report_data->FormatHandle(queue).c_str(), i,
                             report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
        }
    }

    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
        if (swapchain_data) {
            // VU currently is 2-in-1, covers being a valid index and valid layout
            const char *validation_error = (device_extensions.vk_khr_shared_presentable_image)
                                               ? "VUID-VkPresentInfoKHR-pImageIndices-01430"
                                               : "VUID-VkPresentInfoKHR-pImageIndices-01296";

            // Check if index is even possible to be acquired to give better error message
            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
                skip |= LogError(
                    pPresentInfo->pSwapchains[i], validation_error,
                    "vkQueuePresentKHR: pSwapchains[%u] image index is too large (%u). There are only %u images in this swapchain.",
                    i, pPresentInfo->pImageIndices[i], static_cast<uint32_t>(swapchain_data->images.size()));
            } else {
                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]].image;
                const auto image_state = GetImageState(image);

                // The image being presented must have been acquired (and not yet presented).
                if (!image_state->acquired) {
                    skip |= LogError(pPresentInfo->pSwapchains[i], validation_error,
                                     "vkQueuePresentKHR: pSwapchains[%u] image index %u has not been acquired.", i,
                                     pPresentInfo->pImageIndices[i]);
                }

                // Every tracked subresource layout must be PRESENT_SRC (or SHARED_PRESENT when
                // the shared-presentable-image extension is enabled).
                vector<VkImageLayout> layouts;
                if (FindLayouts(image, layouts)) {
                    for (auto layout : layouts) {
                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!device_extensions.vk_khr_shared_presentable_image ||
                                                                            (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
                            skip |= LogError(queue, validation_error,
                                             "vkQueuePresentKHR(): pSwapchains[%u] images passed to present must be in layout "
                                             "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
                                             "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
                                             i, string_VkImageLayout(layout));
                        }
                    }
                }
            }

            // All physical devices and queue families are required to be able to present to any native window on Android; require
            // the application to have established support on any other platform.
            if (!instance_extensions.vk_khr_android_surface) {
                const auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
                auto support_it = surface_state->gpu_queue_support.find({physical_device, queue_state->queueFamilyIndex});

                if (support_it == surface_state->gpu_queue_support.end()) {
                    // App never queried support for this (gpu, queue family) pair at all.
                    skip |= LogError(
                        pPresentInfo->pSwapchains[i], kVUID_Core_DrawState_SwapchainUnsupportedQueue,
                        "vkQueuePresentKHR: Presenting pSwapchains[%u] image without calling vkGetPhysicalDeviceSurfaceSupportKHR",
                        i);
                } else if (!support_it->second) {
                    // Support was queried and the answer was VK_FALSE.
                    skip |= LogError(
                        pPresentInfo->pSwapchains[i], "VUID-vkQueuePresentKHR-pSwapchains-01292",
                        "vkQueuePresentKHR: Presenting pSwapchains[%u] image on queue that cannot present to this surface.", i);
                }
            }
        }
    }
    if (pPresentInfo->pNext) {
        // Verify ext struct
        const auto *present_regions = LvlFindInChain<VkPresentRegionsKHR>(pPresentInfo->pNext);
        if (present_regions) {
            // Each region's rectangles must stay inside the corresponding swapchain's extent/layers.
            for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
                const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
                assert(swapchain_data);
                VkPresentRegionKHR region = present_regions->pRegions[i];
                for (uint32_t j = 0; j < region.rectangleCount; ++j) {
                    VkRectLayerKHR rect = region.pRectangles[j];
                    if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
                        skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-01261",
                                         "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
                                         "pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
                                         "than the corresponding swapchain's imageExtent.width (%i).",
                                         i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
                    }
                    if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
                        skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-01261",
                                         "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
                                         "pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
                                         "than the corresponding swapchain's imageExtent.height (%i).",
                                         i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
                    }
                    if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
                        skip |= LogError(
                            pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-layer-01262",
                            "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
                            "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
                            i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
                    }
                }
            }
        }

        const auto *present_times_info = LvlFindInChain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
        if (present_times_info) {
            // The GOOGLE timing struct must cover exactly the same swapchains as the present itself.
            if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
                skip |=
                    LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247",
                             "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
                             "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
                             "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
                             present_times_info->swapchainCount, pPresentInfo->swapchainCount);
            }
        }
    }

    return skip;
}
// Entry point: vkCreateSharedSwapchainsKHR — validate each requested swapchain independently
// with the shared ValidateCreateSwapchain() helper, accumulating findings.
bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                          const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                          const VkAllocationCallbacks *pAllocator,
                                                          VkSwapchainKHR *pSwapchains) const {
    bool skip = false;
    if (!pCreateInfos) {
        return skip;
    }
    for (uint32_t i = 0; i < swapchainCount; i++) {
        // NOTE(review): the bracketed value in the message is swapchainCount, not the loop
        // index i — presumably the index was intended; confirm before changing the text.
        std::stringstream func_name;
        func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()";
        const auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
        const auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain);
        skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state);
    }
    return skip;
}
// Shared validation for vkAcquireNextImageKHR / vkAcquireNextImage2KHR; cmd_version selects
// which entry point's timeout-related VUID is reported, and semaphore_type_vuid is supplied
// by the caller for the binary-semaphore requirement.
bool CoreChecks::ValidateAcquireNextImage(VkDevice device, const CommandVersion cmd_version, VkSwapchainKHR swapchain,
                                          uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex,
                                          const char *func_name, const char *semaphore_type_vuid) const {
    bool skip = false;
    auto semaphore_state = GetSemaphoreState(semaphore);
    // Acquire only accepts binary semaphores; timeline semaphores are reported with the
    // caller-supplied VUID.
    if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) {
        skip |= LogError(semaphore, semaphore_type_vuid, "%s: %s is not a VK_SEMAPHORE_TYPE_BINARY", func_name,
                         report_data->FormatHandle(semaphore).c_str());
    }
    // The semaphore must be unsignaled so this acquire can signal it.
    if (semaphore_state && semaphore_state->scope == kSyncScopeInternal && semaphore_state->signaled) {
        skip |= LogError(semaphore, "VUID-vkAcquireNextImageKHR-semaphore-01286",
                         "%s: Semaphore must not be currently signaled or in a wait state.", func_name);
    }
    auto fence_state = GetFenceState(fence);
    if (fence_state) {
        // The fence must be in a submittable (unsignaled, not in-flight) state.
        skip |= ValidateFenceForSubmit(fence_state, "VUID-vkAcquireNextImageKHR-fence-01287",
                                       "VUID-vkAcquireNextImageKHR-fence-01287", "vkAcquireNextImageKHR()");
    }
    const auto swapchain_data = GetSwapchainState(swapchain);
    if (swapchain_data) {
        // A retired swapchain (replaced via oldSwapchain) can present but not acquire.
        if (swapchain_data->retired) {
            skip |= LogError(swapchain, "VUID-vkAcquireNextImageKHR-swapchain-01285",
                             "%s: This swapchain has been retired. The application can still present any images it "
                             "has acquired, but cannot acquire any more.",
                             func_name);
        }
        auto physical_device_state = GetPhysicalDeviceState();
        // TODO: this is technically wrong on many levels, but requires massive cleanup
        if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHR_called) {
            // Count images the application currently holds. With an infinite timeout the app must
            // leave at least minImageCount images unacquired, otherwise the acquire may never return.
            const uint32_t acquired_images = static_cast<uint32_t>(
                std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(), [this](SWAPCHAIN_IMAGE image) {
                    auto const state = GetImageState(image.image);
                    return (state && state->acquired);
                }));
            const uint32_t swapchain_image_count = static_cast<uint32_t>(swapchain_data->images.size());
            const auto min_image_count = physical_device_state->surfaceCapabilities.minImageCount;
            const bool too_many_already_acquired = acquired_images > swapchain_image_count - min_image_count;
            if (timeout == UINT64_MAX && too_many_already_acquired) {
                // Pick the VUID matching the entry point the app actually called.
                const char *vuid = "INVALID-vuid";
                if (cmd_version == CMD_VERSION_1) {
                    vuid = "VUID-vkAcquireNextImageKHR-swapchain-01802";
                } else if (cmd_version == CMD_VERSION_2) {
                    vuid = "VUID-vkAcquireNextImage2KHR-swapchain-01803";
                } else {
                    assert(false);
                }
                const uint32_t acquirable = swapchain_image_count - min_image_count + 1;
                skip |= LogError(swapchain, vuid,
                                 "%s: Application has already previously acquired %" PRIu32 " image%s from swapchain. Only %" PRIu32
                                 " %s available to be acquired using a timeout of UINT64_MAX (given the swapchain has %" PRIu32
                                 ", and VkSurfaceCapabilitiesKHR::minImageCount is %" PRIu32 ").",
                                 func_name, acquired_images, acquired_images > 1 ? "s" : "", acquirable,
                                 acquirable > 1 ? "are" : "is", swapchain_image_count, min_image_count);
            }
        }
    }
    return skip;
}
// vkAcquireNextImageKHR entry point; all checks live in the shared helper.
bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                    VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) const {
    return ValidateAcquireNextImage(device, CMD_VERSION_1, swapchain, timeout, semaphore, fence, pImageIndex,
                                    "vkAcquireNextImageKHR", "VUID-vkAcquireNextImageKHR-semaphore-03265");
}
// vkAcquireNextImage2KHR adds a device mask; validate it first, then run the shared
// acquire checks with the version-2 VUIDs.
bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
                                                     uint32_t *pImageIndex) const {
    bool skip = false;
    // deviceMask must only reference physical devices in the group, and must be non-zero.
    skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, pAcquireInfo->swapchain,
                                                    "VUID-VkAcquireNextImageInfoKHR-deviceMask-01290");
    skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, pAcquireInfo->swapchain,
                                     "VUID-VkAcquireNextImageInfoKHR-deviceMask-01291");
    skip |= ValidateAcquireNextImage(device, CMD_VERSION_2, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
                                     pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR",
                                     "VUID-VkAcquireNextImageInfoKHR-semaphore-03266");
    return skip;
}
// A surface must outlive any swapchain created from it; flag destruction while an
// associated swapchain still exists.
bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
                                                  const VkAllocationCallbacks *pAllocator) const {
    const auto surface_state = GetSurfaceState(surface);
    if (!surface_state || !surface_state->swapchain) {
        return false;
    }
    return LogError(instance, "VUID-vkDestroySurfaceKHR-surface-01266",
                    "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
}
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
// Wayland presentation-support query: the only stateful check is that queueFamilyIndex is
// valid for this physical device.
bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                               uint32_t queueFamilyIndex,
                                                                               struct wl_display *display) const {
    const auto pd_state = GetPhysicalDeviceState(physicalDevice);
    return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
                                    "VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306",
                                    "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
}
#endif  // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
// Win32 presentation-support query: queueFamilyIndex must be valid for this physical device.
bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                             uint32_t queueFamilyIndex) const {
    const auto pd_state = GetPhysicalDeviceState(physicalDevice);
    return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
                                    "VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309",
                                    "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
}
#endif  // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
// XCB presentation-support query: queueFamilyIndex must be valid for this physical device.
bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                           uint32_t queueFamilyIndex, xcb_connection_t *connection,
                                                                           xcb_visualid_t visual_id) const {
    const auto pd_state = GetPhysicalDeviceState(physicalDevice);
    return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
                                    "VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312",
                                    "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
}
#endif  // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
// Xlib presentation-support query: queueFamilyIndex must be valid for this physical device.
bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                                            uint32_t queueFamilyIndex, Display *dpy,
                                                                            VisualID visualID) const {
    const auto pd_state = GetPhysicalDeviceState(physicalDevice);
    return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
                                    "VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315",
                                    "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
}
#endif  // VK_USE_PLATFORM_XLIB_KHR
// Surface-support query: the only stateful check is that queueFamilyIndex is valid for
// this physical device.
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
                                                                   VkSurfaceKHR surface, VkBool32 *pSupported) const {
    const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    return ValidateQueueFamilyIndex(physical_device_state, queueFamilyIndex,
                                    "VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269",
                                    "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
}
// Validates VkDescriptorUpdateTemplateCreateInfo for both template types: DESCRIPTOR_SET
// templates need a valid descriptor set layout; PUSH_DESCRIPTORS templates need a valid
// bind point, pipeline layout, and a set index that names a push descriptor set layout.
bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name,
                                                  const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo) const {
    bool skip = false;
    const auto layout = GetDescriptorSetLayoutShared(pCreateInfo->descriptorSetLayout);
    if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
        skip |= LogError(pCreateInfo->descriptorSetLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350",
                         "%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name,
                         report_data->FormatHandle(pCreateInfo->descriptorSetLayout).c_str());
    } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
        auto bind_point = pCreateInfo->pipelineBindPoint;
        bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) ||
                        (bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV);
        if (!valid_bp) {
            skip |=
                LogError(device, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351",
                         "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point));
        }
        const auto pipeline_layout = GetPipelineLayout(pCreateInfo->pipelineLayout);
        if (!pipeline_layout) {
            skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352",
                             "%s: Invalid pCreateInfo->pipelineLayout (%s)", func_name,
                             report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str());
        } else {
            // The referenced set must exist in the pipeline layout and be a push descriptor set layout.
            const uint32_t pd_set = pCreateInfo->set;
            if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
                !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
                skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353",
                                 "%s: pCreateInfo->set (%" PRIu32
                                 ") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).",
                                 func_name, pd_set, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str());
            }
        }
    }
    return skip;
}
// Core-1.1 entry point; all checks live in the shared template validator.
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device,
                                                               const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
                                                               const VkAllocationCallbacks *pAllocator,
                                                               VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const {
    return ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo);
}
// KHR-aliased entry point; all checks live in the shared template validator.
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device,
                                                                  const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo,
                                                                  const VkAllocationCallbacks *pAllocator,
                                                                  VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const {
    return ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo);
}
// Looks up the template and, for DESCRIPTOR_SET-type templates, validates the decoded
// descriptor writes against the target set.
bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet,
                                                         VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                                         const void *pData) const {
    auto const iter = desc_template_map.find(descriptorUpdateTemplate);
    const TEMPLATE_STATE *template_state = (iter != desc_template_map.end()) ? iter->second.get() : nullptr;
    if (template_state == nullptr) {
        // Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds
        // but retaining the assert as template support is new enough to want to investigate these in debug builds.
        assert(0);
        return false;
    }
    // TODO: Validate template push descriptor updates
    if (template_state->create_info.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
        return false;
    }
    return ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData);
}
// Core-1.1 entry point; forwards to the shared template-update validator.
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
                                                                VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                                                const void *pData) const {
    return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
// KHR-aliased entry point; forwards to the shared template-update validator.
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
                                                                   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                                                   const void *pData) const {
    return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
// Validates vkCmdPushDescriptorSetWithTemplateKHR: command-buffer state, that |set| names a
// push-descriptor set layout in range for |layout|, that the template was created for push
// descriptors with a matching bind point / set / compatible pipeline layout, and finally the
// decoded writes themselves against a proxy descriptor set.
// Fix: the two set-index errors below previously used `skip =`, which could discard an error
// already accumulated from ValidateCmd if the later LogError was filtered and returned false;
// they now accumulate with `skip |=` like every other check in this function.
bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                                    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
                                                                    VkPipelineLayout layout, uint32_t set,
                                                                    const void *pData) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    const char *const func_name = "vkPushDescriptorSetWithTemplateKHR()";
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, func_name);
    const auto layout_data = GetPipelineLayout(layout);
    const auto dsl = GetDslFromPipelineLayout(layout_data, set);
    // Validate the set index points to a push descriptor set and is in range
    if (dsl) {
        if (!dsl->IsPushDescriptor()) {
            skip |= LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
                             "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set,
                             report_data->FormatHandle(layout).c_str());
        }
    } else if (layout_data && (set >= layout_data->set_layouts.size())) {
        skip |= LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
                         "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
                         report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size()));
    }
    const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate);
    if (template_state) {
        const auto &template_ci = template_state->create_info;
        // The same VUID covers all three supported bind points.
        static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
            std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
            std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
            std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
                           "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")};
        skip |= ValidatePipelineBindPoint(cb_state, template_ci.pipelineBindPoint, func_name, bind_errors);
        // The template must have been created for push descriptors, for this set, with a
        // pipeline layout compatible with the one passed to this command.
        if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
            skip |= LogError(cb_state->commandBuffer, kVUID_Core_PushDescriptorUpdate_TemplateType,
                             "%s: descriptorUpdateTemplate %s was not created with flag "
                             "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.",
                             func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str());
        }
        if (template_ci.set != set) {
            skip |= LogError(cb_state->commandBuffer, kVUID_Core_PushDescriptorUpdate_Template_SetMismatched,
                             "%s: descriptorUpdateTemplate %s created with set %" PRIu32
                             " does not match command parameter set %" PRIu32 ".",
                             func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set);
        }
        if (!CompatForSet(set, layout_data, GetPipelineLayout(template_ci.pipelineLayout))) {
            LogObjectList objlist(cb_state->commandBuffer);
            objlist.add(descriptorUpdateTemplate);
            objlist.add(template_ci.pipelineLayout);
            objlist.add(layout);
            skip |= LogError(objlist, kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched,
                             "%s: descriptorUpdateTemplate %s created with %s is incompatible with command parameter "
                             "%s for set %" PRIu32,
                             func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(),
                             report_data->FormatHandle(template_ci.pipelineLayout).c_str(),
                             report_data->FormatHandle(layout).c_str(), set);
        }
    }
    if (dsl && template_state) {
        // Create an empty proxy in order to use the existing descriptor set update validation
        cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this);
        // Decode the template into a set of write updates
        cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData,
                                                                dsl->GetDescriptorSetLayout());
        // Validate the decoded update against the proxy_ds
        skip |= ValidatePushDescriptorsUpdate(&proxy_ds, static_cast<uint32_t>(decoded_template.desc_writes.size()),
                                              decoded_template.desc_writes.data(), func_name);
    }
    return skip;
}
// Checks planeIndex against the plane count previously returned by
// vkGetPhysicalDeviceDisplayPlaneProperties(2)KHR on behalf of |api_name|.
bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                         const char *api_name) const {
    const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
    // Before the app has queried the plane count there is nothing to compare against.
    if (!physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) {
        return false;
    }
    if (planeIndex < physical_device_state->display_plane_property_count) {
        return false;
    }
    return LogError(physicalDevice, "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249",
                    "%s(): planeIndex (%u) must be in the range [0, %d] that was returned by "
                    "vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
                    "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
                    api_name, planeIndex, physical_device_state->display_plane_property_count - 1);
}
// Only check needed: planeIndex must be within the queried plane count.
bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                    uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) const {
    return ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex,
                                                                   "vkGetDisplayPlaneSupportedDisplaysKHR");
}
// Only check needed: planeIndex must be within the queried plane count.
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                               uint32_t planeIndex,
                                                               VkDisplayPlaneCapabilitiesKHR *pCapabilities) const {
    return ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneCapabilitiesKHR");
}
// Same plane-index bound check as the non-2 variant, with the index taken from the info struct.
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
                                                                const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
                                                                VkDisplayPlaneCapabilities2KHR *pCapabilities) const {
    return ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex,
                                                                   "vkGetDisplayPlaneCapabilities2KHR");
}
// Validates VkDisplaySurfaceCreateInfoKHR: globalAlpha range (for GLOBAL alpha mode),
// imageExtent against device limits, and planeIndex / alphaMode against the queried
// display-plane capabilities of the display mode's physical device.
bool CoreChecks::PreCallValidateCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
                                                             const VkAllocationCallbacks *pAllocator,
                                                             VkSurfaceKHR *pSurface) const {
    bool skip = false;
    const VkDisplayModeKHR display_mode = pCreateInfo->displayMode;
    const uint32_t plane_index = pCreateInfo->planeIndex;
    if (pCreateInfo->alphaMode == VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR) {
        // globalAlpha is only consumed (and only constrained) for GLOBAL alpha mode.
        const float global_alpha = pCreateInfo->globalAlpha;
        if ((global_alpha > 1.0f) || (global_alpha < 0.0f)) {
            skip |= LogError(
                display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01254",
                "vkCreateDisplayPlaneSurfaceKHR(): alphaMode is VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR but globalAlpha is %f.",
                global_alpha);
        }
    }
    const DISPLAY_MODE_STATE *dm_state = GetDisplayModeState(display_mode);
    if (dm_state != nullptr) {
        // Get physical device from VkDisplayModeKHR state tracking
        const VkPhysicalDevice physical_device = dm_state->physical_device;
        const auto physical_device_state = GetPhysicalDeviceState(physical_device);
        VkPhysicalDeviceProperties device_properties = {};
        DispatchGetPhysicalDeviceProperties(physical_device, &device_properties);
        const uint32_t width = pCreateInfo->imageExtent.width;
        const uint32_t height = pCreateInfo->imageExtent.height;
        // NOTE(review): these use >= so an extent exactly equal to maxImageDimension2D is
        // flagged, while the message says "exceeds" — confirm against VUID 01256's wording.
        if (width >= device_properties.limits.maxImageDimension2D) {
            skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256",
                             "vkCreateDisplayPlaneSurfaceKHR(): width (%" PRIu32
                             ") exceeds device limit maxImageDimension2D (%" PRIu32 ").",
                             width, device_properties.limits.maxImageDimension2D);
        }
        if (height >= device_properties.limits.maxImageDimension2D) {
            skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256",
                             "vkCreateDisplayPlaneSurfaceKHR(): height (%" PRIu32
                             ") exceeds device limit maxImageDimension2D (%" PRIu32 ").",
                             height, device_properties.limits.maxImageDimension2D);
        }
        if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) {
            if (plane_index >= physical_device_state->display_plane_property_count) {
                skip |=
                    LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-planeIndex-01252",
                             "vkCreateDisplayPlaneSurfaceKHR(): planeIndex (%u) must be in the range [0, %d] that was returned by "
                             "vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
                             "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
                             plane_index, physical_device_state->display_plane_property_count - 1);
            } else {
                // call here once we know the plane index used is a valid plane index
                VkDisplayPlaneCapabilitiesKHR plane_capabilities;
                DispatchGetDisplayPlaneCapabilitiesKHR(physical_device, display_mode, plane_index, &plane_capabilities);
                // alphaMode must be one of the modes this plane advertises.
                if ((pCreateInfo->alphaMode & plane_capabilities.supportedAlpha) == 0) {
                    skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01255",
                                     "vkCreateDisplayPlaneSurfaceKHR(): alphaMode is %s but planeIndex %u supportedAlpha (0x%x) "
                                     "does not support the mode.",
                                     string_VkDisplayPlaneAlphaFlagBitsKHR(pCreateInfo->alphaMode), plane_index,
                                     plane_capabilities.supportedAlpha);
                }
            }
        }
    }
    return skip;
}
// Debug marker begin: only command-buffer recording-state validation is needed.
bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer,
                                                       const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    return ValidateCmd(cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
}
// Debug marker end: only command-buffer recording-state validation is needed.
bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    return ValidateCmd(cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
}
// vkCmdBeginQueryIndexedEXT: runs the common begin-query checks with the indexed-EXT VUIDs,
// then the extension-specific constraints on |index|.
bool CoreChecks::PreCallValidateCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                                                        VkQueryControlFlags flags, uint32_t index) const {
    if (disabled[query_validation]) return false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    QueryObject query_obj(queryPool, query, index);
    const char *cmd_name = "vkCmdBeginQueryIndexedEXT()";
    // Redirects the shared begin-query validation to this entry point's VUIDs.
    struct BeginQueryIndexedVuids : ValidateBeginQueryVuids {
        BeginQueryIndexedVuids() : ValidateBeginQueryVuids() {
            vuid_queue_flags = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-cmdpool";
            vuid_queue_feedback = "VUID-vkCmdBeginQueryIndexedEXT-queryType-02338";
            vuid_queue_occlusion = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00803";
            vuid_precise = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00800";
            vuid_query_count = "VUID-vkCmdBeginQueryIndexedEXT-query-00802";
            vuid_profile_lock = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03223";
            vuid_scope_not_first = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03224";
            vuid_scope_in_rp = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03225";
            vuid_dup_query_type = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-01922";
            vuid_protected_cb = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-01885";
        }
    };
    BeginQueryIndexedVuids vuids;
    bool skip = ValidateBeginQuery(cb_state, query_obj, flags, CMD_BEGINQUERYINDEXEDEXT, cmd_name, &vuids);
    // Extension specific VU's
    const auto &query_pool_ci = GetQueryPoolState(query_obj.pool)->createInfo;
    if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
        // Transform-feedback queries: index selects a vertex stream and must be in range.
        if (device_extensions.vk_ext_transform_feedback &&
            (index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) {
            skip |= LogError(
                cb_state->commandBuffer, "VUID-vkCmdBeginQueryIndexedEXT-queryType-02339",
                "%s: index %" PRIu32
                " must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".",
                cmd_name, index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
        }
    } else if (index != 0) {
        // All other query types only allow index 0.
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBeginQueryIndexedEXT-queryType-02340",
                         "%s: index %" PRIu32
                         " must be zero if %s was not created with type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.",
                         cmd_name, index, report_data->FormatHandle(queryPool).c_str());
    }
    return skip;
}
// Queues a submit-time verification that this begin-query is legal when the command
// buffer is actually executed.
void CoreChecks::PreCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                                                      VkQueryControlFlags flags, uint32_t index) {
    if (disabled[query_validation]) return;
    QueryObject begin_query_obj(queryPool, query, index);
    EnqueueVerifyBeginQuery(commandBuffer, begin_query_obj, "vkCmdBeginQueryIndexedEXT()");
}
// Queues a submit-time verification for this end-query, recording the position of the
// end command within the command buffer.
void CoreChecks::PreCallRecordCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                                                    uint32_t index) {
    if (disabled[query_validation]) return;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    QueryObject query_obj = {queryPool, query, index};
    // commandCount - 1 is taken as this command's index — presumably the state tracker has
    // already counted this command by the time this PreCallRecord runs; TODO confirm.
    query_obj.endCommandIndex = cb_state->commandCount - 1;
    EnqueueVerifyEndQuery(commandBuffer, query_obj);
}
// vkCmdEndQueryIndexedEXT: runs the common end-query checks with this entry point's VUIDs.
bool CoreChecks::PreCallValidateCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
                                                      uint32_t index) const {
    if (disabled[query_validation]) return false;
    QueryObject query_obj = {queryPool, query, index};
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    // Redirects the shared end-query validation to the indexed-EXT VUIDs.
    struct EndQueryIndexedVuids : ValidateEndQueryVuids {
        EndQueryIndexedVuids() : ValidateEndQueryVuids() {
            vuid_queue_flags = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-cmdpool";
            vuid_active_queries = "VUID-vkCmdEndQueryIndexedEXT-None-02342";
            vuid_protected_cb = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-02344";
        }
    };
    EndQueryIndexedVuids vuids;
    return ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERYINDEXEDEXT, "vkCmdEndQueryIndexedEXT()", &vuids);
}
// Dynamic discard rectangles: only command-buffer recording-state validation here; the
// rectangle contents are checked elsewhere.
bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
                                                          uint32_t discardRectangleCount,
                                                          const VkRect2D *pDiscardRectangles) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // Minimal validation for command buffer state
    return ValidateCmd(cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
}
// vkCmdSetSampleLocationsEXT: checks command-buffer state, the sample-locations struct
// itself, and that sampleLocationsPerPixel matches the bound graphics pipeline's
// rasterizationSamples.
bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
                                                         const VkSampleLocationsInfoEXT *pSampleLocationsInfo) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    // Minimal validation for command buffer state
    skip |= ValidateCmd(cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
    skip |= ValidateSampleLocationsInfo(pSampleLocationsInfo, "vkCmdSetSampleLocationsEXT");
    const auto lv_bind_point = ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS);
    const auto *pipe = cb_state->lastBound[lv_bind_point].pipeline_state;
    if (pipe != nullptr) {
        // Check same error with different log messages
        const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pipe->graphicsPipelineCI.pMultisampleState;
        if (multisample_state == nullptr) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529",
                             "vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel must be equal to "
                             "rasterizationSamples, but the bound graphics pipeline was created without a multisample state");
        } else if (multisample_state->rasterizationSamples != pSampleLocationsInfo->sampleLocationsPerPixel) {
            skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529",
                             "vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel (%s) is not equal to "
                             "the last bound pipeline's rasterizationSamples (%s)",
                             string_VkSampleCountFlagBits(pSampleLocationsInfo->sampleLocationsPerPixel),
                             string_VkSampleCountFlagBits(multisample_state->rasterizationSamples));
        }
    }
    return skip;
}
// Validates VkSamplerYcbcrConversionCreateInfo: the Android external-format path first
// (which permits VK_FORMAT_UNDEFINED), then the UNORM requirement, then every constraint
// derived from the format's potential format features (chroma sample locations, forced
// explicit reconstruction, linear chroma filtering).
bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name,
                                                      const VkSamplerYcbcrConversionCreateInfo *create_info) const {
    bool skip = false;
    const VkFormat conversion_format = create_info->format;
    // Need to check for external format conversion first as it allows for non-UNORM format
    bool external_format = false;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    const VkExternalFormatANDROID *ext_format_android = LvlFindInChain<VkExternalFormatANDROID>(create_info->pNext);
    if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) {
        external_format = true;
        // With an external format the Vulkan format must be left as VK_FORMAT_UNDEFINED.
        if (VK_FORMAT_UNDEFINED != create_info->format) {
            return LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904",
                            "%s: CreateInfo format is not VK_FORMAT_UNDEFINED while "
                            "there is a chained VkExternalFormatANDROID struct with a non-zero externalFormat.",
                            func_name);
        }
    }
#endif  // VK_USE_PLATFORM_ANDROID_KHR
    if ((external_format == false) && (FormatIsUNorm(conversion_format) == false)) {
        // Which VUID applies depends on whether the AHB extension is enabled.
        const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer)
                               ? "VUID-VkSamplerYcbcrConversionCreateInfo-format-04061"
                               : "VUID-VkSamplerYcbcrConversionCreateInfo-format-04060";
        skip |=
            LogError(device, vuid,
                     "%s: CreateInfo format (%s) is not an UNORM format and there is no external format conversion being created.",
                     func_name, string_VkFormat(conversion_format));
    }
    // Gets VkFormatFeatureFlags according to Sampler Ycbcr Conversion Format Features
    // (vkspec.html#potential-format-features)
    VkFormatFeatureFlags format_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
    if (conversion_format == VK_FORMAT_UNDEFINED) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        // only check for external format inside VK_FORMAT_UNDEFINED check to prevent unnecessary extra errors from no format
        // features being supported
        if (external_format == true) {
            auto it = ahb_ext_formats_map.find(ext_format_android->externalFormat);
            if (it != ahb_ext_formats_map.end()) {
                format_features = it->second;
            }
        }
#endif  // VK_USE_PLATFORM_ANDROID_KHR
    } else {
        format_features = GetPotentialFormatFeatures(conversion_format);
    }
    // Check all VUID that are based off of VkFormatFeatureFlags
    // These can't be in StatelessValidation due to needing possible External AHB state for feature support
    if (((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) &&
        ((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0)) {
        skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01650",
                         "%s: Format %s does not support either VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT or "
                         "VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT",
                         func_name, string_VkFormat(conversion_format));
    }
    // The chosen chroma offsets are only legal for subsampled formats when the matching
    // COSITED/MIDPOINT feature bit is present.
    if ((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0) {
        if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) {
            skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651",
                             "%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so xChromaOffset can't "
                             "be VK_CHROMA_LOCATION_COSITED_EVEN",
                             func_name, string_VkFormat(conversion_format));
        }
        if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) {
            skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651",
                             "%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so yChromaOffset can't "
                             "be VK_CHROMA_LOCATION_COSITED_EVEN",
                             func_name, string_VkFormat(conversion_format));
        }
    }
    if ((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) {
        if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) {
            skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652",
                             "%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so xChromaOffset can't "
                             "be VK_CHROMA_LOCATION_MIDPOINT",
                             func_name, string_VkFormat(conversion_format));
        }
        if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) {
            skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652",
                             "%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so yChromaOffset can't "
                             "be VK_CHROMA_LOCATION_MIDPOINT",
                             func_name, string_VkFormat(conversion_format));
        }
    }
    if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT) == 0) &&
        (create_info->forceExplicitReconstruction == VK_TRUE)) {
        skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-forceExplicitReconstruction-01656",
                         "%s: Format %s does not support "
                         "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT so "
                         "forceExplicitReconstruction must be VK_FALSE",
                         func_name, string_VkFormat(conversion_format));
    }
    if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT) == 0) &&
        (create_info->chromaFilter == VK_FILTER_LINEAR)) {
        skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-chromaFilter-01657",
                         "%s: Format %s does not support VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT so "
                         "chromaFilter must not be VK_FILTER_LINEAR",
                         func_name, string_VkFormat(conversion_format));
    }
    return skip;
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
                                                             const VkAllocationCallbacks *pAllocator,
                                                             VkSamplerYcbcrConversion *pYcbcrConversion) const {
    // Thin entry point: all real validation lives in the shared helper.
    const char *api_name = "vkCreateSamplerYcbcrConversion()";
    return ValidateCreateSamplerYcbcrConversion(api_name, pCreateInfo);
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device,
                                                                const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
                                                                const VkAllocationCallbacks *pAllocator,
                                                                VkSamplerYcbcrConversion *pYcbcrConversion) const {
    // KHR alias of vkCreateSamplerYcbcrConversion; forwards the KHR entry-point name for messages.
    const char *api_name = "vkCreateSamplerYcbcrConversionKHR()";
    return ValidateCreateSamplerYcbcrConversion(api_name, pCreateInfo);
}
// Validates vkCreateSampler: allocation-count limit, sampler YCbCr conversion
// interactions, custom border colors, and portability-subset restrictions.
//
// Fixes vs. previous revision:
//  - The two YCbCr filter messages used "%u" for report_data->FormatHandle(...).c_str(),
//    which is a const char* — undefined behavior in the log formatter. Now "%s".
//  - The magFilter branch printed minFilter's name/value; it now reports magFilter.
//  - ycbcr_state is null-checked before dereference (invalid handles are reported
//    by other layers/checks).
bool CoreChecks::PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) const {
    bool skip = false;

    // Device-wide cap on simultaneously live sampler objects.
    if (samplerMap.size() >= phys_dev_props.limits.maxSamplerAllocationCount) {
        skip |= LogError(
            device, "VUID-vkCreateSampler-maxSamplerAllocationCount-04110",
            "vkCreateSampler(): Number of currently valid sampler objects (%zu) is not less than the maximum allowed (%u).",
            samplerMap.size(), phys_dev_props.limits.maxSamplerAllocationCount);
    }

    if (enabled_features.core11.samplerYcbcrConversion == VK_TRUE) {
        const VkSamplerYcbcrConversionInfo *conversion_info = LvlFindInChain<VkSamplerYcbcrConversionInfo>(pCreateInfo->pNext);
        if (conversion_info != nullptr) {
            const VkSamplerYcbcrConversion sampler_ycbcr_conversion = conversion_info->conversion;
            const SAMPLER_YCBCR_CONVERSION_STATE *ycbcr_state = GetSamplerYcbcrConversionState(sampler_ycbcr_conversion);
            if ((ycbcr_state != nullptr) &&
                ((ycbcr_state->format_features &
                  VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT) == 0)) {
                const VkFilter chroma_filter = ycbcr_state->chromaFilter;
                // Without separate reconstruction filtering, both minFilter and magFilter
                // must equal the conversion's chromaFilter (VUID 01645 covers both).
                if (pCreateInfo->minFilter != chroma_filter) {
                    skip |= LogError(
                        device, "VUID-VkSamplerCreateInfo-minFilter-01645",
                        "VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
                        "not supported for SamplerYcbcrConversion's (%s) format %s so minFilter (%s) needs to be equal to "
                        "chromaFilter (%s)",
                        report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
                        string_VkFilter(pCreateInfo->minFilter), string_VkFilter(chroma_filter));
                }
                if (pCreateInfo->magFilter != chroma_filter) {
                    skip |= LogError(
                        device, "VUID-VkSamplerCreateInfo-minFilter-01645",
                        "VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
                        "not supported for SamplerYcbcrConversion's (%s) format %s so magFilter (%s) needs to be equal to "
                        "chromaFilter (%s)",
                        report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
                        string_VkFilter(pCreateInfo->magFilter), string_VkFilter(chroma_filter));
                }
            }
            // At this point there is a known sampler YCbCr conversion enabled
            const auto *sampler_reduction = LvlFindInChain<VkSamplerReductionModeCreateInfo>(pCreateInfo->pNext);
            if (sampler_reduction != nullptr) {
                if (sampler_reduction->reductionMode != VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE) {
                    skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-01647",
                                     "A sampler YCbCr Conversion is being used creating this sampler so the sampler reduction mode "
                                     "must be VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE.");
                }
            }
        }
    }

    if (pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT ||
        pCreateInfo->borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT) {
        // Custom border colors require the feature, optionally a format, and respect a device cap.
        if (!enabled_features.custom_border_color_features.customBorderColors) {
            skip |=
                LogError(device, "VUID-VkSamplerCreateInfo-customBorderColors-04085",
                         "vkCreateSampler(): A custom border color was specified without enabling the custom border color feature");
        }
        auto custom_create_info = LvlFindInChain<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo->pNext);
        if (custom_create_info) {
            if (custom_create_info->format == VK_FORMAT_UNDEFINED &&
                !enabled_features.custom_border_color_features.customBorderColorWithoutFormat) {
                skip |= LogError(device, "VUID-VkSamplerCustomBorderColorCreateInfoEXT-format-04014",
                                 "vkCreateSampler(): A custom border color was specified as VK_FORMAT_UNDEFINED without the "
                                 "customBorderColorWithoutFormat feature being enabled");
            }
        }
        if (custom_border_color_sampler_count >= phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers) {
            skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-04012",
                             "vkCreateSampler(): Creating a sampler with a custom border color will exceed the "
                             "maxCustomBorderColorSamplers limit of %d",
                             phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers);
        }
    }

    if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
        if ((VK_FALSE == enabled_features.portability_subset_features.samplerMipLodBias) && pCreateInfo->mipLodBias != 0) {
            skip |= LogError(device, "VUID-VkSamplerCreateInfo-samplerMipLodBias-04467",
                             "vkCreateSampler (portability error): mip LOD bias not supported.");
        }
    }

    return skip;
}
// Shared validation for vkGetBufferDeviceAddress{,KHR,EXT}: checks the
// bufferDeviceAddress feature bits, the multi-device variant for device groups,
// buffer memory binding, and the required usage flag.
bool CoreChecks::ValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo,
                                                const char *apiName) const {
    bool skip = false;
    // Either the core 1.2 feature or the EXT-flavored feature must be enabled.
    if (!enabled_features.core12.bufferDeviceAddress && !enabled_features.buffer_device_address_ext.bufferDeviceAddress) {
        skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-bufferDeviceAddress-03324",
                         "%s: The bufferDeviceAddress feature must: be enabled.", apiName);
    }
    // Device groups additionally require the multi-device flavor of the feature.
    if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice &&
        !enabled_features.buffer_device_address_ext.bufferDeviceAddressMultiDevice) {
        skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-device-03325",
                         "%s: If device was created with multiple physical devices, then the "
                         "bufferDeviceAddressMultiDevice feature must: be enabled.",
                         apiName);
    }
    const auto buffer_state = GetBufferState(pInfo->buffer);
    if (buffer_state) {
        // Capture/replay buffers are exempt from the bound-memory requirement.
        if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)) {
            skip |= ValidateMemoryIsBoundToBuffer(buffer_state, apiName, "VUID-VkBufferDeviceAddressInfo-buffer-02600");
        }
        skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, true,
                                         "VUID-VkBufferDeviceAddressInfo-buffer-02601", apiName,
                                         "VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT");
    }
    return skip;
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
    // EXT alias: pInfo already has the right type, so forward it directly.
    return ValidateGetBufferDeviceAddress(device, pInfo, "vkGetBufferDeviceAddressEXT");
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
    // KHR alias: pInfo already has the right type, so forward it directly.
    return ValidateGetBufferDeviceAddress(device, pInfo, "vkGetBufferDeviceAddressKHR");
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
    // Core 1.2 entry point: pInfo already has the right type, so forward it directly.
    return ValidateGetBufferDeviceAddress(device, pInfo, "vkGetBufferDeviceAddress");
}
// Shared validation for vkGetBufferOpaqueCaptureAddress{,KHR}: unlike the
// device-address query above, only the core 1.2 feature bits apply here.
bool CoreChecks::ValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo,
                                                       const char *apiName) const {
    bool skip = false;
    if (!enabled_features.core12.bufferDeviceAddress) {
        skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-None-03326",
                         "%s(): The bufferDeviceAddress feature must: be enabled.", apiName);
    }
    // Device groups additionally require the multi-device flavor of the feature.
    if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) {
        skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-device-03327",
                         "%s(): If device was created with multiple physical devices, then the "
                         "bufferDeviceAddressMultiDevice feature must: be enabled.",
                         apiName);
    }
    return skip;
}
bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
    // KHR alias: forward to the shared implementation with the KHR entry-point name.
    return ValidateGetBufferOpaqueCaptureAddress(device, pInfo, "vkGetBufferOpaqueCaptureAddressKHR");
}
bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
    // Core 1.2 entry point: forward to the shared implementation.
    return ValidateGetBufferOpaqueCaptureAddress(device, pInfo, "vkGetBufferOpaqueCaptureAddress");
}
// Shared validation for vkGetDeviceMemoryOpaqueCaptureAddress{,KHR}: feature
// bits plus the requirement that the memory was allocated with the
// DEVICE_ADDRESS flag in its VkMemoryAllocateFlagsInfo chain.
bool CoreChecks::ValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo,
                                                             const char *apiName) const {
    bool skip = false;
    if (!enabled_features.core12.bufferDeviceAddress) {
        skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-None-03334",
                         "%s(): The bufferDeviceAddress feature must: be enabled.", apiName);
    }
    // Device groups additionally require the multi-device flavor of the feature.
    if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) {
        skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-device-03335",
                         "%s(): If device was created with multiple physical devices, then the "
                         "bufferDeviceAddressMultiDevice feature must: be enabled.",
                         apiName);
    }
    const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
    if (mem_info) {
        // The flag lives in an optional pNext struct recorded at allocation time.
        auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext);
        if (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT)) {
            skip |= LogError(pInfo->memory, "VUID-VkDeviceMemoryOpaqueCaptureAddressInfo-memory-03336",
                             "%s(): memory must have been allocated with VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT.", apiName);
        }
    }
    return skip;
}
bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddressKHR(VkDevice device,
                                                                       const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const {
    // KHR alias: forward to the shared implementation with the KHR entry-point name.
    return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, pInfo, "vkGetDeviceMemoryOpaqueCaptureAddressKHR");
}
bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device,
                                                                    const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const {
    // Core 1.2 entry point: forward to the shared implementation.
    return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, pInfo, "vkGetDeviceMemoryOpaqueCaptureAddress");
}
// Checks that [firstQuery, firstQuery + queryCount) lies within a query pool of
// totalCount queries, logging the caller-supplied VUIDs on violation.
bool CoreChecks::ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery,
                                    uint32_t queryCount, const char *vuid_badfirst, const char *vuid_badrange,
                                    const char *apiName) const {
    bool skip = false;
    if (firstQuery >= totalCount) {
        skip |= LogError(device, vuid_badfirst,
                         "%s(): firstQuery (%" PRIu32 ") greater than or equal to query pool count (%" PRIu32 ") for %s", apiName,
                         firstQuery, totalCount, report_data->FormatHandle(queryPool).c_str());
    }
    // NOTE(review): firstQuery + queryCount is computed in uint32_t and could wrap
    // for adversarial inputs — presumably bounded by earlier stateless checks; confirm.
    if ((firstQuery + queryCount) > totalCount) {
        skip |= LogError(device, vuid_badrange,
                         "%s(): Query range [%" PRIu32 ", %" PRIu32 ") goes beyond query pool count (%" PRIu32 ") for %s", apiName,
                         firstQuery, firstQuery + queryCount, totalCount, report_data->FormatHandle(queryPool).c_str());
    }
    return skip;
}
// Shared validation for vkResetQueryPool{,EXT}: requires the hostQueryReset
// feature and a query range within the pool's bounds.
bool CoreChecks::ValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                        const char *apiName) const {
    // Query validation can be globally disabled via layer settings.
    if (disabled[query_validation]) return false;
    bool skip = false;
    if (!enabled_features.core12.hostQueryReset) {
        skip |= LogError(device, "VUID-vkResetQueryPool-None-02665", "%s(): Host query reset not enabled for device", apiName);
    }
    const auto query_pool_state = GetQueryPoolState(queryPool);
    if (query_pool_state) {
        skip |= ValidateQueryRange(device, queryPool, query_pool_state->createInfo.queryCount, firstQuery, queryCount,
                                   "VUID-vkResetQueryPool-firstQuery-02666", "VUID-vkResetQueryPool-firstQuery-02667", apiName);
    }
    return skip;
}
bool CoreChecks::PreCallValidateResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                  uint32_t queryCount) const {
    // EXT alias: forward to the shared implementation with the EXT entry-point name.
    return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPoolEXT");
}
bool CoreChecks::PreCallValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                               uint32_t queryCount) const {
    // Core 1.2 entry point: forward to the shared implementation.
    return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPool");
}
VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator,
                                                       VkValidationCacheEXT *pValidationCache) {
    // Allocate the layer-owned validation cache; a null handle means creation failed.
    *pValidationCache = ValidationCache::Create(pCreateInfo);
    if (*pValidationCache) {
        return VK_SUCCESS;
    }
    return VK_ERROR_INITIALIZATION_FAILED;
}
void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
                                                    const VkAllocationCallbacks *pAllocator) {
    // Reclaim the layer-owned object behind the handle (deleting a null pointer is a no-op).
    auto *cache_object = CastFromHandle<ValidationCache *>(validationCache);
    delete cache_object;
}
VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
                                                        void *pData) {
    // Serialize the cache. Per the Vulkan two-call convention, a changed *pDataSize
    // with a non-null destination buffer means the caller's buffer was too small.
    const size_t requested_size = *pDataSize;
    CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData);
    const bool truncated = (pData != nullptr) && (*pDataSize != requested_size);
    return truncated ? VK_INCOMPLETE : VK_SUCCESS;
}
// Layer-internal implementation of vkMergeValidationCachesEXT: folds each source
// cache's contents into dstCache, rejecting a cache merged into itself.
VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
                                                       const VkValidationCacheEXT *pSrcCaches) {
    bool skip = false;
    auto dst = CastFromHandle<ValidationCache *>(dstCache);
    VkResult result = VK_SUCCESS;
    for (uint32_t i = 0; i < srcCacheCount; i++) {
        auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]);
        // dstCache must not appear among the sources (self-merge).
        if (src == dst) {
            skip |= LogError(device, "VUID-vkMergeValidationCachesEXT-dstCache-01536",
                             "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
                             HandleToUint64(dstCache));
            result = VK_ERROR_VALIDATION_FAILED_EXT;
        }
        // NOTE(review): once `skip` is set, every remaining source is also skipped
        // (not just the offending one) — confirm this all-or-nothing merge is intended.
        if (!skip) {
            dst->Merge(src);
        }
    }
    return result;
}
// Shared validation for vkCmdSetDeviceMask{,KHR}: the mask must be non-zero,
// reference only devices in the device group, and stay within the masks the
// command buffer (and active render pass, if any) were begun with.
bool CoreChecks::ValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask, const char *func_name) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmd(cb_state, CMD_SETDEVICEMASK, func_name);
    skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00108");
    skip |= ValidateDeviceMaskToZero(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00109");
    skip |= ValidateDeviceMaskToCommandBuffer(cb_state, deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00110");
    // The render-pass subset rule only applies inside an active render pass instance.
    if (cb_state->activeRenderPass) {
        skip |= ValidateDeviceMaskToRenderPass(cb_state, deviceMask, "VUID-vkCmdSetDeviceMask-deviceMask-00111");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
    // Core entry point: forward to the shared implementation.
    return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkSetDeviceMask()");
}
bool CoreChecks::PreCallValidateCmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
    // KHR alias: forward to the shared implementation with the KHR entry-point name.
    return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkSetDeviceMaskKHR()");
}
// Shared validation for vkGetSemaphoreCounterValue{,KHR}: counter queries are
// only meaningful for timeline semaphores.
bool CoreChecks::ValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue,
                                                  const char *apiName) const {
    bool skip = false;
    const auto *semaphore_state = GetSemaphoreState(semaphore);
    if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) {
        skip |= LogError(semaphore, "VUID-vkGetSemaphoreCounterValue-semaphore-03255",
                         "%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", apiName,
                         report_data->FormatHandle(semaphore).c_str());
    }
    return skip;
}
bool CoreChecks::PreCallValidateGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const {
    // KHR alias: forward to the shared implementation with the KHR entry-point name.
    return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValueKHR");
}
bool CoreChecks::PreCallValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const {
    // Core 1.2 entry point: forward to the shared implementation.
    return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValue");
}
bool CoreChecks::ValidateQueryPoolStride(const std::string &vuid_not_64, const std::string &vuid_64, const VkDeviceSize stride,
const char *parameter_name, const uint64_t parameter_value,
const VkQueryResultFlags flags) const {
bool skip = false;
if (flags & VK_QUERY_RESULT_64_BIT) {
static const int condition_multiples = 0b0111;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= LogError(device, vuid_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name,
parameter_value);
}
} else {
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= LogError(device, vuid_not_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name,
parameter_value);
}
}
return skip;
}
bool CoreChecks::ValidateCmdDrawStrideWithStruct(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
                                                 const char *struct_name, const uint32_t struct_size) const {
    // The stride must be 4-byte aligned and large enough to hold one struct_name record.
    const bool misaligned = (stride & 0b0011) != 0;
    const bool too_small = stride < struct_size;
    if (!misaligned && !too_small) {
        return false;
    }
    return LogError(commandBuffer, vuid, "stride %d is invalid or less than sizeof(%s) %d.", stride, struct_name, struct_size);
}
// Checks that drawCount records of struct_size bytes, laid out with the given
// stride starting at offset, fit inside the bound buffer.
//
// Fix: stride * (drawCount - 1) was previously multiplied in 32 bits (both
// operands are uint32_t), so large stride/drawCount combinations could wrap
// and silently pass the bounds check. The multiply is now done in 64 bits.
bool CoreChecks::ValidateCmdDrawStrideWithBuffer(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
                                                 const char *struct_name, const uint32_t struct_size, const uint32_t drawCount,
                                                 const VkDeviceSize offset, const BUFFER_STATE *buffer_state) const {
    bool skip = false;
    uint64_t validation_value = static_cast<uint64_t>(stride) * (drawCount - 1) + offset + struct_size;
    if (validation_value > buffer_state->createInfo.size) {
        skip |= LogError(commandBuffer, vuid,
                         "stride[%d] * (drawCount[%d] - 1) + offset[%" PRIx64 "] + sizeof(%s)[%d] = %" PRIx64
                         " is greater than the size[%" PRIx64 "] of %s.",
                         stride, drawCount, offset, struct_name, struct_size, validation_value, buffer_state->createInfo.size,
                         report_data->FormatHandle(buffer_state->buffer).c_str());
    }
    return skip;
}
bool CoreChecks::PreCallValidateReleaseProfilingLockKHR(VkDevice device) const {
    // Releasing the profiling lock without a prior successful acquire is an API misuse.
    if (performance_lock_acquired) {
        return false;
    }
    return LogError(device, "VUID-vkReleaseProfilingLockKHR-device-03235",
                    "vkReleaseProfilingLockKHR(): The profiling lock of device must have been held via a previous successful "
                    "call to vkAcquireProfilingLockKHR.");
}
bool CoreChecks::PreCallValidateCmdSetCheckpointNV(VkCommandBuffer commandBuffer, const void *pCheckpointMarker) const {
    // Checkpoint markers are permitted on graphics, compute, and transfer queues.
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetCheckpointNV()",
                                  VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
                                  "VUID-vkCmdSetCheckpointNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETCHECKPOINTNV, "vkCmdSetCheckpointNV()");
    return skip;
}
// Validates vkWriteAccelerationStructuresPropertiesKHR: for compacted-size
// queries every source acceleration structure must have been built with the
// ALLOW_COMPACTION flag.
//
// Fixes vs. previous revision:
//  - as_state was dereferenced without a null check, crashing on an
//    unknown/destroyed handle (that case is reported by other tracking).
//  - The message concatenated "built with" + "VK_BUILD_..." without a space,
//    producing "built withVK_BUILD_...".
bool CoreChecks::PreCallValidateWriteAccelerationStructuresPropertiesKHR(VkDevice device, uint32_t accelerationStructureCount,
                                                                         const VkAccelerationStructureKHR *pAccelerationStructures,
                                                                         VkQueryType queryType, size_t dataSize, void *pData,
                                                                         size_t stride) const {
    bool skip = false;
    for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
        const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pAccelerationStructures[i]);
        if (as_state == nullptr) {
            continue;  // invalid handle: nothing further to validate here
        }
        const auto &as_info = as_state->build_info_khr;
        if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) {
            if (!(as_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
                skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431",
                                 "vkWriteAccelerationStructuresPropertiesKHR: All acceleration structures (%s) in "
                                 "pAccelerationStructures must have been built with "
                                 "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
                                 "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.",
                                 report_data->FormatHandle(as_state->acceleration_structure).c_str());
            }
        }
    }
    return skip;
}
// Validates vkCmdWriteAccelerationStructuresPropertiesKHR: compute-queue only,
// outside a render pass, query pool type must match, and compacted-size queries
// require structures built with ALLOW_COMPACTION.
bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesKHR(
    VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures,
    VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWriteAccelerationStructuresPropertiesKHR()", VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESKHR, "vkCmdWriteAccelerationStructuresPropertiesKHR()");
    // This command must only be called outside of a render pass instance
    skip |= InsideRenderPass(cb_state, "vkCmdWriteAccelerationStructuresPropertiesKHR()",
                             "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-renderpass");
    // NOTE(review): query_pool_state is dereferenced without a null check — an
    // invalid queryPool handle would crash here; confirm it is guaranteed valid upstream.
    const auto *query_pool_state = GetQueryPoolState(queryPool);
    const auto &query_pool_ci = query_pool_state->createInfo;
    if (query_pool_ci.queryType != queryType) {
        skip |= LogError(
            device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-queryPool-02493",
            "vkCmdWriteAccelerationStructuresPropertiesKHR: queryPool must have been created with a queryType matching queryType.");
    }
    for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
        if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) {
            // NOTE(review): as_state is likewise dereferenced without a null check.
            const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pAccelerationStructures[i]);
            if (!(as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
                skip |= LogError(
                    device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431",
                    "vkCmdWriteAccelerationStructuresPropertiesKHR: All acceleration structures in pAccelerationStructures "
                    "must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
                    "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.");
            }
        }
    }
    return skip;
}
// NV flavor of the acceleration-structure property write: same structure as the
// KHR variant above but against NV state objects and NV VUIDs.
bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer,
                                                                           uint32_t accelerationStructureCount,
                                                                           const VkAccelerationStructureNV *pAccelerationStructures,
                                                                           VkQueryType queryType, VkQueryPool queryPool,
                                                                           uint32_t firstQuery) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWriteAccelerationStructuresPropertiesNV()", VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESNV, "vkCmdWriteAccelerationStructuresPropertiesNV()");
    // This command must only be called outside of a render pass instance
    skip |= InsideRenderPass(cb_state, "vkCmdWriteAccelerationStructuresPropertiesNV()",
                             "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-renderpass");
    // NOTE(review): query_pool_state is dereferenced without a null check — an
    // invalid queryPool handle would crash here; confirm it is guaranteed valid upstream.
    const auto *query_pool_state = GetQueryPoolState(queryPool);
    const auto &query_pool_ci = query_pool_state->createInfo;
    if (query_pool_ci.queryType != queryType) {
        skip |= LogError(
            device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-queryPool-03755",
            "vkCmdWriteAccelerationStructuresPropertiesNV: queryPool must have been created with a queryType matching queryType.");
    }
    for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
        if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV) {
            // NOTE(review): as_state is likewise dereferenced without a null check.
            const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(pAccelerationStructures[i]);
            if (!(as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
                skip |=
                    LogError(device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-accelerationStructures-03431",
                             "vkCmdWriteAccelerationStructuresPropertiesNV: All acceleration structures in pAccelerationStructures "
                             "must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
                             "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV.");
            }
        }
    }
    return skip;
}
uint32_t CoreChecks::CalcTotalShaderGroupCount(const PIPELINE_STATE *pipelineState) const {
    // Sum this pipeline's own shader groups plus those of all linked libraries (recursively).
    const auto &create_info = pipelineState->raytracingPipelineCI;
    uint32_t count = create_info.groupCount;
    if (create_info.pLibraryInfo) {
        for (uint32_t lib = 0; lib < create_info.pLibraryInfo->libraryCount; ++lib) {
            const PIPELINE_STATE *library_state = GetPipelineState(create_info.pLibraryInfo->pLibraries[lib]);
            count += CalcTotalShaderGroupCount(library_state);
        }
    }
    return count;
}
// Validates vkGetRayTracingShaderGroupHandlesKHR: the pipeline must not be a
// library, dataSize must cover the requested groups, and the group range must
// lie within the pipeline's total (including linked libraries).
//
// Fix: pipeline_state was dereferenced without a null check; the sibling
// CaptureReplay variant below already guards against an unknown handle, so the
// same early-out is applied here for consistency.
bool CoreChecks::PreCallValidateGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup,
                                                                   uint32_t groupCount, size_t dataSize, void *pData) const {
    bool skip = false;
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
    if (!pipeline_state) {
        return skip;
    }
    if (pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) {
        skip |= LogError(
            device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-pipeline-03482",
            "vkGetRayTracingShaderGroupHandlesKHR: pipeline must have not been created with VK_PIPELINE_CREATE_LIBRARY_BIT_KHR.");
    }
    if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleSize * groupCount)) {
        skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-dataSize-02420",
                         "vkGetRayTracingShaderGroupHandlesKHR: dataSize (%zu) must be at least "
                         "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleSize * groupCount.",
                         dataSize);
    }
    // Total includes groups contributed by linked pipeline libraries.
    uint32_t total_group_count = CalcTotalShaderGroupCount(pipeline_state);
    if (firstGroup >= total_group_count) {
        skip |=
            LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-04050",
                     "vkGetRayTracingShaderGroupHandlesKHR: firstGroup must be less than the number of shader groups in pipeline.");
    }
    if ((firstGroup + groupCount) > total_group_count) {
        skip |= LogError(
            device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-02419",
            "vkGetRayTracingShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less than or equal the number "
            "of shader groups in pipeline.");
    }
    return skip;
}
// Validates vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: dataSize must
// cover the requested groups, the group range must lie within the pipeline,
// and the pipeline must have been created with the capture/replay flag.
bool CoreChecks::PreCallValidateGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline,
                                                                                uint32_t firstGroup, uint32_t groupCount,
                                                                                size_t dataSize, void *pData) const {
    bool skip = false;
    if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleCaptureReplaySize * groupCount)) {
        skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-dataSize-03484",
                         "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: dataSize (%zu) must be at least "
                         "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleCaptureReplaySize * groupCount.",
                         dataSize);
    }
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
    // Unknown/destroyed pipeline handle: nothing further to validate.
    if (!pipeline_state) {
        return skip;
    }
    // NOTE(review): unlike the non-capture-replay variant, this range check uses only
    // the pipeline's own groupCount, not the library-inclusive total — confirm intended.
    if (firstGroup >= pipeline_state->raytracingPipelineCI.groupCount) {
        skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-04051",
                         "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: firstGroup must be less than the number of shader "
                         "groups in pipeline.");
    }
    if ((firstGroup + groupCount) > pipeline_state->raytracingPipelineCI.groupCount) {
        skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-03483",
                         "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less "
                         "than or equal to the number of shader groups in pipeline.");
    }
    if (!(pipeline_state->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR)) {
        skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-pipeline-03607",
                         "pipeline must have been created with a flags that included "
                         "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR.");
    }
    return skip;
}
// Validates vkCmdBuildAccelerationStructuresIndirectKHR: compute-queue only,
// outside a render pass, and per-info consistency between update builds and
// the source structure's recorded build parameters, plus dst type matching.
//
// Fixes vs. previous revision:
//  - After the 03667 check already established that src_as_state may be null,
//    the 03758/03759/03760 comparisons dereferenced it unconditionally; they
//    are now guarded.
//  - Simplified the redundant `!dst || (dst && ...)` conditions to `!dst || (...)`.
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresIndirectKHR(VkCommandBuffer commandBuffer, uint32_t infoCount,
                                                                          const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
                                                                          const VkDeviceAddress *pIndirectDeviceAddresses,
                                                                          const uint32_t *pIndirectStrides,
                                                                          const uint32_t *const *ppMaxPrimitiveCounts) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructuresIndirectKHR()", VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURESINDIRECTKHR, "vkCmdBuildAccelerationStructuresIndirectKHR()");
    skip |= InsideRenderPass(cb_state, "vkCmdBuildAccelerationStructuresIndirectKHR()",
                             "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-renderpass");
    for (uint32_t i = 0; i < infoCount; ++i) {
        const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfos[i].srcAccelerationStructure);
        const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state = GetAccelerationStructureStateKHR(pInfos[i].dstAccelerationStructure);
        if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
            if (src_as_state == nullptr || !src_as_state->built ||
                !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
                skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03667",
                                 "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is "
                                 "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have "
                                 "been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
                                 "VkAccelerationStructureBuildGeometryInfoKHR::flags.");
            }
            // The comparisons below read the source's recorded build info, so they can
            // only run when the source state exists.
            if (src_as_state != nullptr) {
                if (pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) {
                    skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03758",
                                     "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is "
                                     "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
                                     " its geometryCount member must have the same value which was specified when "
                                     "srcAccelerationStructure was last built.");
                }
                if (pInfos[i].flags != src_as_state->build_info_khr.flags) {
                    skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03759",
                                     "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is"
                                     " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which"
                                     " was specified when srcAccelerationStructure was last built.");
                }
                if (pInfos[i].type != src_as_state->build_info_khr.type) {
                    skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03760",
                                     "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is"
                                     " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which"
                                     " was specified when srcAccelerationStructure was last built.");
                }
            }
        }
        if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
            if (!dst_as_state ||
                (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
                 dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
                skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03700",
                                 "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its type member is "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have "
                                 "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
            }
        }
        if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) {
            if (!dst_as_state ||
                (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR &&
                 dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
                skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03699",
                                 "vkCmdBuildAccelerationStructuresIndirectKHR():For each element of pInfos, if its type member is "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been "
                                 "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
            }
        }
    }
    return skip;
}
bool CoreChecks::ValidateCopyAccelerationStructureInfoKHR(const VkCopyAccelerationStructureInfoKHR *pInfo,
                                                          const char *api_name) const {
    // Shared validation for the command-buffer and host variants of
    // vkCopy*AccelerationStructureKHR. |api_name| is the calling entry point,
    // used only for the error-message prefix.
    bool skip = false;
    if (pInfo->mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR) {
        const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfo->src);
        // Guard against an unknown/destroyed src handle: the previous code
        // dereferenced src_as_state unconditionally and would crash on an
        // invalid handle instead of reporting (or skipping) the VU check.
        if (src_as_state != nullptr &&
            !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
            skip |= LogError(device, "VUID-VkCopyAccelerationStructureInfoKHR-src-03411",
                             "(%s): src must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR"
                             "if mode is VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR.",
                             api_name);
        }
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer,
                                                                const VkCopyAccelerationStructureInfoKHR *pInfo) const {
    // Validates vkCmdCopyAccelerationStructureKHR: compute-queue requirement,
    // command-buffer recording state, render-pass restriction, and the shared
    // copy-info rules.
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureKHR()", VK_QUEUE_COMPUTE_BIT,
                                      "VUID-vkCmdCopyAccelerationStructureKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTUREKHR, "vkCmdCopyAccelerationStructureKHR()");
    skip |= InsideRenderPass(cb_state, "vkCmdCopyAccelerationStructureKHR()", "VUID-vkCmdCopyAccelerationStructureKHR-renderpass");
    skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCmdCopyAccelerationStructureKHR");
    // Bug fix: the previous implementation returned the literal `false`,
    // silently discarding every accumulated validation result above.
    return skip;
}
bool CoreChecks::PreCallValidateCopyAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation,
                                                             const VkCopyAccelerationStructureInfoKHR *pInfo) const {
    // Host-side copy shares the same copy-info validation as the
    // command-buffer path.
    return ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCopyAccelerationStructureKHR");
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureToMemoryKHR(
    VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo) const {
    // Must be recorded on a compute-capable queue, in a recording command
    // buffer, and outside of a render pass instance.
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureToMemoryKHR()", VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdCopyAccelerationStructureToMemoryKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURETOMEMORYKHR, "vkCmdCopyAccelerationStructureToMemoryKHR()");
    skip |= InsideRenderPass(cb_state, "vkCmdCopyAccelerationStructureToMemoryKHR()",
                             "VUID-vkCmdCopyAccelerationStructureToMemoryKHR-renderpass");
    return skip;
}
bool CoreChecks::PreCallValidateCmdCopyMemoryToAccelerationStructureKHR(
    VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo) const {
    // Must be recorded on a compute-capable queue, in a recording command
    // buffer, and outside of a render pass instance.
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdCopyMemoryToAccelerationStructureKHR()", VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdCopyMemoryToAccelerationStructureKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_COPYMEMORYTOACCELERATIONSTRUCTUREKHR, "vkCmdCopyMemoryToAccelerationStructureKHR()");
    skip |= InsideRenderPass(cb_state, "vkCmdCopyMemoryToAccelerationStructureKHR()",
                             "VUID-vkCmdCopyMemoryToAccelerationStructureKHR-renderpass");
    return skip;
}
bool CoreChecks::PreCallValidateCmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                                   uint32_t bindingCount, const VkBuffer *pBuffers,
                                                                   const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes) const {
    // Validates vkCmdBindTransformFeedbackBuffersEXT: feature enablement,
    // transform feedback must not already be active, and per-binding
    // offset/size/usage/memory-binding rules.
    bool skip = false;
    char const *const cmd_name = "CmdBindTransformFeedbackBuffersEXT";
    if (!enabled_features.transform_feedback_features.transformFeedback) {
        skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-transformFeedback-02355",
                         "%s: transformFeedback feature is not enabled.", cmd_name);
    }

    {
        auto const cb_state = GetCBState(commandBuffer);
        if (cb_state->transform_feedback_active) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-None-02365",
                             "%s: transform feedback is active.", cmd_name);
        }
    }

    for (uint32_t i = 0; i < bindingCount; ++i) {
        auto const buffer_state = GetBufferState(pBuffers[i]);
        assert(buffer_state != nullptr);

        if (pOffsets[i] >= buffer_state->createInfo.size) {
            skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02358",
                             "%s: pOffset[%" PRIu32 "](0x%" PRIxLEAST64
                             ") is greater than or equal to the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").",
                             cmd_name, i, pOffsets[i], i, buffer_state->createInfo.size);
        }

        if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT) == 0) {
            skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02360",
                             "%s: pBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
                             ") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT flag.",
                             cmd_name, i, pBuffers[i]);
        }

        // pSizes is optional and may be nullptr. Also might be VK_WHOLE_SIZE which VU don't apply
        if ((pSizes != nullptr) && (pSizes[i] != VK_WHOLE_SIZE)) {
            // only report one to prevent redundant error if the size is larger since adding offset will be as well
            if (pSizes[i] > buffer_state->createInfo.size) {
                skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pSizes-02362",
                                 "%s: pSizes[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than the size of pBuffers[%" PRIu32
                                 "](0x%" PRIxLEAST64 ").",
                                 cmd_name, i, pSizes[i], i, buffer_state->createInfo.size);
            } else if (pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) {
                // Bug fix: the format string printed "(Ox..." (capital letter O)
                // instead of the hexadecimal prefix "(0x..." for pOffsets[i].
                skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02363",
                                 "%s: The sum of pOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ") and pSizes[%" PRIu32 "](0x%" PRIxLEAST64
                                 ") is greater than the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").",
                                 cmd_name, i, pOffsets[i], i, pSizes[i], i, buffer_state->createInfo.size);
            }
        }

        skip |= ValidateMemoryIsBoundToBuffer(buffer_state, cmd_name, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02364");
    }

    return skip;
}
// Validates vkCmdBeginTransformFeedbackEXT: the transformFeedback feature must
// be enabled, transform feedback must not already be active, and each non-null
// counter buffer must be large enough at its offset and carry the
// counter-buffer usage flag.
bool CoreChecks::PreCallValidateCmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
                                                             uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
                                                             const VkDeviceSize *pCounterBufferOffsets) const {
    bool skip = false;
    char const *const cmd_name = "CmdBeginTransformFeedbackEXT";
    if (!enabled_features.transform_feedback_features.transformFeedback) {
        skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-transformFeedback-02366",
                         "%s: transformFeedback feature is not enabled.", cmd_name);
    }
    {
        auto const cb_state = GetCBState(commandBuffer);
        // Begin is illegal while a previous Begin is still in effect.
        if (cb_state->transform_feedback_active) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-None-02367", "%s: transform feedback is active.",
                             cmd_name);
        }
    }

    // pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionaly, pCounterBufferOffsets must be nullptr
    // if pCounterBuffers is nullptr.
    if (pCounterBuffers == nullptr) {
        if (pCounterBufferOffsets != nullptr) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffer-02371",
                             "%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name);
        }
    } else {
        for (uint32_t i = 0; i < counterBufferCount; ++i) {
            // VK_NULL_HANDLE entries mean "no counter for this buffer" and are skipped.
            if (pCounterBuffers[i] != VK_NULL_HANDLE) {
                auto const buffer_state = GetBufferState(pCounterBuffers[i]);
                assert(buffer_state != nullptr);

                // The implementation reads/writes a 4-byte counter value at the offset.
                if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) {
                    skip |=
                        LogError(buffer_state->buffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBufferOffsets-02370",
                                 "%s: pCounterBuffers[%" PRIu32 "](0x%" PRIxLEAST64
                                 ") is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ").",
                                 cmd_name, i, pCounterBuffers[i], i, pCounterBufferOffsets[i]);
                }

                if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) {
                    skip |= LogError(buffer_state->buffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffers-02372",
                                     "%s: pCounterBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
                                     ") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.",
                                     cmd_name, i, pCounterBuffers[i]);
                }
            }
        }
    }

    return skip;
}
// Validates vkCmdEndTransformFeedbackEXT. Mirrors the Begin variant above,
// except transform feedback must currently be active.
bool CoreChecks::PreCallValidateCmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
                                                           uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
                                                           const VkDeviceSize *pCounterBufferOffsets) const {
    bool skip = false;
    char const *const cmd_name = "CmdEndTransformFeedbackEXT";
    if (!enabled_features.transform_feedback_features.transformFeedback) {
        skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-transformFeedback-02374",
                         "%s: transformFeedback feature is not enabled.", cmd_name);
    }
    {
        auto const cb_state = GetCBState(commandBuffer);
        // End is only legal while a matching Begin is in effect.
        if (!cb_state->transform_feedback_active) {
            skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-None-02375", "%s: transform feedback is not active.",
                             cmd_name);
        }
    }

    // pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionaly, pCounterBufferOffsets must be nullptr
    // if pCounterBuffers is nullptr.
    if (pCounterBuffers == nullptr) {
        if (pCounterBufferOffsets != nullptr) {
            skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffer-02379",
                             "%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name);
        }
    } else {
        for (uint32_t i = 0; i < counterBufferCount; ++i) {
            // VK_NULL_HANDLE entries mean "no counter for this buffer" and are skipped.
            if (pCounterBuffers[i] != VK_NULL_HANDLE) {
                auto const buffer_state = GetBufferState(pCounterBuffers[i]);
                assert(buffer_state != nullptr);

                // The implementation writes a 4-byte counter value at the offset.
                if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) {
                    skip |=
                        LogError(buffer_state->buffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBufferOffsets-02378",
                                 "%s: pCounterBuffers[%" PRIu32 "](0x%" PRIxLEAST64
                                 ") is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ").",
                                 cmd_name, i, pCounterBuffers[i], i, pCounterBufferOffsets[i]);
                }

                if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) {
                    skip |= LogError(buffer_state->buffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffers-02380",
                                     "%s: pCounterBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
                                     ") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.",
                                     cmd_name, i, pCounterBuffers[i]);
                }
            }
        }
    }

    return skip;
}
bool CoreChecks::PreCallValidateCmdSetCullModeEXT(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode) const {
    // vkCmdSetCullModeEXT requires a graphics-capable queue, a recording
    // command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetCullModeEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetCullModeEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETCULLMODEEXT, "vkCmdSetCullModeEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetCullModeEXT-None-03384",
                         "vkCmdSetCullModeEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetFrontFaceEXT(VkCommandBuffer commandBuffer, VkFrontFace frontFace) const {
    // vkCmdSetFrontFaceEXT requires a graphics-capable queue, a recording
    // command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetFrontFaceEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetFrontFaceEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETFRONTFACEEXT, "vkCmdSetFrontFaceEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetFrontFaceEXT-None-03383",
                         "vkCmdSetFrontFaceEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetPrimitiveTopologyEXT(VkCommandBuffer commandBuffer,
                                                           VkPrimitiveTopology primitiveTopology) const {
    // vkCmdSetPrimitiveTopologyEXT requires a graphics-capable queue, a
    // recording command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetPrimitiveTopologyEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetPrimitiveTopologyEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETPRIMITIVETOPOLOGYEXT, "vkCmdSetPrimitiveTopologyEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveTopologyEXT-None-03347",
                         "vkCmdSetPrimitiveTopologyEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer, uint32_t viewportCount,
                                                           const VkViewport *pViewports) const {
    // vkCmdSetViewportWithCountEXT requires a graphics-capable queue, a
    // recording command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportWithCountEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetViewportWithCountEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWITHCOUNTEXT, "vkCmdSetViewportWithCountEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportWithCountEXT-None-03393",
                         "vkCmdSetViewportWithCountEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer, uint32_t scissorCount,
                                                          const VkRect2D *pScissors) const {
    // vkCmdSetScissorWithCountEXT requires a graphics-capable queue, a
    // recording command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetScissorWithCountEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetScissorWithCountEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETSCISSORWITHCOUNTEXT, "vkCmdSetScissorWithCountEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-None-03396",
                         "vkCmdSetScissorWithCountEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
// Validates vkCmdBindVertexBuffers2EXT: per-buffer usage/memory/offset/size
// checks, plus the rule that pStrides presence must match whether the bound
// graphics pipeline enabled VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT.
bool CoreChecks::PreCallValidateCmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                         uint32_t bindingCount, const VkBuffer *pBuffers,
                                                         const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes,
                                                         const VkDeviceSize *pStrides) const {
    const auto cb_state = GetCBState(commandBuffer);
    assert(cb_state);

    bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindVertexBuffers2EXT()", VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdBindVertexBuffers2EXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS2EXT, "vkCmdBindVertexBuffers2EXT()");
    for (uint32_t i = 0; i < bindingCount; ++i) {
        const auto buffer_state = GetBufferState(pBuffers[i]);
        if (buffer_state) {
            skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
                                             "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03359", "vkCmdBindVertexBuffers2EXT()",
                                             "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
            skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers2EXT()",
                                                  "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03360");
            if (pOffsets[i] >= buffer_state->createInfo.size) {
                skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers2EXT-pOffsets-03357",
                                 "vkCmdBindVertexBuffers2EXT() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.",
                                 pOffsets[i]);
            }
            // pSizes is optional; when supplied, offset + size must fit in the buffer.
            if (pSizes && pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) {
                skip |=
                    LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers2EXT-pSizes-03358",
                             "vkCmdBindVertexBuffers2EXT() size (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pSizes[i]);
            }
        }
    }

    // pStrides must be non-NULL iff the bound pipeline enabled the
    // vertex-input-binding-stride dynamic state.
    const auto lv_bind_point = ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS);
    const auto *pipe = cb_state->lastBound[lv_bind_point].pipeline_state;
    auto vibs = IsDynamic(pipe, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT);
    if (vibs && (pStrides == nullptr)) {
        skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03361",
                         "vkCmdBindVertexBuffers2EXT(): pStrides is NULL but the bound pipeline was created with "
                         "VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT dynamic state enabled.");
    } else if (!vibs && (pStrides != nullptr)) {
        // NOTE(review): both branches report VUID ...-pStrides-03361; the
        // non-NULL branch presumably has its own VUID — verify against the
        // current Vulkan valid-usage database.
        skip |= LogError(commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03361",
                         "vkCmdBindVertexBuffers2EXT(): pStrides is non-NULL, but the bound pipeline was not created with "
                         "VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT dynamic state enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable) const {
    // vkCmdSetDepthTestEnableEXT requires a graphics-capable queue, a
    // recording command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthTestEnableEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetDepthTestEnableEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETDEPTHTESTENABLEEXT, "vkCmdSetDepthTestEnableEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthTestEnableEXT-None-03352",
                         "vkCmdSetDepthTestEnableEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthWriteEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable) const {
    // vkCmdSetDepthWriteEnableEXT requires a graphics-capable queue, a
    // recording command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthWriteEnableEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetDepthWriteEnableEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETDEPTHWRITEENABLEEXT, "vkCmdSetDepthWriteEnableEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthWriteEnableEXT-None-03354",
                         "vkCmdSetDepthWriteEnableEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthCompareOpEXT(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp) const {
    // vkCmdSetDepthCompareOpEXT requires a graphics-capable queue, a
    // recording command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthCompareOpEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetDepthCompareOpEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETDEPTHCOMPAREOPEXT, "vkCmdSetDepthCompareOpEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthCompareOpEXT-None-03353",
                         "vkCmdSetDepthCompareOpEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBoundsTestEnableEXT(VkCommandBuffer commandBuffer,
                                                               VkBool32 depthBoundsTestEnable) const {
    // vkCmdSetDepthBoundsTestEnableEXT requires a graphics-capable queue, a
    // recording command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBoundsTestEnableEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetDepthBoundsTestEnableEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDSTESTENABLEEXT, "vkCmdSetDepthBoundsTestEnableEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBoundsTestEnableEXT-None-03349",
                         "vkCmdSetDepthBoundsTestEnableEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable) const {
    // vkCmdSetStencilTestEnableEXT requires a graphics-capable queue, a
    // recording command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilTestEnableEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetStencilTestEnableEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETSTENCILTESTENABLEEXT, "vkCmdSetStencilTestEnableEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilTestEnableEXT-None-03350",
                         "vkCmdSetStencilTestEnableEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilOpEXT(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp,
                                                   VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp) const {
    // vkCmdSetStencilOpEXT requires a graphics-capable queue, a recording
    // command buffer, and the extendedDynamicState feature.
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilOpEXT()", VK_QUEUE_GRAPHICS_BIT,
                                  "VUID-vkCmdSetStencilOpEXT-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETSTENCILOPEXT, "vkCmdSetStencilOpEXT()");
    if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilOpEXT-None-03351",
                         "vkCmdSetStencilOpEXT: extendedDynamicState feature is not enabled.");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) const {
    // When VK_KHR_portability_subset is enabled, events are only legal if the
    // portability "events" feature was enabled at device creation.
    bool skip = false;
    const bool portability_enabled = device_extensions.vk_khr_portability_subset != ExtEnabled::kNotEnabled;
    if (portability_enabled && (VK_FALSE == enabled_features.portability_subset_features.events)) {
        skip |= LogError(device, "VUID-vkCreateEvent-events-04468",
                         "vkCreateEvent: events are not supported via VK_KHR_portability_subset");
    }
    return skip;
}
bool CoreChecks::PreCallValidateCmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer,
                                                                     uint32_t pipelineStackSize) const {
    // Must be recorded on a compute-capable queue, in a recording command
    // buffer, and outside of a render pass instance.
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmdQueueFlags(cb_state, "vkCmdSetRayTracingPipelineStackSizeKHR()", VK_QUEUE_COMPUTE_BIT,
                                  "VUID-vkCmdSetRayTracingPipelineStackSizeKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETRAYTRACINGPIPELINESTACKSIZEKHR, "vkCmdSetRayTracingPipelineStackSizeKHR()");
    skip |= InsideRenderPass(cb_state, "vkCmdSetRayTracingPipelineStackSizeKHR()",
                             "VUID-vkCmdSetRayTracingPipelineStackSizeKHR-renderpass");
    return skip;
}
bool CoreChecks::PreCallValidateGetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline pipeline, uint32_t group,
                                                                     VkShaderGroupShaderKHR groupShader) const {
    // Validates that |group| indexes an existing shader group of |pipeline|.
    bool skip = false;
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
    // Guard against an unknown/destroyed pipeline handle: the previous code
    // dereferenced pipeline_state unconditionally and would crash instead of
    // leaving the check to handle-validation.
    if (pipeline_state && group >= pipeline_state->raytracingPipelineCI.groupCount) {
        skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupStackSizeKHR-group-03608",
                         "vkGetRayTracingShaderGroupStackSizeKHR: The value of group must be less than the number of shader groups "
                         "in pipeline.");
    }
    return skip;
}
// Captures validation-tracker state for a graphics pipeline from its create
// info: attachment usage for the referenced subpass, per-stage shader state,
// vertex input bindings/attributes (with required alignments), and color-blend
// attachment state. |rpstate| is retained as this pipeline's render pass.
void PIPELINE_STATE::initGraphicsPipeline(const ValidationStateTracker *state_data, const VkGraphicsPipelineCreateInfo *pCreateInfo,
                                          std::shared_ptr<const RENDER_PASS_STATE> &&rpstate) {
    reset();
    // Determine whether the target subpass actually uses color and/or
    // depth/stencil attachments; this controls which parts of the create info
    // are kept by the safe-struct initialize() below.
    bool uses_color_attachment = false;
    bool uses_depthstencil_attachment = false;
    if (pCreateInfo->subpass < rpstate->createInfo.subpassCount) {
        const auto &subpass = rpstate->createInfo.pSubpasses[pCreateInfo->subpass];

        for (uint32_t i = 0; i < subpass.colorAttachmentCount; ++i) {
            if (subpass.pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
                uses_color_attachment = true;
                break;
            }
        }

        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uses_depthstencil_attachment = true;
        }
    }
    graphicsPipelineCI.initialize(pCreateInfo, uses_color_attachment, uses_depthstencil_attachment);
    if (graphicsPipelineCI.pInputAssemblyState) {
        topology_at_rasterizer = graphicsPipelineCI.pInputAssemblyState->topology;
    }

    // Record per-stage shader module state and track duplicate stage bits.
    stage_state.resize(pCreateInfo->stageCount);
    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        const VkPipelineShaderStageCreateInfo *pssci = &pCreateInfo->pStages[i];
        this->duplicate_shaders |= this->active_shaders & pssci->stage;
        this->active_shaders |= pssci->stage;
        state_data->RecordPipelineShaderStage(pssci, this, &stage_state[i]);
    }

    if (graphicsPipelineCI.pVertexInputState) {
        const auto vici = graphicsPipelineCI.pVertexInputState;
        if (vici->vertexBindingDescriptionCount) {
            this->vertex_binding_descriptions_ = std::vector<VkVertexInputBindingDescription>(
                vici->pVertexBindingDescriptions, vici->pVertexBindingDescriptions + vici->vertexBindingDescriptionCount);

            // Map binding number -> index into vertex_binding_descriptions_.
            this->vertex_binding_to_index_map_.reserve(vici->vertexBindingDescriptionCount);
            for (uint32_t i = 0; i < vici->vertexBindingDescriptionCount; ++i) {
                this->vertex_binding_to_index_map_[vici->pVertexBindingDescriptions[i].binding] = i;
            }
        }
        if (vici->vertexAttributeDescriptionCount) {
            this->vertex_attribute_descriptions_ = std::vector<VkVertexInputAttributeDescription>(
                vici->pVertexAttributeDescriptions, vici->pVertexAttributeDescriptions + vici->vertexAttributeDescriptionCount);
            // Precompute the required alignment for each attribute: per-texel
            // formats align to the per-channel size rather than the full element.
            for (uint32_t i = 0; i < vici->vertexAttributeDescriptionCount; ++i) {
                const auto attribute_format = vici->pVertexAttributeDescriptions[i].format;
                VkDeviceSize vtx_attrib_req_alignment = FormatElementSize(attribute_format);
                if (FormatElementIsTexel(attribute_format)) {
                    vtx_attrib_req_alignment = SafeDivision(vtx_attrib_req_alignment, FormatChannelCount(attribute_format));
                }
                this->vertex_attribute_alignments_.push_back(vtx_attrib_req_alignment);
            }
        }
    }
    if (graphicsPipelineCI.pColorBlendState) {
        const auto cbci = graphicsPipelineCI.pColorBlendState;
        if (cbci->attachmentCount) {
            this->attachments =
                std::vector<VkPipelineColorBlendAttachmentState>(cbci->pAttachments, cbci->pAttachments + cbci->attachmentCount);
        }
    }
    rp_state = rpstate;
}
// Captures validation-tracker state for a compute pipeline: resets previous
// state, copies the create info, and records its single compute shader stage.
void PIPELINE_STATE::initComputePipeline(const ValidationStateTracker *state_data, const VkComputePipelineCreateInfo *pCreateInfo) {
    reset();
    computePipelineCI.initialize(pCreateInfo);
    if (computePipelineCI.stage.stage == VK_SHADER_STAGE_COMPUTE_BIT) {
        this->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
        stage_state.resize(1);
        state_data->RecordPipelineShaderStage(&pCreateInfo->stage, this, &stage_state[0]);
    } else {
        // TODO : Flag error
    }
}
// Captures validation-tracker state for a ray-tracing pipeline (NV or KHR
// create-info variant, selected via the CreateInfo template parameter):
// resets previous state, copies the create info, accumulates the active
// shader-stage bits, and records per-stage shader module state.
template <typename CreateInfo>
void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data, const CreateInfo *pCreateInfo) {
    reset();
    raytracingPipelineCI.initialize(pCreateInfo);

    stage_state.resize(pCreateInfo->stageCount);
    for (uint32_t stage_index = 0; stage_index < pCreateInfo->stageCount; stage_index++) {
        const auto &shader_stage = pCreateInfo->pStages[stage_index];
        // Each recognized ray-tracing stage ORs its own bit into active_shaders;
        // unrecognized stages fall through to the default (no bit recorded).
        switch (shader_stage.stage) {
            case VK_SHADER_STAGE_RAYGEN_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
                break;
            case VK_SHADER_STAGE_ANY_HIT_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_ANY_HIT_BIT_NV;
                break;
            case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV;
                break;
            case VK_SHADER_STAGE_MISS_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_MISS_BIT_NV;
                break;
            case VK_SHADER_STAGE_INTERSECTION_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_INTERSECTION_BIT_NV;
                break;
            case VK_SHADER_STAGE_CALLABLE_BIT_NV:
                this->active_shaders |= VK_SHADER_STAGE_CALLABLE_BIT_NV;
                break;
            default:
                // TODO : Flag error
                break;
        }
        state_data->RecordPipelineShaderStage(&shader_stage, this, &stage_state[stage_index]);
    }
}
// Explicit instantiations for the two supported create-info variants.
template void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data,
                                                     const VkRayTracingPipelineCreateInfoNV *pCreateInfo);
template void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data,
                                                     const VkRayTracingPipelineCreateInfoKHR *pCreateInfo);
// Validates vkCmdSetFragmentShadingRateKHR: at least one fragment-shading-rate
// feature must be enabled, the requested fragment size must match the enabled
// features and be a power-of-two no larger than 4x4, and the combiner ops must
// respect the enabled features and device combiner-op support.
bool CoreChecks::PreCallValidateCmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D *pFragmentSize,
                                                             const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    const char *cmd_name = "vkCmdSetFragmentShadingRateKHR()";
    bool skip = ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT,
                                      "VUID-vkCmdSetFragmentShadingRateKHR-commandBuffer-cmdpool");
    skip |= ValidateCmd(cb_state, CMD_SETFRAGMENTSHADINGRATEKHR, cmd_name);

    // At least one of the three shading-rate features must be enabled.
    if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate &&
        !enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate &&
        !enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
        skip |= LogError(
            cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04509",
            "vkCmdSetFragmentShadingRateKHR: Application called %s, but no fragment shading rate features have been enabled.",
            cmd_name);
    }

    // Without pipelineFragmentShadingRate, the fragment size must stay 1x1.
    if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->width != 1) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04507",
                         "vkCmdSetFragmentShadingRateKHR: Pipeline fragment width of %u has been specified in %s, but "
                         "pipelineFragmentShadingRate is not enabled",
                         pFragmentSize->width, cmd_name);
    }

    if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->height != 1) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04508",
                         "vkCmdSetFragmentShadingRateKHR: Pipeline fragment height of %u has been specified in %s, but "
                         "pipelineFragmentShadingRate is not enabled",
                         pFragmentSize->height, cmd_name);
    }

    // Combiner op [0] combines with the primitive rate; [1] with the attachment rate.
    // A non-KEEP op requires the corresponding feature.
    if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate &&
        combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-primitiveFragmentShadingRate-04510",
                         "vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but "
                         "primitiveFragmentShadingRate is not enabled",
                         string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name);
    }

    if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
        combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-attachmentFragmentShadingRate-04511",
                         "vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but "
                         "attachmentFragmentShadingRate is not enabled",
                         string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name);
    }

    // Without fragmentShadingRateNonTrivialCombinerOps, only KEEP and REPLACE are allowed.
    if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
        (combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
         combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512",
                         "vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but "
                         "fragmentShadingRateNonTrivialCombinerOps is "
                         "not supported",
                         string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name);
    }

    if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps &&
        (combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR &&
         combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512",
                         "vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but "
                         "fragmentShadingRateNonTrivialCombinerOps "
                         "is not supported",
                         string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name);
    }

    // Fragment size: each dimension must be non-zero, a power of two, and <= 4.
    if (pFragmentSize->width == 0) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04513",
                         "vkCmdSetFragmentShadingRateKHR: Fragment width of %u has been specified in %s.", pFragmentSize->width,
                         cmd_name);
    }

    if (pFragmentSize->height == 0) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04514",
                         "vkCmdSetFragmentShadingRateKHR: Fragment height of %u has been specified in %s.", pFragmentSize->height,
                         cmd_name);
    }

    if (pFragmentSize->width != 0 && !IsPowerOfTwo(pFragmentSize->width)) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04515",
                         "vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment width of %u has been specified in %s.",
                         pFragmentSize->width, cmd_name);
    }

    if (pFragmentSize->height != 0 && !IsPowerOfTwo(pFragmentSize->height)) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04516",
                         "vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment height of %u has been specified in %s.",
                         pFragmentSize->height, cmd_name);
    }

    if (pFragmentSize->width > 4) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04517",
                         "vkCmdSetFragmentShadingRateKHR: Fragment width of %u specified in %s is too large.", pFragmentSize->width,
                         cmd_name);
    }

    if (pFragmentSize->height > 4) {
        skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04518",
                         "vkCmdSetFragmentShadingRateKHR: Fragment height of %u specified in %s is too large",
                         pFragmentSize->height, cmd_name);
    }
    return skip;
}
| 1 | 15,070 | I verified offline with @jeremyg-lunarg this should be removed. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -65,6 +65,7 @@ func (i *IncludeWorkflow) populate(ctx context.Context, s *Step) DError {
i.Workflow.Logger = i.Workflow.parent.Logger
i.Workflow.Name = s.name
i.Workflow.DefaultTimeout = s.Timeout
+ fmt.Println("incldued_wf name:", i.Workflow.Name, " timeout:", i.Workflow.DefaultTimeout)
var errs DError
Loop: | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"context"
"fmt"
"path/filepath"
"reflect"
"strings"
)
// IncludeWorkflow defines a Daisy workflow injection step. This step will
// 'include' the workflow found the path given into the parent workflow. Unlike
// a Subworkflow the included workflow will exist in the same namespace
// as the parent and have access to all its resources.
type IncludeWorkflow struct {
Path string
Vars map[string]string `json:",omitempty"`
Workflow *Workflow `json:",omitempty"`
}
func (i *IncludeWorkflow) populate(ctx context.Context, s *Step) DError {
if i.Path != "" {
var err error
if i.Workflow, err = s.w.NewIncludedWorkflowFromFile(i.Path); err != nil {
return newErr("failed to parse duration for step includeworkflow", err)
}
} else {
if i.Workflow == nil {
return Errf(fmt.Sprintf("IncludeWorkflow %q does not have a workflow", s.name))
}
s.w.includeWorkflow(i.Workflow)
}
i.Workflow.id = i.Workflow.parent.id
i.Workflow.username = i.Workflow.parent.username
i.Workflow.ComputeClient = i.Workflow.parent.ComputeClient
i.Workflow.StorageClient = i.Workflow.parent.StorageClient
i.Workflow.cloudLoggingClient = i.Workflow.parent.cloudLoggingClient
i.Workflow.GCSPath = i.Workflow.parent.GCSPath
i.Workflow.Name = i.Workflow.parent.Name
i.Workflow.Project = i.Workflow.parent.Project
i.Workflow.Zone = i.Workflow.parent.Zone
i.Workflow.DefaultTimeout = i.Workflow.parent.DefaultTimeout
i.Workflow.autovars = i.Workflow.parent.autovars
i.Workflow.bucket = i.Workflow.parent.bucket
i.Workflow.scratchPath = i.Workflow.parent.scratchPath
i.Workflow.sourcesPath = i.Workflow.parent.sourcesPath
i.Workflow.logsPath = i.Workflow.parent.logsPath
i.Workflow.outsPath = i.Workflow.parent.outsPath
i.Workflow.externalLogging = i.Workflow.parent.externalLogging
i.Workflow.Logger = i.Workflow.parent.Logger
i.Workflow.Name = s.name
i.Workflow.DefaultTimeout = s.Timeout
var errs DError
Loop:
for k, v := range i.Vars {
for wv := range i.Workflow.Vars {
if k == wv {
i.Workflow.AddVar(k, v)
continue Loop
}
}
errs = addErrs(errs, Errf("unknown workflow Var %q passed to IncludeWorkflow %q", k, s.name))
}
if errs != nil {
return errs
}
var replacements []string
for k, v := range i.Workflow.autovars {
if k == "NAME" {
v = s.name
}
if k == "WFDIR" {
v = i.Workflow.workflowDir
}
replacements = append(replacements, fmt.Sprintf("${%s}", k), v)
}
substitute(reflect.ValueOf(i.Workflow).Elem(), strings.NewReplacer(replacements...))
for k, v := range i.Workflow.Vars {
replacements = append(replacements, fmt.Sprintf("${%s}", k), v.Value)
}
substitute(reflect.ValueOf(i.Workflow).Elem(), strings.NewReplacer(replacements...))
// We do this here, and not in validate, as embedded startup scripts could
// have what we think are daisy variables.
if err := i.Workflow.validateVarsSubbed(); err != nil {
return err
}
if err := i.Workflow.substituteSourceVars(ctx, reflect.ValueOf(i.Workflow).Elem()); err != nil {
return err
}
for name, st := range i.Workflow.Steps {
st.name = name
st.w = i.Workflow
if err := st.w.populateStep(ctx, st); err != nil {
return err
}
}
// Copy Sources up to parent resolving relative paths as we go.
for k, v := range i.Workflow.Sources {
if v == "" {
continue
}
if _, ok := s.w.Sources[k]; ok {
return Errf("source %q already exists in workflow", k)
}
if s.w.Sources == nil {
s.w.Sources = map[string]string{}
}
if _, _, err := splitGCSPath(v); err != nil && !filepath.IsAbs(v) {
v = filepath.Join(i.Workflow.workflowDir, v)
}
s.w.Sources[k] = v
}
return nil
}
func (i *IncludeWorkflow) validate(ctx context.Context, s *Step) DError {
return i.Workflow.validate(ctx)
}
func (i *IncludeWorkflow) run(ctx context.Context, s *Step) DError {
return i.Workflow.run(ctx)
}
| 1 | 9,645 | This should be removed? If not, typo in incldued_wf | GoogleCloudPlatform-compute-image-tools | go |
@@ -19,6 +19,7 @@ export const variant = ({
export default variant
-export const buttonStyle = variant({ key: 'buttons' })
+export const buttonStyle =
+ variant({ key: 'buttons' }) || variant({ key: 'buttonStyles' })
export const textStyle = variant({ key: 'textStyles', prop: 'textStyle' })
export const colorStyle = variant({ key: 'colorStyles', prop: 'colors' }) | 1 | import { get, createParser } from '@styled-system/core'
export const variant = ({
scale,
prop = 'variant',
// shim for v4 API
key,
}) => {
const sx = (value, scale) => {
return get(scale, value, null)
}
sx.scale = scale || key
const config = {
[prop]: sx,
}
const parser = createParser(config)
return parser
}
export default variant
export const buttonStyle = variant({ key: 'buttons' })
export const textStyle = variant({ key: 'textStyles', prop: 'textStyle' })
export const colorStyle = variant({ key: 'colorStyles', prop: 'colors' })
| 1 | 5,218 | Sorry for the delay on this! It looks like Circle CI isn't running tests on some of the PRs, but this doesn't look like it would work I might be missing something, but are the tests all passing locally? | styled-system-styled-system | js |
@@ -68,6 +68,15 @@ type MatchFile struct {
//
// Default is first_exist.
TryPolicy string `json:"try_policy,omitempty"`
+
+ // A list of delimiters to use to split the path in two
+ // when trying files. If empty, no splitting will
+ // occur, and the path will be tried as-is. For each
+ // split value, the left-hand side of the split,
+ // including the split value, will be the path tried.
+ // For example, the path `/remote.php/dav/` using the
+ // split value `.php` would try the file `/remote.php`.
+ SplitPath []string `json:"split_path,omitempty"`
}
// CaddyModule returns the Caddy module information. | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileserver
import (
"fmt"
"net/http"
"os"
"path"
"strings"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
func init() {
caddy.RegisterModule(MatchFile{})
}
// MatchFile is an HTTP request matcher that can match
// requests based upon file existence.
//
// Upon matching, two new placeholders will be made
// available:
//
// - `{http.matchers.file.relative}` The root-relative
// path of the file. This is often useful when rewriting
// requests.
// - `{http.matchers.file.absolute}` The absolute path
// of the matched file.
type MatchFile struct {
// The root directory, used for creating absolute
// file paths, and required when working with
// relative paths; if not specified, `{http.vars.root}`
// will be used, if set; otherwise, the current
// directory is assumed. Accepts placeholders.
Root string `json:"root,omitempty"`
// The list of files to try. Each path here is
// considered related to Root. If nil, the request
// URL's path will be assumed. Files and
// directories are treated distinctly, so to match
// a directory, the filepath MUST end in a forward
// slash `/`. To match a regular file, there must
// be no trailing slash. Accepts placeholders.
TryFiles []string `json:"try_files,omitempty"`
// How to choose a file in TryFiles. Can be:
//
// - first_exist
// - smallest_size
// - largest_size
// - most_recently_modified
//
// Default is first_exist.
TryPolicy string `json:"try_policy,omitempty"`
}
// CaddyModule returns the Caddy module information.
func (MatchFile) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.file",
New: func() caddy.Module { return new(MatchFile) },
}
}
// UnmarshalCaddyfile sets up the matcher from Caddyfile tokens. Syntax:
//
// file {
// root <path>
// try_files <files...>
// try_policy first_exist|smallest_size|largest_size|most_recently_modified
// }
//
func (m *MatchFile) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
for d.NextBlock(0) {
switch d.Val() {
case "root":
if !d.NextArg() {
return d.ArgErr()
}
m.Root = d.Val()
case "try_files":
m.TryFiles = d.RemainingArgs()
if len(m.TryFiles) == 0 {
return d.ArgErr()
}
case "try_policy":
if !d.NextArg() {
return d.ArgErr()
}
m.TryPolicy = d.Val()
}
}
}
return nil
}
// Provision sets up m's defaults.
func (m *MatchFile) Provision(_ caddy.Context) error {
if m.Root == "" {
m.Root = "{http.vars.root}"
}
return nil
}
// Validate ensures m has a valid configuration.
func (m MatchFile) Validate() error {
switch m.TryPolicy {
case "",
tryPolicyFirstExist,
tryPolicyLargestSize,
tryPolicySmallestSize,
tryPolicyMostRecentlyMod:
default:
return fmt.Errorf("unknown try policy %s", m.TryPolicy)
}
return nil
}
// Match returns true if r matches m. Returns true
// if a file was matched. If so, two placeholders
// will be available:
// - http.matchers.file.relative
// - http.matchers.file.absolute
func (m MatchFile) Match(r *http.Request) bool {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
rel, abs, matched := m.selectFile(r)
if matched {
repl.Set("http.matchers.file.relative", rel)
repl.Set("http.matchers.file.absolute", abs)
}
return matched
}
// selectFile chooses a file according to m.TryPolicy by appending
// the paths in m.TryFiles to m.Root, with placeholder replacements.
// It returns the root-relative path to the matched file, the full
// or absolute path, and whether a match was made.
func (m MatchFile) selectFile(r *http.Request) (rel, abs string, matched bool) {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
root := repl.ReplaceAll(m.Root, ".")
// if list of files to try was omitted entirely,
// assume URL path
if m.TryFiles == nil {
// m is not a pointer, so this is safe
m.TryFiles = []string{r.URL.Path}
}
switch m.TryPolicy {
case "", tryPolicyFirstExist:
for _, f := range m.TryFiles {
suffix := path.Clean(repl.ReplaceAll(f, ""))
fullpath := sanitizedPathJoin(root, suffix)
if strictFileExists(fullpath) {
return suffix, fullpath, true
}
}
case tryPolicyLargestSize:
var largestSize int64
var largestFilename string
var largestSuffix string
for _, f := range m.TryFiles {
suffix := path.Clean(repl.ReplaceAll(f, ""))
fullpath := sanitizedPathJoin(root, suffix)
info, err := os.Stat(fullpath)
if err == nil && info.Size() > largestSize {
largestSize = info.Size()
largestFilename = fullpath
largestSuffix = suffix
}
}
return largestSuffix, largestFilename, true
case tryPolicySmallestSize:
var smallestSize int64
var smallestFilename string
var smallestSuffix string
for _, f := range m.TryFiles {
suffix := path.Clean(repl.ReplaceAll(f, ""))
fullpath := sanitizedPathJoin(root, suffix)
info, err := os.Stat(fullpath)
if err == nil && (smallestSize == 0 || info.Size() < smallestSize) {
smallestSize = info.Size()
smallestFilename = fullpath
smallestSuffix = suffix
}
}
return smallestSuffix, smallestFilename, true
case tryPolicyMostRecentlyMod:
var recentDate time.Time
var recentFilename string
var recentSuffix string
for _, f := range m.TryFiles {
suffix := path.Clean(repl.ReplaceAll(f, ""))
fullpath := sanitizedPathJoin(root, suffix)
info, err := os.Stat(fullpath)
if err == nil &&
(recentDate.IsZero() || info.ModTime().After(recentDate)) {
recentDate = info.ModTime()
recentFilename = fullpath
recentSuffix = suffix
}
}
return recentSuffix, recentFilename, true
}
return
}
// strictFileExists returns true if file exists
// and matches the convention of the given file
// path. If the path ends in a forward slash,
// the file must also be a directory; if it does
// NOT end in a forward slash, the file must NOT
// be a directory.
func strictFileExists(file string) bool {
stat, err := os.Stat(file)
if err != nil {
// in reality, this can be any error
// such as permission or even obscure
// ones like "is not a directory" (when
// trying to stat a file within a file);
// in those cases we can't be sure if
// the file exists, so we just treat any
// error as if it does not exist; see
// https://stackoverflow.com/a/12518877/1048862
return false
}
if strings.HasSuffix(file, "/") {
// by convention, file paths ending
// in a slash must be a directory
return stat.IsDir()
}
// by convention, file paths NOT ending
// in a slash must NOT be a directory
return !stat.IsDir()
}
const (
tryPolicyFirstExist = "first_exist"
tryPolicyLargestSize = "largest_size"
tryPolicySmallestSize = "smallest_size"
tryPolicyMostRecentlyMod = "most_recently_modified"
)
// Interface guards
var (
_ caddy.Validator = (*MatchFile)(nil)
_ caddyhttp.RequestMatcher = (*MatchFile)(nil)
)
| 1 | 14,798 | Do you think the godoc should mention that all delimiters will be suffixed with `/`? | caddyserver-caddy | go |
@@ -111,6 +111,8 @@ public class NavigationURLBar extends FrameLayout {
return;
else if (aURL.startsWith("resource:") || SessionStore.get().isHomeUri(aURL))
aURL = "";
+ else if (aURL.startsWith("data:") && SessionStore.get().isCurrentSessionPrivate())
+ aURL = SessionStore.PRIVATE_BROWSING_URI;
else
index = aURL.indexOf("://");
} | 1 | /* -*- Mode: Java; c-basic-offset: 4; tab-width: 4; indent-tabs-mode: nil; -*-
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.vrbrowser.ui;
import android.content.Context;
import android.content.res.Resources;
import android.text.Editable;
import android.text.SpannableString;
import android.text.TextWatcher;
import android.text.style.ForegroundColorSpan;
import android.util.AttributeSet;
import android.util.TypedValue;
import android.view.KeyEvent;
import android.view.View;
import android.view.animation.Animation;
import android.view.animation.AnimationUtils;
import android.view.inputmethod.EditorInfo;
import android.widget.*;
import org.mozilla.vrbrowser.R;
import org.mozilla.vrbrowser.SessionStore;
import org.mozilla.vrbrowser.search.SearchEngine;
import org.mozilla.vrbrowser.telemetry.TelemetryWrapper;
import java.net.URI;
import java.net.URL;
import java.util.regex.Pattern;
public class NavigationURLBar extends FrameLayout {
private EditText mURL;
private ImageButton mMicrophoneButton;
private ImageView mInsecureIcon;
private ImageView mLoadingView;
private Animation mLoadingAnimation;
private RelativeLayout mURLLeftContainer;
private boolean mIsLoading = false;
private boolean mIsInsecure = false;
private int mDefaultURLLeftPadding = 0;
private int mURLProtocolColor;
private int mURLWebsiteColor;
private Pattern mURLPattern;
private NavigationURLBarDelegate mDelegate;
public interface NavigationURLBarDelegate {
void OnVoiceSearchClicked();
}
public NavigationURLBar(Context context, AttributeSet attrs) {
super(context, attrs);
initialize(context);
}
private void initialize(Context aContext) {
inflate(aContext, R.layout.navigation_url, this);
mURLPattern = Pattern.compile("[\\d\\w][.][\\d\\w]");
mURL = findViewById(R.id.urlEditText);
mURL.setShowSoftInputOnFocus(false);
mURL.setOnEditorActionListener(new TextView.OnEditorActionListener() {
@Override
public boolean onEditorAction(TextView aTextView, int actionId, KeyEvent event) {
if (actionId == EditorInfo.IME_ACTION_DONE) {
handleURLEdit(aTextView.getText().toString());
return true;
}
return false;
}
});
mURL.setOnFocusChangeListener(new OnFocusChangeListener() {
@Override
public void onFocusChange(View view, boolean b) {
if (b && mURL.getText().length() > 0) {
showVoiceSearch(false);
}
}
});
mURL.addTextChangedListener(mURLTextWatcher);
mMicrophoneButton = findViewById(R.id.microphoneButton);
mMicrophoneButton.setOnClickListener(mMicrophoneListener);
mURLLeftContainer = findViewById(R.id.urlLeftContainer);
mInsecureIcon = findViewById(R.id.insecureIcon);
mLoadingView = findViewById(R.id.loadingView);
mLoadingAnimation = AnimationUtils.loadAnimation(aContext, R.anim.loading);
mDefaultURLLeftPadding = mURL.getPaddingLeft();
TypedValue typedValue = new TypedValue();
Resources.Theme theme = aContext.getTheme();
theme.resolveAttribute(R.attr.urlProtocolColor, typedValue, true);
mURLProtocolColor = typedValue.data;
theme.resolveAttribute(R.attr.urlWebsiteColor, typedValue, true);
mURLWebsiteColor = typedValue.data;
// Prevent the URL TextEdit to get focus when user touches something outside of it
setFocusable(true);
setFocusableInTouchMode(true);
setClickable(true);
}
public void setDelegate(NavigationURLBarDelegate delegate) {
mDelegate = delegate;
}
public void setURL(String aURL) {
mURL.removeTextChangedListener(mURLTextWatcher);
int index = -1;
if (aURL != null) {
if (aURL.startsWith("jar:"))
return;
else if (aURL.startsWith("resource:") || SessionStore.get().isHomeUri(aURL))
aURL = "";
else
index = aURL.indexOf("://");
}
mURL.setText(aURL);
if (index > 0) {
SpannableString spannable = new SpannableString(aURL);
ForegroundColorSpan color1 = new ForegroundColorSpan(mURLProtocolColor);
ForegroundColorSpan color2 = new ForegroundColorSpan(mURLWebsiteColor);
spannable.setSpan(color1, 0, index + 3, 0);
spannable.setSpan(color2, index + 3, aURL.length(), 0);
mURL.setText(spannable);
} else {
mURL.setText(aURL);
}
mURL.addTextChangedListener(mURLTextWatcher);
}
public void setURLText(String aText) {
mURL.removeTextChangedListener(mURLTextWatcher);
mURL.setText(aText);
mURL.addTextChangedListener(mURLTextWatcher);
}
public void setIsInsecure(boolean aIsInsecure) {
if (mIsInsecure != aIsInsecure) {
mIsInsecure = aIsInsecure;
syncViews();
}
}
public void setIsLoading(boolean aIsLoading) {
if (mIsLoading != aIsLoading) {
mIsLoading = aIsLoading;
if (mIsLoading) {
mLoadingView.startAnimation(mLoadingAnimation);
} else {
mLoadingView.clearAnimation();
}
syncViews();
}
}
public void showVoiceSearch(boolean enabled) {
if (enabled) {
mMicrophoneButton.setImageResource(R.drawable.ic_icon_microphone);
mMicrophoneButton.setOnClickListener(mMicrophoneListener);
} else {
mMicrophoneButton.setImageResource(R.drawable.ic_icon_clear);
mMicrophoneButton.setOnClickListener(mClearListener);
}
}
private void syncViews() {
boolean showContainer = mIsInsecure || mIsLoading;
int leftPadding = mDefaultURLLeftPadding;
if (showContainer) {
mURLLeftContainer.setVisibility(View.VISIBLE);
mURLLeftContainer.measure(LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT);
mLoadingView.setVisibility(mIsLoading ? View.VISIBLE : View.GONE);
mInsecureIcon.setVisibility(!mIsLoading && mIsInsecure ? View.VISIBLE : View.GONE);
leftPadding = mURLLeftContainer.getMeasuredWidth();
}
else {
mURLLeftContainer.setVisibility(View.GONE);
}
mURL.setPadding(leftPadding, mURL.getPaddingTop(), mURL.getPaddingRight(), mURL.getPaddingBottom());
}
protected void handleURLEdit(String text) {
text = text.trim();
URI uri = null;
try {
boolean hasProtocol = text.contains("://");
String urlText = text;
// Detect when the protocol is missing from the URL.
// Look for a separated '.' in the text with no white spaces.
if (!hasProtocol && !urlText.contains(" ") && mURLPattern.matcher(urlText).find()) {
urlText = "https://" + urlText;
hasProtocol = true;
}
if (hasProtocol) {
URL url = new URL(urlText);
uri = url.toURI();
}
}
catch (Exception ex) {
}
String url;
if (uri != null) {
url = uri.toString();
TelemetryWrapper.urlBarEvent(true);
} else if (text.startsWith("about:") || text.startsWith("resource://")) {
url = text;
} else {
url = SearchEngine.get(getContext()).getSearchURL(text);
// Doing search in the URL bar, so sending "aIsURL: false" to telemetry.
TelemetryWrapper.urlBarEvent(false);
}
if (SessionStore.get().getCurrentUri() != url) {
SessionStore.get().loadUri(url);
}
showVoiceSearch(true);
}
public void setPrivateMode(boolean isEnabled) {
if (isEnabled)
mURL.setBackground(getContext().getDrawable(R.drawable.url_background_private));
else
mURL.setBackground(getContext().getDrawable(R.drawable.url_background));
}
@Override
public void setClickable(boolean clickable) {
super.setClickable(clickable);
mURL.setEnabled(clickable);
}
private OnClickListener mMicrophoneListener = new OnClickListener() {
@Override
public void onClick(View view) {
if (mDelegate != null)
mDelegate.OnVoiceSearchClicked();
TelemetryWrapper.voiceInputEvent();
}
};
private OnClickListener mClearListener = new OnClickListener() {
@Override
public void onClick(View view) {
mURL.getText().clear();
}
};
private TextWatcher mURLTextWatcher = new TextWatcher() {
@Override
public void beforeTextChanged(CharSequence charSequence, int i, int i1, int i2) {
}
@Override
public void onTextChanged(CharSequence charSequence, int i, int i1, int i2) {
if (mURL.getText().length() > 0) {
showVoiceSearch(false);
} else {
showVoiceSearch(true);
}
}
@Override
public void afterTextChanged(Editable editable) {
}
};
}
| 1 | 6,317 | what if I want to load my own, a different, data URI in Private Browsing mode? I do this often on desktop (e.g., `data:text/html,×`). admittedly, I wouldn't expect this to be done by a non-developer, but this will certainly cause a minor bug. | MozillaReality-FirefoxReality | java |
@@ -312,6 +312,7 @@ class DeleteFileIndex {
static class Builder {
private final FileIO io;
private final Set<ManifestFile> deleteManifests;
+ private long minSequenceNumber = 0L;
private Map<Integer, PartitionSpec> specsById = null;
private Expression dataFilter = Expressions.alwaysTrue();
private Expression partitionFilter = Expressions.alwaysTrue(); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.expressions.ManifestEvaluator;
import org.apache.iceberg.expressions.Projections;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.ListMultimap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Multimaps;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.types.Comparators;
import org.apache.iceberg.types.Conversions;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.Pair;
import org.apache.iceberg.util.StructLikeWrapper;
import org.apache.iceberg.util.Tasks;
/**
* An index of {@link DeleteFile delete files} by sequence number.
* <p>
* Use {@link #builderFor(FileIO, Iterable)} to construct an index, and {@link #forDataFile(long, DataFile)} or
* {@link #forEntry(ManifestEntry)} to get the the delete files to apply to a given data file.
*/
class DeleteFileIndex {
private final Map<Integer, PartitionSpec> specsById;
private final Map<Integer, Types.StructType> partitionTypeById;
private final Map<Integer, ThreadLocal<StructLikeWrapper>> wrapperById;
private final long[] globalSeqs;
private final DeleteFile[] globalDeletes;
private final Map<Pair<Integer, StructLikeWrapper>, Pair<long[], DeleteFile[]>> sortedDeletesByPartition;
DeleteFileIndex(Map<Integer, PartitionSpec> specsById, long[] globalSeqs, DeleteFile[] globalDeletes,
Map<Pair<Integer, StructLikeWrapper>, Pair<long[], DeleteFile[]>> sortedDeletesByPartition) {
this.specsById = specsById;
ImmutableMap.Builder<Integer, Types.StructType> builder = ImmutableMap.builder();
specsById.forEach((specId, spec) -> builder.put(specId, spec.partitionType()));
this.partitionTypeById = builder.build();
this.wrapperById = Maps.newConcurrentMap();
this.globalSeqs = globalSeqs;
this.globalDeletes = globalDeletes;
this.sortedDeletesByPartition = sortedDeletesByPartition;
}
public boolean isEmpty() {
return (globalDeletes == null || globalDeletes.length == 0) && sortedDeletesByPartition.isEmpty();
}
private StructLikeWrapper newWrapper(int specId) {
return StructLikeWrapper.forType(partitionTypeById.get(specId));
}
private Pair<Integer, StructLikeWrapper> partition(int specId, StructLike struct) {
ThreadLocal<StructLikeWrapper> wrapper = wrapperById.computeIfAbsent(specId,
id -> ThreadLocal.withInitial(() -> newWrapper(id)));
return Pair.of(specId, wrapper.get().set(struct));
}
DeleteFile[] forEntry(ManifestEntry<DataFile> entry) {
return forDataFile(entry.sequenceNumber(), entry.file());
}
DeleteFile[] forDataFile(long sequenceNumber, DataFile file) {
Pair<Integer, StructLikeWrapper> partition = partition(file.specId(), file.partition());
Pair<long[], DeleteFile[]> partitionDeletes = sortedDeletesByPartition.get(partition);
Stream<DeleteFile> matchingDeletes;
if (partitionDeletes == null) {
matchingDeletes = limitBySequenceNumber(sequenceNumber, globalSeqs, globalDeletes);
} else if (globalDeletes == null) {
matchingDeletes = limitBySequenceNumber(sequenceNumber, partitionDeletes.first(), partitionDeletes.second());
} else {
matchingDeletes = Stream.concat(
limitBySequenceNumber(sequenceNumber, globalSeqs, globalDeletes),
limitBySequenceNumber(sequenceNumber, partitionDeletes.first(), partitionDeletes.second()));
}
return matchingDeletes
.filter(deleteFile -> canContainDeletesForFile(file, deleteFile, specsById.get(file.specId()).schema()))
.toArray(DeleteFile[]::new);
}
private static boolean canContainDeletesForFile(DataFile dataFile, DeleteFile deleteFile, Schema schema) {
switch (deleteFile.content()) {
case POSITION_DELETES:
return canContainPosDeletesForFile(dataFile, deleteFile);
case EQUALITY_DELETES:
return canContainEqDeletesForFile(dataFile, deleteFile, schema);
}
return true;
}
private static boolean canContainPosDeletesForFile(DataFile dataFile, DeleteFile deleteFile) {
// check that the delete file can contain the data file's file_path
Map<Integer, ByteBuffer> lowers = deleteFile.lowerBounds();
Map<Integer, ByteBuffer> uppers = deleteFile.upperBounds();
if (lowers == null || uppers == null) {
return true;
}
Type pathType = MetadataColumns.DELETE_FILE_PATH.type();
int pathId = MetadataColumns.DELETE_FILE_PATH.fieldId();
Comparator<CharSequence> comparator = Comparators.charSequences();
ByteBuffer lower = lowers.get(pathId);
if (lower != null && comparator.compare(dataFile.path(), Conversions.fromByteBuffer(pathType, lower)) < 0) {
return false;
}
ByteBuffer upper = uppers.get(pathId);
if (upper != null && comparator.compare(dataFile.path(), Conversions.fromByteBuffer(pathType, upper)) > 0) {
return false;
}
return true;
}
@SuppressWarnings("checkstyle:CyclomaticComplexity")
private static boolean canContainEqDeletesForFile(DataFile dataFile, DeleteFile deleteFile, Schema schema) {
// whether to check data ranges or to assume that the ranges match
// if upper/lower bounds are missing, null counts may still be used to determine delete files can be skipped
boolean checkRanges = dataFile.lowerBounds() != null && dataFile.upperBounds() != null &&
deleteFile.lowerBounds() != null && deleteFile.upperBounds() != null;
Map<Integer, ByteBuffer> dataLowers = dataFile.lowerBounds();
Map<Integer, ByteBuffer> dataUppers = dataFile.upperBounds();
Map<Integer, ByteBuffer> deleteLowers = deleteFile.lowerBounds();
Map<Integer, ByteBuffer> deleteUppers = deleteFile.upperBounds();
Map<Integer, Long> dataNullCounts = dataFile.nullValueCounts();
Map<Integer, Long> dataValueCounts = dataFile.valueCounts();
Map<Integer, Long> deleteNullCounts = deleteFile.nullValueCounts();
Map<Integer, Long> deleteValueCounts = deleteFile.valueCounts();
for (int id : deleteFile.equalityFieldIds()) {
Types.NestedField field = schema.findField(id);
if (!field.type().isPrimitiveType()) {
// stats are not kept for nested types. assume that the delete file may match
continue;
}
if (containsNull(dataNullCounts, field) && containsNull(deleteNullCounts, field)) {
// the data has null values and null has been deleted, so the deletes must be applied
continue;
}
if (allNull(dataNullCounts, dataValueCounts, field) && allNonNull(deleteNullCounts, field)) {
// the data file contains only null values for this field, but there are no deletes for null values
return false;
}
if (allNull(deleteNullCounts, deleteValueCounts, field) && allNonNull(dataNullCounts, field)) {
// the delete file removes only null rows with null for this field, but there are no data rows with null
return false;
}
if (!checkRanges) {
// some upper and lower bounds are missing, assume they match
continue;
}
ByteBuffer dataLower = dataLowers.get(id);
ByteBuffer dataUpper = dataUppers.get(id);
ByteBuffer deleteLower = deleteLowers.get(id);
ByteBuffer deleteUpper = deleteUppers.get(id);
if (dataLower == null || dataUpper == null || deleteLower == null || deleteUpper == null) {
// at least one bound is not known, assume the delete file may match
continue;
}
if (!rangesOverlap(field.type().asPrimitiveType(), dataLower, dataUpper, deleteLower, deleteUpper)) {
// no values overlap between the data file and the deletes
return false;
}
}
return true;
}
private static <T> boolean rangesOverlap(Type.PrimitiveType type,
ByteBuffer dataLowerBuf, ByteBuffer dataUpperBuf,
ByteBuffer deleteLowerBuf, ByteBuffer deleteUpperBuf) {
Comparator<T> comparator = Comparators.forType(type);
T dataLower = Conversions.fromByteBuffer(type, dataLowerBuf);
T dataUpper = Conversions.fromByteBuffer(type, dataUpperBuf);
T deleteLower = Conversions.fromByteBuffer(type, deleteLowerBuf);
T deleteUpper = Conversions.fromByteBuffer(type, deleteUpperBuf);
return comparator.compare(deleteLower, dataUpper) <= 0 && comparator.compare(dataLower, deleteUpper) <= 0;
}
/**
 * Returns whether the stats prove that the field contains no null values.
 * Missing stats mean nulls cannot be ruled out.
 */
private static boolean allNonNull(Map<Integer, Long> nullValueCounts, Types.NestedField field) {
  // a required field can never contain null, so every value is non-null
  if (field.isRequired()) {
    return true;
  }

  // without a null count for this optional field, nulls cannot be ruled out
  Long nullCount = nullValueCounts == null ? null : nullValueCounts.get(field.fieldId());
  return nullCount != null && nullCount <= 0;
}
/**
 * Returns whether the stats prove that every value of the field is null.
 * Missing stats mean this cannot be proven.
 */
private static boolean allNull(Map<Integer, Long> nullValueCounts, Map<Integer, Long> valueCounts,
                               Types.NestedField field) {
  // a required field contains no nulls at all
  if (field.isRequired()) {
    return false;
  }

  // missing stats: cannot prove that every value is null
  if (nullValueCounts == null || valueCounts == null) {
    return false;
  }

  int fieldId = field.fieldId();
  Long nullCount = nullValueCounts.get(fieldId);
  Long totalCount = valueCounts.get(fieldId);

  // all values are null exactly when the null count equals the total value count
  return nullCount != null && nullCount.equals(totalCount);
}
/**
 * Returns whether the field may contain null values. Missing stats are treated
 * conservatively: assume nulls may be present.
 */
private static boolean containsNull(Map<Integer, Long> nullValueCounts, Types.NestedField field) {
  // required fields can never hold null
  if (field.isRequired()) {
    return false;
  }

  // with no stats for this optional field, conservatively assume nulls exist
  Long nullCount = nullValueCounts == null ? null : nullValueCounts.get(field.fieldId());
  return nullCount == null || nullCount > 0;
}
/**
 * Returns a stream of the delete files whose apply-sequence-number is at least
 * the given sequence number. {@code seqs} must be sorted and parallel to {@code files}.
 */
private static Stream<DeleteFile> limitBySequenceNumber(long sequenceNumber, long[] seqs, DeleteFile[] files) {
  if (files == null) {
    return Stream.empty();
  }

  // seqs is sorted, so binary search locates where sequenceNumber falls
  int pos = Arrays.binarySearch(seqs, sequenceNumber);

  int start;
  if (pos >= 0) {
    // found a match, but binarySearch may land anywhere inside a run of duplicates;
    // walk left to the first entry with this sequence number
    start = pos;
    while (start > 0 && seqs[start - 1] >= sequenceNumber) {
      start -= 1;
    }
  } else {
    // not found: -(pos + 1) is the insertion point, i.e. the first entry with a
    // sequence number greater than the one requested
    start = -(pos + 1);
  }

  return Arrays.stream(files, start, files.length);
}
/** Returns a {@link Builder} that will scan the given delete manifests through the given file IO. */
static Builder builderFor(FileIO io, Iterable<ManifestFile> deleteManifests) {
  return new Builder(io, Sets.newHashSet(deleteManifests));
}
/**
 * Builder for the delete file index. Reads the matching delete manifests (optionally in
 * parallel), groups the delete files by (spec id, partition), and sorts each group by the
 * sequence number at which the deletes start to apply. Equality deletes belonging to an
 * unpartitioned spec are separated out to be applied globally.
 */
static class Builder {
  private final FileIO io;
  private final Set<ManifestFile> deleteManifests;
  // partition specs by id; used to evaluate partition filters and to group by partition type
  private Map<Integer, PartitionSpec> specsById = null;
  private Expression dataFilter = Expressions.alwaysTrue();
  private Expression partitionFilter = Expressions.alwaysTrue();
  private boolean caseSensitive = true;
  // optional executor used to read the delete manifests in parallel
  private ExecutorService executorService = null;

  Builder(FileIO io, Set<ManifestFile> deleteManifests) {
    this.io = io;
    // defensive copy so later changes by the caller do not affect this builder
    this.deleteManifests = Sets.newHashSet(deleteManifests);
  }

  /** Sets the partition specs by id. */
  Builder specsById(Map<Integer, PartitionSpec> newSpecsById) {
    this.specsById = newSpecsById;
    return this;
  }

  /** Adds a row filter; combined with any previously set data filter using AND. */
  Builder filterData(Expression newDataFilter) {
    this.dataFilter = Expressions.and(dataFilter, newDataFilter);
    return this;
  }

  /** Adds a partition filter; combined with any previously set partition filter using AND. */
  Builder filterPartitions(Expression newPartitionFilter) {
    this.partitionFilter = Expressions.and(partitionFilter, newPartitionFilter);
    return this;
  }

  /** Sets whether filter expressions bind to column names case sensitively. */
  Builder caseSensitive(boolean newCaseSensitive) {
    this.caseSensitive = newCaseSensitive;
    return this;
  }

  /** Sets the executor used to read delete manifests in parallel. */
  Builder planWith(ExecutorService newExecutorService) {
    this.executorService = newExecutorService;
    return this;
  }

  /** Reads the delete manifests and builds the index. */
  DeleteFileIndex build() {
    // read all of the matching delete manifests in parallel and accumulate the matching files in a queue
    Queue<ManifestEntry<DeleteFile>> deleteEntries = new ConcurrentLinkedQueue<>();
    Tasks.foreach(deleteManifestReaders())
        .stopOnFailure().throwFailureWhenFinished()
        .executeWith(executorService)
        .run(deleteFile -> {
          try (CloseableIterable<ManifestEntry<DeleteFile>> reader = deleteFile) {
            for (ManifestEntry<DeleteFile> entry : reader) {
              // copy with stats for better filtering against data file stats
              deleteEntries.add(entry.copy());
            }
          } catch (IOException e) {
            throw new RuntimeIOException(e, "Failed to close");
          }
        });

    // build a map from (specId, partition) to delete file entries
    ListMultimap<Pair<Integer, StructLikeWrapper>, ManifestEntry<DeleteFile>> deleteFilesByPartition =
        Multimaps.newListMultimap(Maps.newHashMap(), Lists::newArrayList);
    for (ManifestEntry<DeleteFile> entry : deleteEntries) {
      int specId = entry.file().specId();
      // wrap the partition struct so it can be used as a hash map key
      StructLikeWrapper wrapper = StructLikeWrapper.forType(specsById.get(specId).partitionType())
          .set(entry.file().partition());
      deleteFilesByPartition.put(Pair.of(specId, wrapper), entry);
    }

    // sort the entries in each map value by sequence number and split into sequence numbers and delete files lists
    Map<Pair<Integer, StructLikeWrapper>, Pair<long[], DeleteFile[]>> sortedDeletesByPartition = Maps.newHashMap();
    // also, separate out equality deletes in an unpartitioned spec that should be applied globally
    long[] globalApplySeqs = null;
    DeleteFile[] globalDeletes = null;
    for (Pair<Integer, StructLikeWrapper> partition : deleteFilesByPartition.keySet()) {
      if (specsById.get(partition.first()).isUnpartitioned()) {
        // only a single unpartitioned spec may contribute global deletes
        Preconditions.checkState(globalDeletes == null, "Detected multiple partition specs with no partitions");

        // equality deletes in the unpartitioned spec apply globally
        List<Pair<Long, DeleteFile>> eqFilesSortedBySeq = deleteFilesByPartition.get(partition).stream()
            .filter(entry -> entry.file().content() == FileContent.EQUALITY_DELETES)
            .map(entry ->
                // a delete file is indexed by the sequence number it should be applied to
                Pair.of(entry.sequenceNumber() - 1, entry.file()))
            .sorted(Comparator.comparingLong(Pair::first))
            .collect(Collectors.toList());
        globalApplySeqs = eqFilesSortedBySeq.stream().mapToLong(Pair::first).toArray();
        globalDeletes = eqFilesSortedBySeq.stream().map(Pair::second).toArray(DeleteFile[]::new);

        // position deletes in the unpartitioned spec stay keyed by their partition
        List<Pair<Long, DeleteFile>> posFilesSortedBySeq = deleteFilesByPartition.get(partition).stream()
            .filter(entry -> entry.file().content() == FileContent.POSITION_DELETES)
            .map(entry -> Pair.of(entry.sequenceNumber(), entry.file()))
            .sorted(Comparator.comparingLong(Pair::first))
            .collect(Collectors.toList());
        long[] seqs = posFilesSortedBySeq.stream().mapToLong(Pair::first).toArray();
        DeleteFile[] files = posFilesSortedBySeq.stream().map(Pair::second).toArray(DeleteFile[]::new);
        sortedDeletesByPartition.put(partition, Pair.of(seqs, files));

      } else {
        List<Pair<Long, DeleteFile>> filesSortedBySeq = deleteFilesByPartition.get(partition).stream()
            .map(entry -> {
              // a delete file is indexed by the sequence number it should be applied to
              long applySeq = entry.sequenceNumber() -
                  (entry.file().content() == FileContent.EQUALITY_DELETES ? 1 : 0);
              return Pair.of(applySeq, entry.file());
            })
            .sorted(Comparator.comparingLong(Pair::first))
            .collect(Collectors.toList());
        long[] seqs = filesSortedBySeq.stream().mapToLong(Pair::first).toArray();
        DeleteFile[] files = filesSortedBySeq.stream().map(Pair::second).toArray(DeleteFile[]::new);
        sortedDeletesByPartition.put(partition, Pair.of(seqs, files));
      }
    }

    return new DeleteFileIndex(specsById, globalApplySeqs, globalDeletes, sortedDeletesByPartition);
  }

  /**
   * Returns one reader per delete manifest that may contain matching delete files,
   * filtered by the configured data and partition filters.
   */
  private Iterable<CloseableIterable<ManifestEntry<DeleteFile>>> deleteManifestReaders() {
    // cache one partition-filter evaluator per spec id (when specs are available)
    LoadingCache<Integer, ManifestEvaluator> evalCache = specsById == null ? null :
        Caffeine.newBuilder().build(specId -> {
          PartitionSpec spec = specsById.get(specId);
          return ManifestEvaluator.forPartitionFilter(
              Expressions.and(partitionFilter, Projections.inclusive(spec, caseSensitive).project(dataFilter)),
              spec, caseSensitive);
        });

    // keep only delete manifests with live files whose partition ranges may match
    Iterable<ManifestFile> matchingManifests = evalCache == null ? deleteManifests :
        Iterables.filter(deleteManifests, manifest ->
            manifest.content() == ManifestContent.DELETES &&
                (manifest.hasAddedFiles() || manifest.hasDeletedFiles()) &&
                evalCache.get(manifest.partitionSpecId()).eval(manifest));

    return Iterables.transform(
        matchingManifests,
        manifest ->
            ManifestFiles.readDeleteManifest(manifest, io, specsById)
                .filterRows(dataFilter)
                .filterPartitions(partitionFilter)
                .caseSensitive(caseSensitive)
                .liveEntries()
    );
  }
}
}
| 1 | 39,643 | I remember the sequence number 0 is kept for the data files for iceberg v1, so in theory the sequence number from delete files should start from 1. So setting it to 0 as the default value sounds correct. | apache-iceberg | java |
@@ -68,7 +68,7 @@ public class ImageVersionDaoImpl implements ImageVersionDao {
+ "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
private static final String SELECT_IMAGE_VERSION_BASE_QUERY =
"select iv.id, iv.path, iv.description, "
- + "iv.version, cast(replace(iv.version, '.', '') as unsigned integer) as int_version, "
+ + "iv.version, cast(replace(iv.version, '.', '') as integer) as int_version, "
+ "it.name, iv.state, iv.release_tag, iv.created_on, iv.created_by, iv.modified_on, "
+ "iv.modified_by from image_versions iv, image_types it where it.id = iv.type_id";
/** | 1 | /*
* Copyright 2020 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.imagemgmt.daos;
import static azkaban.Constants.ImageMgmtConstants.IMAGE_TYPE;
import static azkaban.Constants.ImageMgmtConstants.IMAGE_VERSION;
import static azkaban.Constants.ImageMgmtConstants.VERSION_STATE;
import azkaban.Constants.ImageMgmtConstants;
import azkaban.db.DatabaseOperator;
import azkaban.db.SQLTransaction;
import azkaban.imagemgmt.dto.ImageMetadataRequest;
import azkaban.imagemgmt.exeception.ErrorCode;
import azkaban.imagemgmt.exeception.ImageMgmtDaoException;
import azkaban.imagemgmt.exeception.ImageMgmtException;
import azkaban.imagemgmt.models.ImageType;
import azkaban.imagemgmt.models.ImageVersion;
import azkaban.imagemgmt.models.ImageVersion.State;
import azkaban.imagemgmt.models.ImageVersionRequest;
import com.google.common.collect.Iterables;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.dbutils.ResultSetHandler;
import org.apache.commons.lang.text.StrSubstitutor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* DAO Implementation for accessing image version metadata. This class contains implementation for
* methods such as create, get image version metadata etc.
*/
@Singleton
public class ImageVersionDaoImpl implements ImageVersionDao {
private static final Logger log = LoggerFactory.getLogger(ImageVersionDaoImpl.class);
private final DatabaseOperator databaseOperator;
private final ImageTypeDao imageTypeDao;
private static final String INSERT_IMAGE_VERSION_QUERY =
"insert into image_versions ( path, description, version, type_id, state, release_tag, "
+ "created_by, created_on, modified_by, modified_on) "
+ "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
private static final String SELECT_IMAGE_VERSION_BASE_QUERY =
"select iv.id, iv.path, iv.description, "
+ "iv.version, cast(replace(iv.version, '.', '') as unsigned integer) as int_version, "
+ "it.name, iv.state, iv.release_tag, iv.created_on, iv.created_by, iv.modified_on, "
+ "iv.modified_by from image_versions iv, image_types it where it.id = iv.type_id";
/**
* The below query uses calculated column to get the image version in integer format. The query
* consists for two inner queries. One inner query selects the max image version using the
* calculated int_version field for the image types. The second inner query matches the max
* version computed above and selects the corresponding image version records. NOTE: The computed
* column can't be used directly in the where clause hence, two inner tables are created on top of
* the inner queries.
*/
private static final String SELECT_LATEST_ACTIVE_IMAGE_VERSION_QUERY = "select outer_tbl.id, "
+ "outer_tbl.path, outer_tbl.description, outer_tbl.version, outer_tbl.name, outer_tbl.state, "
+ "outer_tbl.release_tag, outer_tbl.created_on, outer_tbl.created_by, outer_tbl.modified_on, "
+ "outer_tbl.modified_by from (select iv.id, iv.path, iv.description, iv.version, "
+ "cast(replace(iv.version, '.', '') as unsigned integer) as int_version, it.name, iv.state, "
+ "iv.release_tag, iv.created_on, iv.created_by, iv.modified_on, iv.modified_by "
+ "from image_versions iv, image_types it where it.id = iv.type_id and iv.state = ? "
+ "and it.name in ( ${image_types} )) "
+ "outer_tbl where outer_tbl.int_version in (select max(inner_tbl.int_version) max_version "
+ "from (select it.name, cast(replace(iv.version, '.', '') as unsigned integer) as int_version "
+ "from image_versions iv, image_types it where it.id = iv.type_id and iv.state = ? "
+ "and it.name in ( ${image_types} )) "
+ "inner_tbl group by inner_tbl.name);";
@Inject
public ImageVersionDaoImpl(DatabaseOperator databaseOperator, ImageTypeDao imageTypeDao) {
this.databaseOperator = databaseOperator;
this.imageTypeDao = imageTypeDao;
}
@Override
public int createImageVersion(ImageVersion imageVersion) {
ImageType imageType = imageTypeDao.getImageTypeByName(imageVersion.getName())
.orElseThrow(() -> new ImageMgmtDaoException("Unable to fetch image type metadata. Invalid "
+ "image type : " + imageVersion.getName()));
SQLTransaction<Long> insertAndGetSpaceId = transOperator -> {
// Passing timestamp from the code base and can be formatted accordingly based on timezone
Timestamp currentTimestamp = Timestamp.valueOf(LocalDateTime.now());
transOperator
.update(INSERT_IMAGE_VERSION_QUERY, imageVersion.getPath(), imageVersion.getDescription(),
imageVersion.getVersion(), imageType.getId(), imageVersion.getState().getStateValue(),
imageVersion.getReleaseTag(), imageVersion.getCreatedBy(), currentTimestamp,
imageVersion.getModifiedBy(), currentTimestamp);
transOperator.getConnection().commit();
return transOperator.getLastInsertId();
};
int imageVersionId = 0;
try {
/* what will happen if there is a partial failure in
any of the below statements?
Ideally all should happen in a transaction */
imageVersionId = databaseOperator.transaction(insertAndGetSpaceId).intValue();
} catch (SQLException e) {
log.error("Unable to create the image version metadata", e);
String errorMessage = "";
// TODO: Find a better way to get the error message. Currently apache common dbutils throws
// sql exception for all the below error scenarios and error message contains complete
// query as well, hence generic error message is thrown.
if (e.getErrorCode() == 1062) {
errorMessage = "Reason: Duplicate key provided for one or more column(s)";
}
if (e.getErrorCode() == 1406) {
errorMessage = "Reason: Data too long for one or more column(s).";
}
throw new ImageMgmtDaoException(ErrorCode.BAD_REQUEST, "Exception while creating image "
+ "version metadata" + errorMessage);
}
return imageVersionId;
}
@Override
public List<ImageVersion> findImageVersions(ImageMetadataRequest imageMetadataRequest)
throws ImageMgmtException {
List<ImageVersion> imageVersions = new ArrayList<>();
try {
StringBuilder queryBuilder = new StringBuilder(SELECT_IMAGE_VERSION_BASE_QUERY);
List<Object> params = new ArrayList<>();
// Add imageType in the query
if (imageMetadataRequest.getParams().containsKey(ImageMgmtConstants.IMAGE_TYPE)) {
queryBuilder.append(" AND ");
queryBuilder.append(" it.name = ?");
params.add(imageMetadataRequest.getParams().get(ImageMgmtConstants.IMAGE_TYPE));
}
// Add imageVersion in the query if present
if (imageMetadataRequest.getParams().containsKey(ImageMgmtConstants.IMAGE_VERSION)) {
queryBuilder.append(" AND ");
queryBuilder.append(" iv.version = ?");
params.add(imageMetadataRequest.getParams().get(ImageMgmtConstants.IMAGE_VERSION));
}
// Add versionState in the query if present
if (imageMetadataRequest.getParams().containsKey(ImageMgmtConstants.VERSION_STATE)) {
queryBuilder.append(" AND ");
queryBuilder.append(" iv.state = ?");
State versionState = (State) imageMetadataRequest.getParams()
.get(ImageMgmtConstants.VERSION_STATE);
params.add(versionState.getStateValue());
}
log.info("Image version get query : " + queryBuilder.toString());
imageVersions = databaseOperator.query(queryBuilder.toString(),
new FetchImageVersionHandler(), Iterables.toArray(params, Object.class));
} catch (SQLException ex) {
log.error("Exception while fetching image version ", ex);
throw new ImageMgmtDaoException(ErrorCode.NOT_FOUND, "Exception while fetching image "
+ "version");
}
return imageVersions;
}
@Override
public Optional<ImageVersion> getImageVersion(String imageTypeName, String imageVersion,
State versionState) throws ImageMgmtException {
ImageMetadataRequest imageMetadataRequest = ImageMetadataRequest.newBuilder()
.addParam(IMAGE_TYPE, imageTypeName)
.addParam(IMAGE_VERSION, imageVersion)
.addParam(VERSION_STATE, versionState)
.build();
List<ImageVersion> imageVersions = findImageVersions(imageMetadataRequest);
return imageVersions != null && imageVersions.size() > 0 ? Optional.of(imageVersions.get(0))
: Optional.empty();
}
@Override
public List<ImageVersion> getActiveVersionByImageTypes(Set<String> imageTypes)
throws ImageMgmtException {
List<ImageVersion> imageVersions = new ArrayList<>();
try {
// Add outer select clause
StringBuilder inClauseBuilder = new StringBuilder();
for (int i = 0; i < imageTypes.size(); i++) {
inClauseBuilder.append("?,");
}
inClauseBuilder.setLength(inClauseBuilder.length() - 1);
Map<String, String> valueMap = new HashMap<>();
valueMap.put("image_types", inClauseBuilder.toString());
StrSubstitutor strSubstitutor = new StrSubstitutor(valueMap);
String query = strSubstitutor.replace(SELECT_LATEST_ACTIVE_IMAGE_VERSION_QUERY);
log.info("Image version getActiveVersionByImageTypes query : " + query);
List<Object> params = new ArrayList<>();
params.add(State.ACTIVE.getStateValue());
params.addAll(imageTypes);
params.add(State.ACTIVE.getStateValue());
params.addAll(imageTypes);
imageVersions = databaseOperator.query(query,
new FetchImageVersionHandler(), Iterables.toArray(params, Object.class));
} catch (SQLException ex) {
log.error("Exception while fetching image version ", ex);
throw new ImageMgmtDaoException(ErrorCode.BAD_REQUEST, "Exception while fetching image "
+ "version");
}
return imageVersions;
}
@Override
public void updateImageVersion(ImageVersionRequest imageVersionRequest)
throws ImageMgmtException {
try {
List<Object> params = new ArrayList<>();
StringBuilder queryBuilder = new StringBuilder("update image_versions set ");
if (imageVersionRequest.getPath() != null) {
queryBuilder.append(" path = ?, ");
params.add(imageVersionRequest.getPath());
}
if (imageVersionRequest.getDescription() != null) {
queryBuilder.append(" description = ?, ");
params.add(imageVersionRequest.getDescription());
}
if (imageVersionRequest.getState() != null) {
queryBuilder.append(" state = ?, ");
params.add(imageVersionRequest.getState().getStateValue());
}
queryBuilder.append(" modified_by = ?, modified_on = ?");
params.add(imageVersionRequest.getModifiedBy());
params.add(Timestamp.valueOf(LocalDateTime.now()));
queryBuilder.append(" where id = ? ");
params.add(imageVersionRequest.getId());
databaseOperator.update(queryBuilder.toString(), Iterables.toArray(params, Object.class));
} catch (SQLException ex) {
log.error("Exception while updating image version ", ex);
String errorMessage = "";
// TODO: Find a better way to get the error message. Currently apache common dbutils throws
// sql exception for all the below error scenarios and error message contains complete
// query as well, hence generic error message is thrown.
if (ex.getErrorCode() == 1406) {
errorMessage = "Reason: Data too long for one or more column(s).";
}
throw new ImageMgmtDaoException(ErrorCode.BAD_REQUEST, "Exception while updating image "
+ "version");
}
}
/**
* ResultSetHandler implementation class for fetching image version
*/
public static class FetchImageVersionHandler implements ResultSetHandler<List<ImageVersion>> {
@Override
public List<ImageVersion> handle(ResultSet rs) throws SQLException {
if (!rs.next()) {
return Collections.emptyList();
}
List<ImageVersion> imageVersions = new ArrayList<>();
do {
int id = rs.getInt("id");
String path = rs.getString("path");
String description = rs.getString("description");
String version = rs.getString("version");
String name = rs.getString("name");
String state = rs.getString("state");
String releaseTag = rs.getString("release_tag");
String createdOn = rs.getString("created_on");
String createdBy = rs.getString("created_by");
String modifiedOn = rs.getString("modified_on");
String modifiedBy = rs.getString("modified_by");
ImageVersion imageVersion = new ImageVersion();
imageVersion.setId(id);
imageVersion.setPath(path);
imageVersion.setDescription(description);
imageVersion.setVersion(version);
imageVersion.setState(State.fromStateValue(state));
imageVersion.setReleaseTag(releaseTag);
imageVersion.setName(name);
imageVersion.setCreatedOn(createdOn);
imageVersion.setCreatedBy(createdBy);
imageVersion.setModifiedBy(modifiedBy);
imageVersion.setModifiedOn(modifiedOn);
imageVersions.add(imageVersion);
} while (rs.next());
return imageVersions;
}
}
} | 1 | 20,974 | For all these MYSQL queries unsigned is required. I have tested again and without unsigned these queries are failing. | azkaban-azkaban | java |
@@ -57,6 +57,8 @@ const defaultState = {
editorNoteStatuses: {},
};
+const MAX_HISTORY = 200;
+
const stateUtils = {};
const derivedStateCache_ = {}; | 1 | const Note = require('lib/models/Note.js');
const Folder = require('lib/models/Folder.js');
const ArrayUtils = require('lib/ArrayUtils.js');
const { ALL_NOTES_FILTER_ID } = require('lib/reserved-ids');
// Initial shape of the whole application state handled by this reducer.
const defaultState = {
	// Currently loaded notes and which item they belong to.
	notes: [],
	notesSource: '',
	// 'Folder', 'Tag' or 'Search' - what kind of item the note list is attached to.
	notesParentType: null,
	folders: [],
	tags: [],
	masterKeys: [],
	notLoadedMasterKeys: [],
	searches: [],
	// Current selection.
	selectedNoteIds: [],
	selectedNoteHash: '',
	selectedFolderId: null,
	selectedTagId: null,
	selectedSearchId: null,
	selectedItemType: 'note',
	// Remembers, per folder/tag/search id, which notes were last selected there.
	lastSelectedNotesIds: {
		Folder: {},
		Tag: {},
		Search: {},
	},
	showSideMenu: false,
	screens: {},
	historyCanGoBack: false,
	// Synchronisation status.
	syncStarted: false,
	syncReport: {},
	searchQuery: '',
	settings: {},
	sharedData: null,
	appState: 'starting',
	hasDisabledSyncItems: false,
	hasDisabledEncryptionItems: false,
	customCss: '',
	templates: [],
	collapsedFolderIds: [],
	clipperServer: {
		startState: 'idle',
		port: null,
	},
	decryptionWorker: {
		state: 'idle',
		itemIndex: 0,
		itemCount: 0,
	},
	selectedNoteTags: [],
	resourceFetcher: {
		toFetchCount: 0,
	},
	// Note navigation history stacks (back / forward arrows).
	backwardHistoryNotes: [],
	forwardHistoryNotes: [],
	plugins: {},
	// NOTE(review): presumably ids of notes created in the UI but not yet saved -
	// confirm against the actions that populate this.
	provisionalNoteIds: [],
	editorNoteStatuses: {},
};
const stateUtils = {};

// Cache of derived objects, keyed by a label plus the serialized value, so that
// identical derivations return the same object reference and therefore do not
// trigger unnecessary updates in the components that consume them.
const derivedStateCache_ = {};

const cacheEnabledOutput = (key, output) => {
	const cacheKey = `${key}_${JSON.stringify(output)}`;
	if (!derivedStateCache_[cacheKey]) derivedStateCache_[cacheKey] = output;
	return derivedStateCache_[cacheKey];
};

// Sort order of the note list, derived from the settings.
stateUtils.notesOrder = function(stateSettings) {
	const order = [
		{
			by: stateSettings['notes.sortOrder.field'],
			dir: stateSettings['notes.sortOrder.reverse'] ? 'DESC' : 'ASC',
		},
	];
	return cacheEnabledOutput('notesOrder', order);
};

// Sort order of the folder list, derived from the settings.
stateUtils.foldersOrder = function(stateSettings) {
	const order = [
		{
			by: stateSettings['folders.sortOrder.field'],
			dir: stateSettings['folders.sortOrder.reverse'] ? 'DESC' : 'ASC',
		},
	];
	return cacheEnabledOutput('foldersOrder', order);
};

// Returns the item (folder, tag or search) the note list is currently attached
// to, as { type, id }, or null when there is no usable parent.
stateUtils.parentItem = function(state) {
	const type = state.notesParentType;
	let id = null;
	if (type === 'Folder') id = state.selectedFolderId;
	else if (type === 'Tag') id = state.selectedTagId;
	else if (type === 'Search') id = state.selectedSearchId;
	return type && id ? { type: type, id: id } : null;
};

// Note ids that were selected the last time the current parent was active.
stateUtils.lastSelectedNoteIds = function(state) {
	const parent = stateUtils.parentItem(state);
	if (!parent) return [];
	return state.lastSelectedNotesIds[parent.type][parent.id] || [];
};

// Returns { id, parent_id } of the first selected note that is currently
// loaded, or undefined when nothing usable is selected.
stateUtils.getLastSeenNote = function(state) {
	const ids = state.selectedNoteIds;
	if (!ids || !ids.length) return undefined;
	const note = state.notes.find(n => n.id === ids[0]);
	if (!note) return undefined;
	return {
		id: note.id,
		parent_id: note.parent_id,
	};
};
// Returns true when at least one item in the array still has encryption applied.
function arrayHasEncryptedItems(array) {
	return array.some(item => !!item.encryption_applied);
}
// True when any loaded note, folder or tag still has encryption applied.
function stateHasEncryptedItems(state) {
	return [state.notes, state.folders, state.tags].some(items => arrayHasEncryptedItems(items));
}
// Collapses or expands folder `action.id` in the sidebar, depending on
// `action.collapsed`. Returns the original state object when nothing changes.
function folderSetCollapsed(state, action) {
	const index = state.collapsedFolderIds.indexOf(action.id);

	if (action.collapsed) {
		// Nothing to do if the folder is already collapsed.
		if (index >= 0) return state;
		return Object.assign({}, state, {
			collapsedFolderIds: state.collapsedFolderIds.concat(action.id),
		});
	}

	// Nothing to do if the folder is not collapsed.
	if (index < 0) return state;
	const collapsedFolderIds = state.collapsedFolderIds.slice();
	collapsedFolderIds.splice(index, 1);
	return Object.assign({}, state, { collapsedFolderIds: collapsedFolderIds });
}
// When deleting a note, tag or folder
//
// Removes the deleted item from the relevant state list and moves the
// selection to a sensible neighbour: if the deleted item was selected, the
// item that takes its position becomes selected; otherwise the current
// selection is preserved. Deleted notes (or the notes of a deleted folder)
// are also purged from both navigation history stacks.
function handleItemDelete(state, action) {
	// action.type => [state list key, state selection key, selection is a single id]
	const map = {
		FOLDER_DELETE: ['folders', 'selectedFolderId', true],
		NOTE_DELETE: ['notes', 'selectedNoteIds', false],
		TAG_DELETE: ['tags', 'selectedTagId', true],
		SEARCH_DELETE: ['searches', 'selectedSearchId', true],
	};

	const listKey = map[action.type][0];
	const selectedItemKey = map[action.type][1];
	const isSingular = map[action.type][2];
	// Normalise to an array of selected ids so both cases share the same logic.
	const selectedItemKeys = isSingular ? [state[selectedItemKey]] : state[selectedItemKey];
	const isSelected = selectedItemKeys.includes(action.id);

	const items = state[listKey];
	const newItems = [];
	// Indexes (into newItems) of the items that should remain/become selected.
	let newSelectedIndexes = [];

	for (let i = 0; i < items.length; i++) {
		const item = items[i];
		if (isSelected) {
			// the selected item is deleted so select the following item
			// if multiple items are selected then just use the first one
			if (selectedItemKeys[0] == item.id) {
				newSelectedIndexes.push(newItems.length);
			}
		} else {
			// the selected item/s is not deleted so keep it selected
			if (selectedItemKeys.includes(item.id)) {
				newSelectedIndexes.push(newItems.length);
			}
		}
		// Skip the deleted item itself.
		if (item.id == action.id) {
			continue;
		}
		newItems.push(item);
	}

	if (newItems.length == 0) {
		newSelectedIndexes = []; // no remaining items so no selection
	} else if (newSelectedIndexes.length == 0) {
		newSelectedIndexes.push(0); // no selection exists so select the top
	} else {
		// when the items at end of list are deleted then select the end
		for (let i = 0; i < newSelectedIndexes.length; i++) {
			if (newSelectedIndexes[i] >= newItems.length) {
				newSelectedIndexes = [newItems.length - 1];
				break;
			}
		}
	}

	const newState = Object.assign({}, state);
	newState[listKey] = newItems;

	// Keep the note navigation history consistent with the deletion.
	if (listKey === 'notes') {
		newState.backwardHistoryNotes = newState.backwardHistoryNotes.filter(note => note.id != action.id);
		newState.forwardHistoryNotes = newState.forwardHistoryNotes.filter(note => note.id != action.id);
	}
	if (listKey === 'folders') {
		// A deleted folder invalidates the history entries of all its notes.
		newState.backwardHistoryNotes = newState.backwardHistoryNotes.filter(note => note.parent_id != action.id);
		newState.forwardHistoryNotes = newState.forwardHistoryNotes.filter(note => note.parent_id != action.id);
	}

	// Convert the selected indexes back into item ids.
	const newIds = [];
	for (let i = 0; i < newSelectedIndexes.length; i++) {
		newIds.push(newItems[newSelectedIndexes[i]].id);
	}
	newState[selectedItemKey] = isSingular ? newIds[0] : newIds;

	// With nothing left to select, fall back to showing the folder list.
	if ((newIds.length == 0) && newState.notesParentType !== 'Folder') {
		newState.notesParentType = 'Folder';
	}

	return newState;
}
// Merges `action.item` into the item with the same id in one of the state
// lists (tags, folders or master keys), or appends it when not found.
// `keyName` can be passed to target a specific list; otherwise the list is
// derived from the action type.
//
// Fix: the previous implementation used `state[itemsKey].splice(0)`, which
// empties the array held by the *previous* state object, and merged the update
// into the existing item with `Object.assign(newItems[i], item)`, mutating it
// in place. Both are state mutations that a reducer must never perform; the
// list and the updated item are now copied instead.
function updateOneItem(state, action, keyName = '') {
	let itemsKey = null;
	if (keyName) { itemsKey = keyName; } else {
		if (action.type === 'TAG_UPDATE_ONE') itemsKey = 'tags';
		if (action.type === 'FOLDER_UPDATE_ONE') itemsKey = 'folders';
		if (action.type === 'MASTERKEY_UPDATE_ONE') itemsKey = 'masterKeys';
	}

	const newItems = state[itemsKey].slice();
	const item = action.item;

	let found = false;
	for (let i = 0; i < newItems.length; i++) {
		const n = newItems[i];
		if (n.id == item.id) {
			// Copy before merging so the item held by the previous state is untouched.
			newItems[i] = Object.assign({}, newItems[i], item);
			found = true;
			break;
		}
	}

	if (!found) newItems.push(item);

	const newState = Object.assign({}, state);
	newState[itemsKey] = newItems;
	return newState;
}
// Picks which parent type the note list should fall back to when the current
// one (`exclusion`) is no longer usable. Folder wins over Tag, Tag over Search;
// returns null when no fallback is available.
function defaultNotesParentType(state, exclusion) {
	if (exclusion !== 'Folder' && state.selectedFolderId) return 'Folder';
	if (exclusion !== 'Tag' && state.selectedTagId) return 'Tag';
	if (exclusion !== 'Search' && state.selectedSearchId) return 'Search';
	return null;
}
// Handles FOLDER_SELECT (and folder-related actions that carry a folderId):
// updates the selected folder, the notes parent type and, for user-initiated
// navigation ('goto'), the note history stacks.
function changeSelectedFolder(state, action, options = null) {
	if (!options) options = {};
	const newState = Object.assign({}, state);

	// Save the last seen note so that back will return to it.
	if (action.type === 'FOLDER_SELECT' && action.historyAction == 'goto') {
		const backwardHistoryNotes = newState.backwardHistoryNotes.slice();
		let forwardHistoryNotes = newState.forwardHistoryNotes.slice();

		// Don't update history if going to the same note again.
		const lastSeenNote = stateUtils.getLastSeenNote(state);
		if (lastSeenNote != null && action.id != lastSeenNote.id) {
			// A fresh navigation invalidates the forward stack.
			forwardHistoryNotes = [];
			backwardHistoryNotes.push(Object.assign({}, lastSeenNote));
		}

		newState.backwardHistoryNotes = backwardHistoryNotes;
		newState.forwardHistoryNotes = forwardHistoryNotes;
	}

	// Some actions provide the folder id as `folderId`; FOLDER_SELECT uses `id`.
	newState.selectedFolderId = 'folderId' in action ? action.folderId : action.id;
	if (!newState.selectedFolderId) {
		// No folder selected: fall back to another parent type if one is available.
		newState.notesParentType = defaultNotesParentType(state, 'Folder');
	} else {
		newState.notesParentType = 'Folder';
	}

	// NOTE(review): this early return also discards the history update made above
	// when re-selecting the current folder - confirm this is intended.
	if (newState.selectedFolderId === state.selectedFolderId && newState.notesParentType === state.notesParentType) return state;

	if (options.clearSelectedNoteIds) newState.selectedNoteIds = [];

	return newState;
}
// Remembers, for the current parent (folder, tag or search), which note ids
// are selected, so the selection can be restored when navigating back to it.
//
// Fix: the previous implementation only shallow-copied `lastSelectedNotesIds`
// and then wrote into the nested per-type object, thereby mutating the map
// still referenced by the previous state. The nested level is now copied as
// well before being written to.
function recordLastSelectedNoteIds(state, noteIds) {
	const parent = stateUtils.parentItem(state);
	if (!parent) return state;

	const newOnes = Object.assign({}, state.lastSelectedNotesIds);
	newOnes[parent.type] = Object.assign({}, newOnes[parent.type]);
	newOnes[parent.type][parent.id] = noteIds.slice();

	return Object.assign({}, state, {
		lastSelectedNotesIds: newOnes,
	});
}
// Handles all note-selection actions. Plain NOTE_SELECT also maintains the
// back/forward note history; the ADD/REMOVE/TOGGLE variants only modify the
// current multi-selection.
function changeSelectedNotes(state, action, options = null) {
	if (!options) options = {};

	// The target can come in as `id` (single), `ids` (multiple) or `noteId`.
	let noteIds = [];
	if (action.id) noteIds = [action.id];
	if (action.ids) noteIds = action.ids;
	if (action.noteId) noteIds = [action.noteId];

	let newState = Object.assign({}, state);

	if (action.type === 'NOTE_SELECT') {
		newState.selectedNoteIds = noteIds;
		newState.selectedNoteHash = action.hash ? action.hash : '';

		const backwardHistoryNotes = newState.backwardHistoryNotes.slice();
		let forwardHistoryNotes = newState.forwardHistoryNotes.slice();

		// The historyAction property is only used for user-initiated actions and tells how
		// the history stack should be handled. That property should not be present for
		// programmatic navigation. Possible values are:
		// - "goto": When going to a note, but not via the back/forward arrows.
		// - "pop": When clicking on the Back arrow
		// - "push": When clicking on the Forward arrow
		const lastSeenNote = stateUtils.getLastSeenNote(state);
		if (action.historyAction == 'goto' && lastSeenNote != null && action.id != lastSeenNote.id) {
			// A fresh navigation invalidates the forward stack.
			forwardHistoryNotes = [];
			backwardHistoryNotes.push(Object.assign({}, lastSeenNote));
		} else if (action.historyAction === 'pop' && lastSeenNote != null) {
			// Back: move the current note onto the forward stack (unless it is
			// already on top of it) and drop the entry we are going back to.
			if (forwardHistoryNotes.length === 0 || lastSeenNote.id != forwardHistoryNotes[forwardHistoryNotes.length - 1].id) {
				forwardHistoryNotes.push(Object.assign({}, lastSeenNote));
			}
			backwardHistoryNotes.pop();
		} else if (action.historyAction === 'push' && lastSeenNote != null) {
			// Forward: mirror image of the "pop" case above.
			if (backwardHistoryNotes.length === 0 || lastSeenNote.id != backwardHistoryNotes[backwardHistoryNotes.length - 1].id) {
				backwardHistoryNotes.push(Object.assign({}, lastSeenNote));
			}
			forwardHistoryNotes.pop();
		}

		newState.backwardHistoryNotes = backwardHistoryNotes;
		newState.forwardHistoryNotes = forwardHistoryNotes;
	} else if (action.type === 'NOTE_SELECT_ADD') {
		if (!noteIds.length) return state;
		newState.selectedNoteIds = ArrayUtils.unique(newState.selectedNoteIds.concat(noteIds));
	} else if (action.type === 'NOTE_SELECT_REMOVE') {
		if (!noteIds.length) return state; // Nothing to unselect
		if (state.selectedNoteIds.length <= 1) return state; // Cannot unselect the last note

		const newSelectedNoteIds = [];
		for (let i = 0; i < newState.selectedNoteIds.length; i++) {
			const id = newState.selectedNoteIds[i];
			if (noteIds.indexOf(id) >= 0) continue;
			newSelectedNoteIds.push(id);
		}
		newState.selectedNoteIds = newSelectedNoteIds;
	} else if (action.type === 'NOTE_SELECT_TOGGLE') {
		if (!noteIds.length) return state;
		// Delegate to ADD or REMOVE depending on the current selection.
		if (newState.selectedNoteIds.indexOf(noteIds[0]) >= 0) {
			newState = changeSelectedNotes(state, { type: 'NOTE_SELECT_REMOVE', id: noteIds[0] });
		} else {
			newState = changeSelectedNotes(state, { type: 'NOTE_SELECT_ADD', id: noteIds[0] });
		}
	} else {
		throw new Error('Unreachable');
	}

	// Keep the per-parent memory of the selection up to date.
	newState = recordLastSelectedNoteIds(newState, newState.selectedNoteIds);

	return newState;
}
function removeItemFromArray(array, property, value) {
for (let i = 0; i !== array.length; ++i) {
const currentItem = array[i];
if (currentItem[property] === value) {
array.splice(i, 1);
break;
}
}
return array;
}
const reducer = (state = defaultState, action) => {
let newState = state;
try {
switch (action.type) {
case 'NOTE_SELECT':
case 'NOTE_SELECT_ADD':
case 'NOTE_SELECT_REMOVE':
case 'NOTE_SELECT_TOGGLE':
newState = changeSelectedNotes(state, action);
break;
case 'NOTE_SELECT_EXTEND':
{
newState = Object.assign({}, state);
if (!newState.selectedNoteIds.length) {
newState.selectedNoteIds = [action.id];
} else {
const selectRangeId1 = state.selectedNoteIds[state.selectedNoteIds.length - 1];
const selectRangeId2 = action.id;
if (selectRangeId1 === selectRangeId2) return state;
const newSelectedNoteIds = state.selectedNoteIds.slice();
let selectionStarted = false;
for (let i = 0; i < state.notes.length; i++) {
const id = state.notes[i].id;
if (!selectionStarted && (id === selectRangeId1 || id === selectRangeId2)) {
selectionStarted = true;
if (newSelectedNoteIds.indexOf(id) < 0) newSelectedNoteIds.push(id);
continue;
} else if (selectionStarted && (id === selectRangeId1 || id === selectRangeId2)) {
if (newSelectedNoteIds.indexOf(id) < 0) newSelectedNoteIds.push(id);
break;
}
if (selectionStarted && newSelectedNoteIds.indexOf(id) < 0) {
newSelectedNoteIds.push(id);
}
}
newState.selectedNoteIds = newSelectedNoteIds;
}
}
break;
case 'NOTE_SELECT_ALL':
newState = Object.assign({}, state);
newState.selectedNoteIds = newState.notes.map(n => n.id);
break;
case 'NOTE_SELECT_ALL_TOGGLE': {
newState = Object.assign({}, state);
const allSelected = state.notes.every(n => state.selectedNoteIds.includes(n.id));
if (allSelected) {
newState.selectedNoteIds = [];
} else {
newState.selectedNoteIds = newState.notes.map(n => n.id);
}
break;
}
case 'SMART_FILTER_SELECT':
newState = Object.assign({}, state);
newState.notesParentType = 'SmartFilter';
newState.selectedSmartFilterId = action.id;
break;
case 'FOLDER_SELECT':
newState = changeSelectedFolder(state, action, { clearSelectedNoteIds: true });
break;
case 'FOLDER_AND_NOTE_SELECT':
{
newState = changeSelectedFolder(state, action);
const noteSelectAction = Object.assign({}, action, { type: 'NOTE_SELECT' });
newState = changeSelectedNotes(newState, noteSelectAction);
}
break;
case 'SETTING_UPDATE_ALL':
newState = Object.assign({}, state);
newState.settings = action.settings;
break;
case 'SETTING_UPDATE_ONE':
{
newState = Object.assign({}, state);
const newSettings = Object.assign({}, state.settings);
newSettings[action.key] = action.value;
newState.settings = newSettings;
}
break;
case 'NOTE_PROVISIONAL_FLAG_CLEAR':
{
const newIds = ArrayUtils.removeElement(state.provisionalNoteIds, action.id);
if (newIds !== state.provisionalNoteIds) {
newState = Object.assign({}, state, { provisionalNoteIds: newIds });
}
}
break;
// Replace all the notes with the provided array
case 'NOTE_UPDATE_ALL':
newState = Object.assign({}, state);
newState.notes = action.notes;
newState.notesSource = action.notesSource;
break;
// Insert the note into the note list if it's new, or
// update it within the note array if it already exists.
case 'NOTE_UPDATE_ONE':
{
const modNote = action.note;
const isViewingAllNotes = (state.notesParentType === 'SmartFilter' && state.selectedSmartFilterId === ALL_NOTES_FILTER_ID);
const noteIsInFolder = function(note, folderId) {
if (note.is_conflict) return folderId === Folder.conflictFolderId();
if (!('parent_id' in modNote) || note.parent_id == folderId) return true;
return false;
};
let movedNotePreviousIndex = 0;
let noteFolderHasChanged = false;
let newNotes = state.notes.slice();
let found = false;
for (let i = 0; i < newNotes.length; i++) {
const n = newNotes[i];
if (n.id == modNote.id) {
// Note is still in the same folder
if (isViewingAllNotes || noteIsInFolder(modNote, n.parent_id)) {
// Merge the properties that have changed (in modNote) into
// the object we already have.
newNotes[i] = Object.assign({}, newNotes[i]);
for (const n in modNote) {
if (!modNote.hasOwnProperty(n)) continue;
newNotes[i][n] = modNote[n];
}
} else {
// Note has moved to a different folder
newNotes.splice(i, 1);
noteFolderHasChanged = true;
movedNotePreviousIndex = i;
}
found = true;
break;
}
}
// Note was not found - if the current folder is the same as the note folder,
// add it to it.
if (!found) {
if (isViewingAllNotes || noteIsInFolder(modNote, state.selectedFolderId)) {
newNotes.push(modNote);
}
}
// newNotes = Note.sortNotes(newNotes, state.notesOrder, newState.settings.uncompletedTodosOnTop);
newNotes = Note.sortNotes(newNotes, stateUtils.notesOrder(state.settings), newState.settings.uncompletedTodosOnTop);
newState = Object.assign({}, state);
newState.notes = newNotes;
if (noteFolderHasChanged) {
let newIndex = movedNotePreviousIndex;
if (newIndex >= newNotes.length) newIndex = newNotes.length - 1;
if (!newNotes.length) newIndex = -1;
newState.selectedNoteIds = newIndex >= 0 ? [newNotes[newIndex].id] : [];
}
if (action.provisional) {
newState.provisionalNoteIds.push(modNote.id);
} else {
const idx = newState.provisionalNoteIds.indexOf(modNote.id);
if (idx >= 0) {
const t = newState.provisionalNoteIds.slice();
t.splice(idx, 1);
newState.provisionalNoteIds = t;
}
}
}
break;
case 'NOTE_DELETE':
{
newState = handleItemDelete(state, action);
const idx = newState.provisionalNoteIds.indexOf(action.id);
if (idx >= 0) {
const t = newState.provisionalNoteIds.slice();
t.splice(idx, 1);
newState.provisionalNoteIds = t;
}
}
break;
case 'TAG_DELETE':
newState = handleItemDelete(state, action);
newState.selectedNoteTags = removeItemFromArray(newState.selectedNoteTags.splice(0), 'id', action.id);
break;
case 'FOLDER_UPDATE_ALL':
newState = Object.assign({}, state);
newState.folders = action.items;
break;
case 'FOLDER_SET_COLLAPSED':
newState = folderSetCollapsed(state, action);
break;
case 'FOLDER_TOGGLE':
if (state.collapsedFolderIds.indexOf(action.id) >= 0) {
newState = folderSetCollapsed(state, Object.assign({ collapsed: false }, action));
} else {
newState = folderSetCollapsed(state, Object.assign({ collapsed: true }, action));
}
break;
case 'FOLDER_SET_COLLAPSED_ALL':
newState = Object.assign({}, state);
newState.collapsedFolderIds = action.ids.slice();
break;
case 'TAG_UPDATE_ALL':
newState = Object.assign({}, state);
newState.tags = action.items;
break;
case 'TAG_SELECT':
newState = Object.assign({}, state);
newState.selectedTagId = action.id;
if (!action.id) {
newState.notesParentType = defaultNotesParentType(state, 'Tag');
} else {
newState.notesParentType = 'Tag';
}
newState.selectedNoteIds = [];
break;
case 'TAG_UPDATE_ONE':
{
// We only want to update the selected note tags if the tag belongs to the currently open note
const selectedNoteHasTag = !!state.selectedNoteTags.find(tag => tag.id === action.item.id);
newState = updateOneItem(state, action);
if (selectedNoteHasTag) newState = updateOneItem(newState, action, 'selectedNoteTags');
}
break;
case 'NOTE_TAG_REMOVE':
{
newState = updateOneItem(state, action, 'tags');
const tagRemoved = action.item;
newState.selectedNoteTags = removeItemFromArray(newState.selectedNoteTags.splice(0), 'id', tagRemoved.id);
}
break;
case 'EDITOR_NOTE_STATUS_SET':
{
const newStatuses = Object.assign({}, state.editorNoteStatuses);
newStatuses[action.id] = action.status;
newState = Object.assign({}, state, { editorNoteStatuses: newStatuses });
}
break;
case 'EDITOR_NOTE_STATUS_REMOVE':
{
const newStatuses = Object.assign({}, state.editorNoteStatuses);
delete newStatuses[action.id];
newState = Object.assign({}, state, { editorNoteStatuses: newStatuses });
}
break;
case 'FOLDER_UPDATE_ONE':
case 'MASTERKEY_UPDATE_ONE':
newState = updateOneItem(state, action);
break;
case 'FOLDER_DELETE':
newState = handleItemDelete(state, action);
break;
case 'MASTERKEY_UPDATE_ALL':
newState = Object.assign({}, state);
newState.masterKeys = action.items;
break;
case 'MASTERKEY_SET_NOT_LOADED':
newState = Object.assign({}, state);
newState.notLoadedMasterKeys = action.ids;
break;
case 'MASTERKEY_ADD_NOT_LOADED':
{
if (state.notLoadedMasterKeys.indexOf(action.id) < 0) {
newState = Object.assign({}, state);
const keys = newState.notLoadedMasterKeys.slice();
keys.push(action.id);
newState.notLoadedMasterKeys = keys;
}
}
break;
case 'MASTERKEY_REMOVE_NOT_LOADED':
{
const ids = action.id ? [action.id] : action.ids;
for (let i = 0; i < ids.length; i++) {
const id = ids[i];
const index = state.notLoadedMasterKeys.indexOf(id);
if (index >= 0) {
newState = Object.assign({}, state);
const keys = newState.notLoadedMasterKeys.slice();
keys.splice(index, 1);
newState.notLoadedMasterKeys = keys;
}
}
}
break;
case 'SYNC_STARTED':
newState = Object.assign({}, state);
newState.syncStarted = true;
break;
case 'SYNC_COMPLETED':
newState = Object.assign({}, state);
newState.syncStarted = false;
break;
case 'SYNC_REPORT_UPDATE':
newState = Object.assign({}, state);
newState.syncReport = action.report;
break;
case 'SEARCH_QUERY':
newState = Object.assign({}, state);
newState.searchQuery = action.query.trim();
break;
case 'SEARCH_ADD':
{
newState = Object.assign({}, state);
const searches = newState.searches.slice();
searches.push(action.search);
newState.searches = searches;
}
break;
case 'SEARCH_UPDATE':
{
newState = Object.assign({}, state);
const searches = newState.searches.slice();
let found = false;
for (let i = 0; i < searches.length; i++) {
if (searches[i].id === action.search.id) {
searches[i] = Object.assign({}, action.search);
found = true;
break;
}
}
if (!found) searches.push(action.search);
if (!action.search.query_pattern) {
newState.notesParentType = defaultNotesParentType(state, 'Search');
} else {
newState.notesParentType = 'Search';
}
newState.searches = searches;
}
break;
case 'SEARCH_DELETE':
newState = handleItemDelete(state, action);
break;
case 'SEARCH_SELECT':
{
newState = Object.assign({}, state);
newState.selectedSearchId = action.id;
if (!action.id) {
newState.notesParentType = defaultNotesParentType(state, 'Search');
} else {
newState.notesParentType = 'Search';
}
// Update history when searching
const lastSeenNote = stateUtils.getLastSeenNote(state);
if (lastSeenNote != null && (state.backwardHistoryNotes.length === 0 ||
state.backwardHistoryNotes[state.backwardHistoryNotes.length - 1].id != lastSeenNote.id)) {
newState.forwardHistoryNotes = [];
newState.backwardHistoryNotes.push(Object.assign({},lastSeenNote));
}
newState.selectedNoteIds = [];
}
break;
case 'APP_STATE_SET':
newState = Object.assign({}, state);
newState.appState = action.state;
break;
case 'SYNC_HAS_DISABLED_SYNC_ITEMS':
newState = Object.assign({}, state);
newState.hasDisabledSyncItems = true;
break;
case 'ENCRYPTION_HAS_DISABLED_ITEMS':
newState = Object.assign({}, state);
newState.hasDisabledEncryptionItems = action.value;
break;
case 'CLIPPER_SERVER_SET':
{
newState = Object.assign({}, state);
const clipperServer = Object.assign({}, newState.clipperServer);
if ('startState' in action) clipperServer.startState = action.startState;
if ('port' in action) clipperServer.port = action.port;
newState.clipperServer = clipperServer;
}
break;
case 'DECRYPTION_WORKER_SET':
{
newState = Object.assign({}, state);
const decryptionWorker = Object.assign({}, newState.decryptionWorker);
for (const n in action) {
if (!action.hasOwnProperty(n) || n === 'type') continue;
decryptionWorker[n] = action[n];
}
newState.decryptionWorker = decryptionWorker;
}
break;
case 'RESOURCE_FETCHER_SET':
{
newState = Object.assign({}, state);
const rf = Object.assign({}, action);
delete rf.type;
newState.resourceFetcher = rf;
}
break;
case 'LOAD_CUSTOM_CSS':
newState = Object.assign({}, state);
newState.customCss = action.css;
break;
case 'TEMPLATE_UPDATE_ALL':
newState = Object.assign({}, state);
newState.templates = action.templates;
break;
case 'SET_NOTE_TAGS':
newState = Object.assign({}, state);
newState.selectedNoteTags = action.items;
break;
case 'PLUGIN_DIALOG_SET':
{
if (!action.pluginName) throw new Error('action.pluginName not specified');
newState = Object.assign({}, state);
const newPlugins = Object.assign({}, newState.plugins);
const newPlugin = newState.plugins[action.pluginName] ? Object.assign({}, newState.plugins[action.pluginName]) : {};
if ('open' in action) newPlugin.dialogOpen = action.open;
newPlugins[action.pluginName] = newPlugin;
newState.plugins = newPlugins;
}
break;
}
} catch (error) {
error.message = `In reducer: ${error.message} Action: ${JSON.stringify(action)}`;
throw error;
}
if (action.type.indexOf('NOTE_UPDATE') === 0 || action.type.indexOf('FOLDER_UPDATE') === 0 || action.type.indexOf('TAG_UPDATE') === 0) {
newState = Object.assign({}, newState);
newState.hasEncryptedItems = stateHasEncryptedItems(newState);
}
return newState;
};
module.exports = { reducer, defaultState, stateUtils };
| 1 | 13,322 | Are there tests for this? What happens when you get to limit? | laurent22-joplin | js |
@@ -174,7 +174,7 @@ module Bolt
data['config'] = {}
end
- unless Bolt::Util.windows? || data['config']['transport']
+ unless data['config']['transport']
data['config']['transport'] = 'local' if target.name == 'localhost'
end
| 1 | # frozen_string_literal: true
require 'set'
require 'bolt/config'
require 'bolt/inventory/group'
require 'bolt/target'
require 'bolt/util'
module Bolt
class Inventory
ENVIRONMENT_VAR = 'BOLT_INVENTORY'
class ValidationError < Bolt::Error
attr_accessor :path
def initialize(message, offending_group)
super(message, 'bolt.inventory/validation-error')
@_message = message
@path = [offending_group].compact
end
def details
{ 'path' => path }
end
def add_parent(parent_group)
@path << parent_group
end
def message
if path.empty?
@_message
else
"#{@_message} for group at #{path}"
end
end
end
class WildcardError < Bolt::Error
def initialize(target)
super("Found 0 nodes matching wildcard pattern #{target}", 'bolt.inventory/wildcard-error')
end
end
def self.from_config(config)
if ENV.include?(ENVIRONMENT_VAR)
begin
data = YAML.safe_load(ENV[ENVIRONMENT_VAR])
rescue Psych::Exception
raise Bolt::ParseError, "Could not parse inventory from $#{ENVIRONMENT_VAR}"
end
else
data = Bolt::Util.read_config_file(config.inventoryfile, config.default_inventoryfile, 'inventory')
end
inventory = new(data, config)
inventory.validate
inventory
end
def initialize(data, config = nil, target_vars: {}, target_facts: {}, target_features: {})
@logger = Logging.logger[self]
# Config is saved to add config options to targets
@config = config || Bolt::Config.default
@data = data ||= {}
@groups = Group.new(data.merge('name' => 'all'))
@group_lookup = {}
@target_vars = target_vars
@target_facts = target_facts
@target_features = target_features
@groups.resolve_aliases(@groups.node_aliases)
collect_groups
end
def validate
@groups.validate
end
def collect_groups
# Provide a lookup map for finding a group by name
@group_lookup = @groups.collect_groups
end
def group_names
@group_lookup.keys
end
def node_names
@groups.node_names
end
def get_targets(targets)
targets = expand_targets(targets)
targets = if targets.is_a? Array
targets.flatten.uniq(&:name)
else
[targets]
end
targets.map { |t| update_target(t) }
end
def add_to_group(targets, desired_group)
if group_names.include?(desired_group)
targets.each do |target|
if group_names.include?(target.name)
raise ValidationError.new("Group #{target.name} conflicts with node of the same name", target.name)
end
add_node(@groups, target, desired_group)
end
else
raise ValidationError.new("Group #{desired_group} does not exist in inventory", nil)
end
end
def set_var(target, key, value)
data = { key => value }
set_vars_from_hash(target.name, data)
end
def vars(target)
@target_vars[target.name] || {}
end
def add_facts(target, new_facts = {})
@logger.warn("No facts to add") if new_facts.empty?
set_facts(target.name, new_facts)
end
def facts(target)
@target_facts[target.name] || {}
end
def set_feature(target, feature, value = true)
@target_features[target.name] ||= Set.new
if value
@target_features[target.name] << feature
else
@target_features[target.name].delete(feature)
end
end
def features(target)
@target_features[target.name] || Set.new
end
def data_hash
{
data: @data,
target_hash: {
target_vars: @target_vars,
target_facts: @target_facts,
target_features: @target_features
},
config: @config.transport_data_get
}
end
#### PRIVATE ####
#
# For debugging only now
def groups_in(node_name)
@groups.data_for(node_name)['groups'] || {}
end
private :groups_in
# Pass a target to get_targets for a public version of this
# Should this reconfigure configured targets?
def update_target(target)
data = @groups.data_for(target.name)
data ||= {}
unless data['config']
@logger.debug("Did not find config for #{target.name} in inventory")
data['config'] = {}
end
unless Bolt::Util.windows? || data['config']['transport']
data['config']['transport'] = 'local' if target.name == 'localhost'
end
# These should only get set from the inventory if they have not yet
# been instantiated
set_vars_from_hash(target.name, data['vars']) unless @target_vars[target.name]
set_facts(target.name, data['facts']) unless @target_facts[target.name]
data['features']&.each { |feature| set_feature(target, feature) } unless @target_features[target.name]
# Use Config object to ensure config section is treated consistently with config file
conf = @config.deep_clone
conf.update_from_inventory(data['config'])
conf.validate
target.update_conf(conf.transport_conf)
unless target.transport.nil? || Bolt::TRANSPORTS.include?(target.transport.to_sym)
raise Bolt::UnknownTransportError.new(target.transport, target.uri)
end
target
end
private :update_target
# If target is a group name, expand it to the members of that group.
# Else match against nodes in inventory by name or alias.
# If a wildcard string, error if no matches are found.
# Else fall back to [target] if no matches are found.
def resolve_name(target)
if (group = @group_lookup[target])
group.node_names
else
# Try to wildcard match nodes in inventory
# Ignore case because hostnames are generally case-insensitive
regexp = Regexp.new("^#{Regexp.escape(target).gsub('\*', '.*?')}$", Regexp::IGNORECASE)
nodes = @groups.node_names.select { |node| node =~ regexp }
nodes += @groups.node_aliases.select { |target_alias, _node| target_alias =~ regexp }.values
if nodes.empty?
raise(WildcardError, target) if target.include?('*')
[target]
else
nodes
end
end
end
private :resolve_name
def expand_targets(targets)
if targets.is_a? Bolt::Target
targets.inventory = self
targets
elsif targets.is_a? Array
targets.map { |tish| expand_targets(tish) }
elsif targets.is_a? String
# Expand a comma-separated list
targets.split(/[[:space:],]+/).reject(&:empty?).map do |name|
ts = resolve_name(name)
ts.map do |t|
target = Target.new(t)
target.inventory = self
target
end
end
end
end
private :expand_targets
def set_vars_from_hash(tname, data)
if data
# Instantiate empty vars hash in case no vars are defined
@target_vars[tname] ||= {}
# Assign target new merged vars hash
# This is essentially a copy-on-write to maintain the immutability of @target_vars
@target_vars[tname] = @target_vars[tname].merge(data).freeze
end
end
private :set_vars_from_hash
def set_facts(tname, hash)
if hash
@target_facts[tname] ||= {}
@target_facts[tname] = Bolt::Util.deep_merge(@target_facts[tname], hash).freeze
end
end
private :set_facts
def add_node(current_group, target, desired_group, track = { 'all' => nil })
if current_group.name == desired_group
# Group to add to is found
t_name = target.name
# Add target to nodes hash
current_group.nodes[t_name] = { 'name' => t_name }.merge(target.options)
# Inherit facts, vars, and features from hierarchy
current_group_data = { facts: current_group.facts, vars: current_group.vars, features: current_group.features }
data = inherit_data(track, current_group.name, current_group_data)
set_facts(t_name, @target_facts[t_name] ? data[:facts].merge(@target_facts[t_name]) : data[:facts])
set_vars_from_hash(t_name, @target_vars[t_name] ? data[:vars].merge(@target_vars[t_name]) : data[:vars])
data[:features].each do |feature|
set_feature(target, feature)
end
return true
end
# Recurse on children Groups if not desired_group
current_group.groups.each do |child_group|
track[child_group.name] = current_group
add_node(child_group, target, desired_group, track)
end
end
private :add_node
def inherit_data(track, name, data)
unless track[name].nil?
data[:facts] = track[name].facts.merge(data[:facts])
data[:vars] = track[name].vars.merge(data[:vars])
data[:features].concat(track[name].features)
inherit_data(track, track[name].name, data)
end
data
end
private :inherit_data
end
end
| 1 | 10,168 | Whole thing could be collapsed to `data['config']['transport'] ||= 'local' if target.name == 'localhost'`. | puppetlabs-bolt | rb |
@@ -73,6 +73,15 @@ public class OpenPgpKeyPreference extends Preference {
@Override
protected void onClick() {
+ bindServiceAndGetSignKeyId(new Intent());
+ }
+
+ private void bindServiceAndGetSignKeyId(final Intent data) {
+ if (mServiceConnection != null) {
+ getSignKeyId(data);
+ return;
+ }
+
// bind to service
mServiceConnection = new OpenPgpServiceConnection(
getContext().getApplicationContext(), | 1 | /*
* Copyright (C) 2015 Dominik Schürmann <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openintents.openpgp.util;
import android.app.Activity;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.content.IntentSender;
import android.content.res.TypedArray;
import android.os.Parcel;
import android.os.Parcelable;
import android.preference.Preference;
import android.text.TextUtils;
import android.util.AttributeSet;
import android.util.Log;
import org.openintents.openpgp.IOpenPgpService2;
import org.openintents.openpgp.OpenPgpError;
import org.openintents.openpgp.R;
public class OpenPgpKeyPreference extends Preference {
private long mKeyId;
private String mOpenPgpProvider;
private OpenPgpServiceConnection mServiceConnection;
private String mDefaultUserId;
public static final int REQUEST_CODE_KEY_PREFERENCE = 9999;
private static final int NO_KEY = 0;
public OpenPgpKeyPreference(Context context, AttributeSet attrs) {
super(context, attrs);
}
@Override
public CharSequence getSummary() {
return (mKeyId == NO_KEY) ? getContext().getString(R.string.openpgp_no_key_selected)
: getContext().getString(R.string.openpgp_key_selected);
}
private void updateEnabled() {
if (TextUtils.isEmpty(mOpenPgpProvider)) {
setEnabled(false);
} else {
setEnabled(true);
}
}
public void setOpenPgpProvider(String packageName) {
mOpenPgpProvider = packageName;
updateEnabled();
}
public void setDefaultUserId(String userId) {
mDefaultUserId = userId;
}
@Override
protected void onClick() {
// bind to service
mServiceConnection = new OpenPgpServiceConnection(
getContext().getApplicationContext(),
mOpenPgpProvider,
new OpenPgpServiceConnection.OnBound() {
@Override
public void onBound(IOpenPgpService2 service) {
getSignKeyId(new Intent());
}
@Override
public void onError(Exception e) {
Log.e(OpenPgpApi.TAG, "exception on binding!", e);
}
}
);
mServiceConnection.bindToService();
}
private void getSignKeyId(Intent data) {
data.setAction(OpenPgpApi.ACTION_GET_SIGN_KEY_ID);
data.putExtra(OpenPgpApi.EXTRA_USER_ID, mDefaultUserId);
OpenPgpApi api = new OpenPgpApi(getContext(), mServiceConnection.getService());
api.executeApiAsync(data, null, null, new MyCallback(REQUEST_CODE_KEY_PREFERENCE));
}
private class MyCallback implements OpenPgpApi.IOpenPgpCallback {
int requestCode;
private MyCallback(int requestCode) {
this.requestCode = requestCode;
}
@Override
public void onReturn(Intent result) {
switch (result.getIntExtra(OpenPgpApi.RESULT_CODE, OpenPgpApi.RESULT_CODE_ERROR)) {
case OpenPgpApi.RESULT_CODE_SUCCESS: {
long keyId = result.getLongExtra(OpenPgpApi.EXTRA_SIGN_KEY_ID, NO_KEY);
save(keyId);
break;
}
case OpenPgpApi.RESULT_CODE_USER_INTERACTION_REQUIRED: {
PendingIntent pi = result.getParcelableExtra(OpenPgpApi.RESULT_INTENT);
try {
Activity act = (Activity) getContext();
act.startIntentSenderFromChild(
act, pi.getIntentSender(),
requestCode, null, 0, 0, 0);
} catch (IntentSender.SendIntentException e) {
Log.e(OpenPgpApi.TAG, "SendIntentException", e);
}
break;
}
case OpenPgpApi.RESULT_CODE_ERROR: {
OpenPgpError error = result.getParcelableExtra(OpenPgpApi.RESULT_ERROR);
Log.e(OpenPgpApi.TAG, "RESULT_CODE_ERROR: " + error.getMessage());
break;
}
}
}
}
private void save(long newValue) {
// Give the client a chance to ignore this change if they deem it
// invalid
if (!callChangeListener(newValue)) {
// They don't want the value to be set
return;
}
setAndPersist(newValue);
}
/**
* Public API
*/
public void setValue(long keyId) {
setAndPersist(keyId);
}
/**
* Public API
*/
public long getValue() {
return mKeyId;
}
private void setAndPersist(long newValue) {
mKeyId = newValue;
// Save to persistent storage (this method will make sure this
// preference should be persistent, along with other useful checks)
persistLong(mKeyId);
// Data has changed, notify so UI can be refreshed!
notifyChanged();
// also update summary
setSummary(getSummary());
}
@Override
protected Object onGetDefaultValue(TypedArray a, int index) {
// This preference type's value type is Long, so we read the default
// value from the attributes as an Integer.
return (long) a.getInteger(index, NO_KEY);
}
@Override
protected void onSetInitialValue(boolean restoreValue, Object defaultValue) {
if (restoreValue) {
// Restore state
mKeyId = getPersistedLong(mKeyId);
} else {
// Set state
long value = (Long) defaultValue;
setAndPersist(value);
}
}
@Override
protected Parcelable onSaveInstanceState() {
/*
* Suppose a client uses this preference type without persisting. We
* must save the instance state so it is able to, for example, survive
* orientation changes.
*/
final Parcelable superState = super.onSaveInstanceState();
if (isPersistent()) {
// No need to save instance state since it's persistent
return superState;
}
// Save the instance state
final SavedState myState = new SavedState(superState);
myState.keyId = mKeyId;
myState.openPgpProvider = mOpenPgpProvider;
myState.defaultUserId = mDefaultUserId;
return myState;
}
@Override
protected void onRestoreInstanceState(Parcelable state) {
if (!state.getClass().equals(SavedState.class)) {
// Didn't save state for us in onSaveInstanceState
super.onRestoreInstanceState(state);
return;
}
// Restore the instance state
SavedState myState = (SavedState) state;
super.onRestoreInstanceState(myState.getSuperState());
mKeyId = myState.keyId;
mOpenPgpProvider = myState.openPgpProvider;
mDefaultUserId = myState.defaultUserId;
notifyChanged();
}
/**
* SavedState, a subclass of {@link BaseSavedState}, will store the state
* of MyPreference, a subclass of Preference.
* <p/>
* It is important to always call through to super methods.
*/
private static class SavedState extends BaseSavedState {
long keyId;
String openPgpProvider;
String defaultUserId;
public SavedState(Parcel source) {
super(source);
keyId = source.readInt();
openPgpProvider = source.readString();
defaultUserId = source.readString();
}
@Override
public void writeToParcel(Parcel dest, int flags) {
super.writeToParcel(dest, flags);
dest.writeLong(keyId);
dest.writeString(openPgpProvider);
dest.writeString(defaultUserId);
}
public SavedState(Parcelable superState) {
super(superState);
}
public static final Parcelable.Creator<SavedState> CREATOR =
new Parcelable.Creator<SavedState>() {
public SavedState createFromParcel(Parcel in) {
return new SavedState(in);
}
public SavedState[] newArray(int size) {
return new SavedState[size];
}
};
}
public boolean handleOnActivityResult(int requestCode, int resultCode, Intent data) {
if (requestCode == REQUEST_CODE_KEY_PREFERENCE && resultCode == Activity.RESULT_OK) {
getSignKeyId(data);
return true;
} else {
return false;
}
}
} | 1 | 16,710 | maybe throw in `&& mServiceConnection.isBound()`, there are cases where the connection might die | k9mail-k-9 | java |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.