patch (stringlengths 17-31.2k) | y (int64 1-1) | oldf (stringlengths 0-2.21M) | idx (int64 1-1) | id (int64 4.29k-68.4k) | msg (stringlengths 8-843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values)
---|---|---|---|---|---|---|---|
@@ -4,12 +4,13 @@ import (
"encoding/json"
"fmt"
+ "github.com/google/uuid"
installertypes "github.com/openshift/installer/pkg/types"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
)
-const fakeMetadataFormatStr = `{"aws":{"identifier":[{"kubernetes.io/cluster/fake-infraid":"owned"},{"openshiftClusterID":"fake-cluster-id"}],"region":"us-east-1"},"clusterID":"fake-cluster-id","clusterName":"%s","infraID":"fake-infra-id"}`
+const fakeMetadataFormatStr = `{"aws":{"identifier":[{"kubernetes.io/cluster/fake-infraid":"owned"},{"openshiftClusterID":"%s"}],"region":"us-east-1"},"clusterID":"%s","clusterName":"%s","infraID":"fake-infra-id"}`
func fakeLoadAdminPassword(m *InstallManager) (string, error) {
m.log.Warn("loading fake admin password") | 1 | package installmanager
import (
"encoding/json"
"fmt"
installertypes "github.com/openshift/installer/pkg/types"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
)
const fakeMetadataFormatStr = `{"aws":{"identifier":[{"kubernetes.io/cluster/fake-infraid":"owned"},{"openshiftClusterID":"fake-cluster-id"}],"region":"us-east-1"},"clusterID":"fake-cluster-id","clusterName":"%s","infraID":"fake-infra-id"}`
func fakeLoadAdminPassword(m *InstallManager) (string, error) {
m.log.Warn("loading fake admin password")
return "fake-password", nil
}
func fakeReadClusterMetadata(provision *hivev1.ClusterProvision, m *InstallManager) ([]byte, *installertypes.ClusterMetadata, error) {
m.log.Warn("returning fake cluster metadata")
metadataBytes := []byte(fmt.Sprintf(fakeMetadataFormatStr, provision.Spec.ClusterDeploymentRef.Name))
// Extract and save the cluster ID, this step is critical and a failure here
// should abort the install. Note that this is run *before* we begin provisioning cloud
// resources.
md := &installertypes.ClusterMetadata{}
if err := json.Unmarshal(metadataBytes, md); err != nil {
m.log.WithError(err).Error("error unmarshalling cluster metadata")
return nil, nil, err
}
return metadataBytes, md, nil
}
func fakeProvisionCluster(m *InstallManager) error {
m.log.Warn("skipping openshift-install create cluster for fake install")
return nil
}
| 1 | 16,762 | I think the goal of using `fake-cluster-id` was to make it sure clear that this is a fake cluster, replacing this with UUID only now makes these clusters look very similar to real ones which can cause problems.. any reason why we didn't go for the original recommendation from slack thread of `fake-cluster-UUID` @twiest | openshift-hive | go |
@@ -1719,3 +1719,19 @@ def get_node_first_ancestor_of_type(
if isinstance(ancestor, ancestor_type):
return ancestor
return None
+
+
+def get_node_first_ancestor_of_type_and_its_child(
+ node: nodes.NodeNG, ancestor_type: Union[Type[T_Node], Tuple[Type[T_Node]]]
+) -> Tuple[Optional[T_Node], Optional[T_Node]]:
+ """Modified version of get_node_first_ancestor_of_type to also return the
+ descendant visited directly before reaching the sought ancestor. Useful
+ for extracting whether a statement is guarded by a try, except, or finally
+ when searching for a TryFinally ancestor.
+ """
+ last_ancestor = node
+ for ancestor in node.node_ancestors():
+ if isinstance(ancestor, ancestor_type):
+ return (ancestor, last_ancestor)
+ last_ancestor = ancestor
+ return None, None | 1 | # Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Ricardo Gemignani <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016-2017 Moises Lopez <[email protected]>
# Copyright (c) 2016 Brian C. Lane <[email protected]>
# Copyright (c) 2017-2018, 2020 hippo91 <[email protected]>
# Copyright (c) 2017 ttenhoeve-aa <[email protected]>
# Copyright (c) 2018 Alan Chan <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Yury Gribov <[email protected]>
# Copyright (c) 2018 Caio Carrara <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Brian Shaginaw <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Matthijs Blom <[email protected]>
# Copyright (c) 2019 Djailla <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2019 Nathan Marrow <[email protected]>
# Copyright (c) 2019 Svet <[email protected]>
# Copyright (c) 2019 Pascal Corpet <[email protected]>
# Copyright (c) 2020 Batuhan Taskaya <[email protected]>
# Copyright (c) 2020 Luigi <[email protected]>
# Copyright (c) 2020 ethan-leba <[email protected]>
# Copyright (c) 2020 Damien Baty <[email protected]>
# Copyright (c) 2020 Andrew Simmons <[email protected]>
# Copyright (c) 2020 Ram Rachum <[email protected]>
# Copyright (c) 2020 Slavfox <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 bot <[email protected]>
# Copyright (c) 2021 Yu Shao, Pang <[email protected]>
# Copyright (c) 2021 Mark Byrne <[email protected]>
# Copyright (c) 2021 Nick Drozd <[email protected]>
# Copyright (c) 2021 Arianna Y <[email protected]>
# Copyright (c) 2021 Jaehoon Hwang <[email protected]>
# Copyright (c) 2021 Samuel FORESTIER <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 David Liu <[email protected]>
# Copyright (c) 2021 Matus Valo <[email protected]>
# Copyright (c) 2021 Lorena B <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""some functions that may be useful for various checkers
"""
import builtins
import itertools
import numbers
import re
import string
import warnings
from functools import lru_cache, partial
from typing import (
Callable,
Dict,
Iterable,
List,
Match,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
import _string
import astroid
import astroid.objects
from astroid import TooManyLevelsError, nodes
from astroid.context import InferenceContext
COMP_NODE_TYPES = (
nodes.ListComp,
nodes.SetComp,
nodes.DictComp,
nodes.GeneratorExp,
)
EXCEPTIONS_MODULE = "builtins"
ABC_MODULES = {"abc", "_py_abc"}
ABC_METHODS = {
"abc.abstractproperty",
"abc.abstractmethod",
"abc.abstractclassmethod",
"abc.abstractstaticmethod",
}
TYPING_PROTOCOLS = frozenset(
{"typing.Protocol", "typing_extensions.Protocol", ".Protocol"}
)
ITER_METHOD = "__iter__"
AITER_METHOD = "__aiter__"
NEXT_METHOD = "__next__"
GETITEM_METHOD = "__getitem__"
CLASS_GETITEM_METHOD = "__class_getitem__"
SETITEM_METHOD = "__setitem__"
DELITEM_METHOD = "__delitem__"
CONTAINS_METHOD = "__contains__"
KEYS_METHOD = "keys"
# Dictionary which maps the number of expected parameters a
# special method can have to a set of special methods.
# The following keys are used to denote the parameters restrictions:
#
# * None: variable number of parameters
# * number: exactly that number of parameters
# * tuple: these are the odd ones. Basically it means that the function
# can work with any number of arguments from that tuple,
# although it's best to implement it in order to accept
# all of them.
_SPECIAL_METHODS_PARAMS = {
None: ("__new__", "__init__", "__call__"),
0: (
"__del__",
"__repr__",
"__str__",
"__bytes__",
"__hash__",
"__bool__",
"__dir__",
"__len__",
"__length_hint__",
"__iter__",
"__reversed__",
"__neg__",
"__pos__",
"__abs__",
"__invert__",
"__complex__",
"__int__",
"__float__",
"__index__",
"__trunc__",
"__floor__",
"__ceil__",
"__enter__",
"__aenter__",
"__getnewargs_ex__",
"__getnewargs__",
"__getstate__",
"__reduce__",
"__copy__",
"__unicode__",
"__nonzero__",
"__await__",
"__aiter__",
"__anext__",
"__fspath__",
),
1: (
"__format__",
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__gt__",
"__ge__",
"__getattr__",
"__getattribute__",
"__delattr__",
"__delete__",
"__instancecheck__",
"__subclasscheck__",
"__getitem__",
"__missing__",
"__delitem__",
"__contains__",
"__add__",
"__sub__",
"__mul__",
"__truediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__divmod__",
"__lshift__",
"__rshift__",
"__and__",
"__xor__",
"__or__",
"__radd__",
"__rsub__",
"__rmul__",
"__rtruediv__",
"__rmod__",
"__rdivmod__",
"__rpow__",
"__rlshift__",
"__rrshift__",
"__rand__",
"__rxor__",
"__ror__",
"__iadd__",
"__isub__",
"__imul__",
"__itruediv__",
"__ifloordiv__",
"__imod__",
"__ilshift__",
"__irshift__",
"__iand__",
"__ixor__",
"__ior__",
"__ipow__",
"__setstate__",
"__reduce_ex__",
"__deepcopy__",
"__cmp__",
"__matmul__",
"__rmatmul__",
"__imatmul__",
"__div__",
),
2: ("__setattr__", "__get__", "__set__", "__setitem__", "__set_name__"),
3: ("__exit__", "__aexit__"),
(0, 1): ("__round__",),
(1, 2): ("__pow__",),
}
SPECIAL_METHODS_PARAMS = {
name: params
for params, methods in _SPECIAL_METHODS_PARAMS.items()
for name in methods
}
PYMETHODS = set(SPECIAL_METHODS_PARAMS)
SUBSCRIPTABLE_CLASSES_PEP585 = frozenset(
(
"builtins.tuple",
"builtins.list",
"builtins.dict",
"builtins.set",
"builtins.frozenset",
"builtins.type",
"collections.deque",
"collections.defaultdict",
"collections.OrderedDict",
"collections.Counter",
"collections.ChainMap",
"_collections_abc.Awaitable",
"_collections_abc.Coroutine",
"_collections_abc.AsyncIterable",
"_collections_abc.AsyncIterator",
"_collections_abc.AsyncGenerator",
"_collections_abc.Iterable",
"_collections_abc.Iterator",
"_collections_abc.Generator",
"_collections_abc.Reversible",
"_collections_abc.Container",
"_collections_abc.Collection",
"_collections_abc.Callable",
"_collections_abc.Set",
"_collections_abc.MutableSet",
"_collections_abc.Mapping",
"_collections_abc.MutableMapping",
"_collections_abc.Sequence",
"_collections_abc.MutableSequence",
"_collections_abc.ByteString",
"_collections_abc.MappingView",
"_collections_abc.KeysView",
"_collections_abc.ItemsView",
"_collections_abc.ValuesView",
"contextlib.AbstractContextManager",
"contextlib.AbstractAsyncContextManager",
"re.Pattern",
"re.Match",
)
)
T_Node = TypeVar("T_Node", bound=nodes.NodeNG)
class NoSuchArgumentError(Exception):
pass
class InferredTypeError(Exception):
pass
def is_inside_lambda(node: nodes.NodeNG) -> bool:
"""Return whether the given node is inside a lambda"""
warnings.warn(
"utils.is_inside_lambda will be removed in favour of calling "
"utils.get_node_first_ancestor_of_type(x, nodes.Lambda) in pylint 3.0",
DeprecationWarning,
)
return any(isinstance(parent, nodes.Lambda) for parent in node.node_ancestors())
def get_all_elements(
node: nodes.NodeNG,
) -> Iterable[nodes.NodeNG]:
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (nodes.Tuple, nodes.List)):
for child in node.elts:
yield from get_all_elements(child)
else:
yield node
def is_super(node: nodes.NodeNG) -> bool:
"""return True if the node is referencing the "super" builtin function"""
if getattr(node, "name", None) == "super" and node.root().name == "builtins":
return True
return False
def is_error(node: nodes.FunctionDef) -> bool:
"""Return true if the given function node only raises an exception"""
return len(node.body) == 1 and isinstance(node.body[0], nodes.Raise)
builtins = builtins.__dict__.copy() # type: ignore[assignment]
SPECIAL_BUILTINS = ("__builtins__",) # '__path__', '__file__')
def is_builtin_object(node: nodes.NodeNG) -> bool:
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == "builtins"
def is_builtin(name: str) -> bool:
"""return true if <name> could be considered as a builtin defined by python"""
return name in builtins or name in SPECIAL_BUILTINS # type: ignore[operator]
def is_defined_in_scope(
var_node: nodes.NodeNG,
varname: str,
scope: nodes.NodeNG,
) -> bool:
if isinstance(scope, nodes.If):
for node in scope.body:
if (
isinstance(node, nodes.Assign)
and any(
isinstance(target, nodes.AssignName) and target.name == varname
for target in node.targets
)
) or (isinstance(node, nodes.Nonlocal) and varname in node.names):
return True
elif isinstance(scope, (COMP_NODE_TYPES, nodes.For)):
for ass_node in scope.nodes_of_class(nodes.AssignName):
if ass_node.name == varname:
return True
elif isinstance(scope, nodes.With):
for expr, ids in scope.items:
if expr.parent_of(var_node):
break
if ids and isinstance(ids, nodes.AssignName) and ids.name == varname:
return True
elif isinstance(scope, (nodes.Lambda, nodes.FunctionDef)):
if scope.args.is_argument(varname):
# If the name is found inside a default value
# of a function, then let the search continue
# in the parent's tree.
if scope.args.parent_of(var_node):
try:
scope.args.default_value(varname)
scope = scope.parent
is_defined_in_scope(var_node, varname, scope)
except astroid.NoDefault:
pass
return True
if getattr(scope, "name", None) == varname:
return True
elif isinstance(scope, nodes.ExceptHandler):
if isinstance(scope.name, nodes.AssignName):
ass_node = scope.name
if ass_node.name == varname:
return True
return False
def is_defined_before(var_node: nodes.Name) -> bool:
"""Check if the given variable node is defined before
Verify that the variable node is defined by a parent node
(list, set, dict, or generator comprehension, lambda)
or in a previous sibling node on the same line
(statement_defining ; statement_using).
"""
varname = var_node.name
for parent in var_node.node_ancestors():
if is_defined_in_scope(var_node, varname, parent):
return True
# possibly multiple statements on the same line using semicolon separator
stmt = var_node.statement(future=True)
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for assign_node in _node.nodes_of_class(nodes.AssignName):
if assign_node.name == varname:
return True
for imp_node in _node.nodes_of_class((nodes.ImportFrom, nodes.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
def is_default_argument(
node: nodes.NodeNG, scope: Optional[nodes.NodeNG] = None
) -> bool:
"""return true if the given Name node is used in function or lambda
default argument's value
"""
if not scope:
scope = node.scope()
if isinstance(scope, (nodes.FunctionDef, nodes.Lambda)):
all_defaults = itertools.chain(
scope.args.defaults, (d for d in scope.args.kw_defaults if d is not None)
)
return any(
default_name_node is node
for default_node in all_defaults
for default_name_node in default_node.nodes_of_class(nodes.Name)
)
return False
def is_func_decorator(node: nodes.NodeNG) -> bool:
"""return true if the name is used in function decorator"""
for parent in node.node_ancestors():
if isinstance(parent, nodes.Decorators):
return True
if parent.is_statement or isinstance(
parent,
(
nodes.Lambda,
nodes.ComprehensionScope,
nodes.ListComp,
),
):
break
return False
def is_ancestor_name(frame: nodes.ClassDef, node: nodes.NodeNG) -> bool:
"""return whether `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute
"""
if not isinstance(frame, nodes.ClassDef):
return False
return any(node in base.nodes_of_class(nodes.Name) for base in frame.bases)
def is_being_called(node: nodes.NodeNG) -> bool:
"""return True if node is the function being called in a Call node"""
return isinstance(node.parent, nodes.Call) and node.parent.func is node
def assign_parent(node: nodes.NodeNG) -> nodes.NodeNG:
"""return the higher parent which is not an AssignName, Tuple or List node"""
while node and isinstance(node, (nodes.AssignName, nodes.Tuple, nodes.List)):
node = node.parent
return node
def overrides_a_method(class_node: nodes.ClassDef, name: str) -> bool:
"""return True if <name> is a method overridden from an ancestor
which is not the base object class"""
for ancestor in class_node.ancestors():
if ancestor.name == "object":
continue
if name in ancestor and isinstance(ancestor[name], nodes.FunctionDef):
return True
return False
def check_messages(*messages: str) -> Callable:
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
super().__init__(index)
self.index = index
def parse_format_string(
format_string: str,
) -> Tuple[Set[str], int, Dict[str, str], List[str]]:
"""Parses a format string, returning a tuple of (keys, num_args), where 'keys'
is the set of mapping keys in the format string, and 'num_args' is the number
of arguments required by the format string. Raises IncompleteFormatString or
UnsupportedFormatCharacter if a parse error occurs.
"""
keys = set()
key_types = {}
pos_types = []
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == "%":
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == "(":
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == "(":
depth += 1
elif char == ")":
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in "#0- +":
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == ".":
i, char = next_char(i)
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in "hlL":
i, char = next_char(i)
# Parse the conversion type (mandatory).
flags = "diouxXeEfFgGcrs%a"
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
key_types[key] = char
elif char != "%":
num_args += 1
pos_types.append(char)
i += 1
return keys, num_args, key_types, pos_types
def split_format_field_names(format_string) -> Tuple[str, Iterable[Tuple[bool, str]]]:
try:
return _string.formatter_field_name_split(format_string)
except ValueError as e:
raise IncompleteFormatString() from e
def collect_string_fields(format_string) -> Iterable[Optional[str]]:
"""Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
"""
formatter = string.Formatter()
try:
parseiterator = formatter.parse(format_string)
for result in parseiterator:
if all(item is None for item in result[1:]):
# not a replacement format
continue
name = result[1]
nested = result[2]
yield name
if nested:
yield from collect_string_fields(nested)
except ValueError as exc:
# Probably the format string is invalid.
if exc.args[0].startswith("cannot switch from manual"):
# On Jython, parsing a string with both manual
# and automatic positions will fail with a ValueError,
# while on CPython it will simply return the fields,
# the validation being done in the interpreter (?).
# We're just returning two mixed fields in order
# to trigger the format-combined-specification check.
yield ""
yield "1"
return
raise IncompleteFormatString(format_string) from exc
def parse_format_method_string(
format_string: str,
) -> Tuple[List[Tuple[str, List[Tuple[bool, str]]]], int, int]:
"""
Parses a PEP 3101 format string, returning a tuple of
(keyword_arguments, implicit_pos_args_cnt, explicit_pos_args),
where keyword_arguments is the set of mapping keys in the format string, implicit_pos_args_cnt
is the number of arguments required by the format string and
explicit_pos_args is the number of arguments passed with the position.
"""
keyword_arguments = []
implicit_pos_args_cnt = 0
explicit_pos_args = set()
for name in collect_string_fields(format_string):
if name and str(name).isdigit():
explicit_pos_args.add(str(name))
elif name:
keyname, fielditerator = split_format_field_names(name)
if isinstance(keyname, numbers.Number):
explicit_pos_args.add(str(keyname))
try:
keyword_arguments.append((keyname, list(fielditerator)))
except ValueError as e:
raise IncompleteFormatString() from e
else:
implicit_pos_args_cnt += 1
return keyword_arguments, implicit_pos_args_cnt, len(explicit_pos_args)
def is_attr_protected(attrname: str) -> bool:
"""return True if attribute name is protected (start with _ and some other
details), False otherwise.
"""
return (
attrname[0] == "_"
and attrname != "_"
and not (attrname.startswith("__") and attrname.endswith("__"))
)
def node_frame_class(node: nodes.NodeNG) -> Optional[nodes.ClassDef]:
"""Return the class that is wrapping the given node
The function returns a class for a method node (or a staticmethod or a
classmethod), otherwise it returns `None`.
"""
klass = node.frame()
nodes_to_check = (
nodes.NodeNG,
astroid.UnboundMethod,
astroid.BaseInstance,
)
while (
klass
and isinstance(klass, nodes_to_check)
and not isinstance(klass, nodes.ClassDef)
):
if klass.parent is None:
return None
klass = klass.parent.frame()
return klass
def get_outer_class(class_node: astroid.ClassDef) -> Optional[astroid.ClassDef]:
"""Return the class that is the outer class of given (nested) class_node"""
parent_klass = class_node.parent.frame()
return parent_klass if isinstance(parent_klass, astroid.ClassDef) else None
def is_attr_private(attrname: str) -> Optional[Match[str]]:
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile("^_{2,}.*[^_]+_?$")
return regex.match(attrname)
def get_argument_from_call(
call_node: nodes.Call, position: Optional[int] = None, keyword: Optional[str] = None
) -> nodes.Name:
"""Returns the specified argument from a function call.
:param nodes.Call call_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: nodes.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError("Must specify at least one of: position or keyword.")
if position is not None:
try:
return call_node.args[position]
except IndexError:
pass
if keyword and call_node.keywords:
for arg in call_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
def inherit_from_std_ex(node: nodes.NodeNG) -> bool:
"""
Return whether the given class node is subclass of
exceptions.Exception.
"""
ancestors = node.ancestors() if hasattr(node, "ancestors") else []
return any(
ancestor.name in {"Exception", "BaseException"}
and ancestor.root().name == EXCEPTIONS_MODULE
for ancestor in itertools.chain([node], ancestors)
)
def error_of_type(handler: nodes.ExceptHandler, error_type) -> bool:
"""
Check if the given exception handler catches
the given error_type.
The *handler* parameter is a node, representing an ExceptHandler node.
The *error_type* can be an exception, such as AttributeError,
the name of an exception, or it can be a tuple of errors.
The function will return True if the handler catches any of the
given errors.
"""
def stringify_error(error):
if not isinstance(error, str):
return error.__name__
return error
if not isinstance(error_type, tuple):
error_type = (error_type,)
expected_errors = {stringify_error(error) for error in error_type}
if not handler.type:
return False
return handler.catch(expected_errors)
def decorated_with_property(node: nodes.FunctionDef) -> bool:
"""Detect if the given function node is decorated with a property."""
if not node.decorators:
return False
for decorator in node.decorators.nodes:
try:
if _is_property_decorator(decorator):
return True
except astroid.InferenceError:
pass
return False
def _is_property_kind(node, *kinds):
if not isinstance(node, (astroid.UnboundMethod, nodes.FunctionDef)):
return False
if node.decorators:
for decorator in node.decorators.nodes:
if isinstance(decorator, nodes.Attribute) and decorator.attrname in kinds:
return True
return False
def is_property_setter(node: nodes.FunctionDef) -> bool:
"""Check if the given node is a property setter"""
return _is_property_kind(node, "setter")
def is_property_deleter(node: nodes.FunctionDef) -> bool:
"""Check if the given node is a property deleter"""
return _is_property_kind(node, "deleter")
def is_property_setter_or_deleter(node: nodes.FunctionDef) -> bool:
"""Check if the given node is either a property setter or a deleter"""
return _is_property_kind(node, "setter", "deleter")
def _is_property_decorator(decorator: nodes.Name) -> bool:
for inferred in decorator.infer():
if isinstance(inferred, nodes.ClassDef):
if inferred.qname() in {"builtins.property", "functools.cached_property"}:
return True
for ancestor in inferred.ancestors():
if ancestor.name == "property" and ancestor.root().name == "builtins":
return True
elif isinstance(inferred, nodes.FunctionDef):
# If decorator is function, check if it has exactly one return
# and the return is itself a function decorated with property
returns: List[nodes.Return] = list(
inferred._get_return_nodes_skip_functions()
)
if len(returns) == 1 and isinstance(
returns[0].value, (nodes.Name, nodes.Attribute)
):
inferred = safe_infer(returns[0].value)
if (
inferred
and isinstance(inferred, astroid.objects.Property)
and isinstance(inferred.function, nodes.FunctionDef)
):
return decorated_with_property(inferred.function)
return False
def decorated_with(
func: Union[
nodes.ClassDef, nodes.FunctionDef, astroid.BoundMethod, astroid.UnboundMethod
],
qnames: Iterable[str],
) -> bool:
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
if isinstance(decorator_node, nodes.Call):
# We only want to infer the function name
decorator_node = decorator_node.func
try:
if any(
i.name in qnames or i.qname() in qnames
for i in decorator_node.infer()
if i is not None and i != astroid.Uninferable
):
return True
except astroid.InferenceError:
continue
return False
def uninferable_final_decorators(
node: nodes.Decorators,
) -> List[Optional[Union[nodes.Attribute, nodes.Name]]]:
"""Return a list of uninferable `typing.final` decorators in `node`.
This function is used to determine if the `typing.final` decorator is used
with an unsupported Python version; the decorator cannot be inferred when
using a Python version lower than 3.8.
"""
decorators = []
for decorator in getattr(node, "nodes", []):
if isinstance(decorator, nodes.Attribute):
try:
import_node = decorator.expr.lookup(decorator.expr.name)[1][0]
except AttributeError:
continue
elif isinstance(decorator, nodes.Name):
lookup_values = decorator.lookup(decorator.name)
if lookup_values[1]:
import_node = lookup_values[1][0]
else:
continue # pragma: no cover # Covered on Python < 3.8
else:
continue
if not isinstance(import_node, (astroid.Import, astroid.ImportFrom)):
continue
import_names = dict(import_node.names)
# from typing import final
is_from_import = ("final" in import_names) and import_node.modname == "typing"
# import typing
is_import = ("typing" in import_names) and getattr(
decorator, "attrname", None
) == "final"
if (is_from_import or is_import) and safe_infer(decorator) in [
astroid.Uninferable,
None,
]:
decorators.append(decorator)
return decorators
@lru_cache(maxsize=1024)
def unimplemented_abstract_methods(
node: nodes.ClassDef, is_abstract_cb: nodes.FunctionDef = None
) -> Dict[str, nodes.NodeNG]:
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
returns a ``True`` value. The check defaults to verifying that
a method is decorated with abstract methods.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
if is_abstract_cb is None:
is_abstract_cb = partial(decorated_with, qnames=ABC_METHODS)
visited: Dict[str, nodes.NodeNG] = {}
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
except astroid.ResolveError:
# Probably inconsistent hierarchy, don't try to figure this out here.
return {}
for ancestor in mro:
for obj in ancestor.values():
inferred = obj
if isinstance(obj, nodes.AssignName):
inferred = safe_infer(obj)
if not inferred:
# Might be an abstract function,
# but since we don't have enough information
# in order to take this decision, we're taking
# the *safe* decision instead.
if obj.name in visited:
del visited[obj.name]
continue
if not isinstance(inferred, nodes.FunctionDef):
if obj.name in visited:
del visited[obj.name]
if isinstance(inferred, nodes.FunctionDef):
# It's critical to use the original name,
# since after inferring, an object can be something
# else than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(inferred)
if abstract:
visited[obj.name] = inferred
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited
def find_try_except_wrapper_node(
node: nodes.NodeNG,
) -> Optional[Union[nodes.ExceptHandler, nodes.TryExcept]]:
"""Return the ExceptHandler or the TryExcept node in which the node is."""
current = node
ignores = (nodes.ExceptHandler, nodes.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, ignores):
return current.parent
return None
def find_except_wrapper_node_in_scope(
node: nodes.NodeNG,
) -> Optional[Union[nodes.ExceptHandler, nodes.TryExcept]]:
"""Return the ExceptHandler in which the node is, without going out of scope."""
for current in node.node_ancestors():
if isinstance(current, astroid.scoped_nodes.LocalsDictNodeNG):
# If we're inside a function/class definition, we don't want to keep checking
# higher ancestors for `except` clauses, because if these exist, it means our
# function/class was defined in an `except` clause, rather than the current code
# actually running in an `except` clause.
return None
if isinstance(current, nodes.ExceptHandler):
return current
return None
def is_from_fallback_block(node: nodes.NodeNG) -> bool:
"""Check if the given node is from a fallback import block."""
context = find_try_except_wrapper_node(node)
if not context:
return False
if isinstance(context, nodes.ExceptHandler):
other_body = context.parent.body
handlers = context.parent.handlers
else:
other_body = itertools.chain.from_iterable(
handler.body for handler in context.handlers
)
handlers = context.handlers
has_fallback_imports = any(
isinstance(import_node, (nodes.ImportFrom, nodes.Import))
for import_node in other_body
)
ignores_import_error = _except_handlers_ignores_exceptions(
handlers, (ImportError, ModuleNotFoundError)
)
return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exceptions(
handlers: nodes.ExceptHandler,
exceptions: Tuple[Type[ImportError], Type[ModuleNotFoundError]],
) -> bool:
func = partial(error_of_type, error_type=exceptions)
return any(func(handler) for handler in handlers)
def get_exception_handlers(
node: nodes.NodeNG, exception=Exception
) -> Optional[List[nodes.ExceptHandler]]:
"""Return the collections of handlers handling the exception in arguments.
Args:
node (nodes.NodeNG): A node that is potentially wrapped in a try except.
exception (builtin.Exception or str): exception or name of the exception.
Returns:
list: the collection of handlers that are handling the exception or None.
"""
context = find_try_except_wrapper_node(node)
if isinstance(context, nodes.TryExcept):
return [
handler for handler in context.handlers if error_of_type(handler, exception)
]
return []
def is_node_inside_try_except(node: nodes.Raise) -> bool:
"""Check if the node is directly under a Try/Except statement.
(but not under an ExceptHandler!)
Args:
node (nodes.Raise): the node raising the exception.
Returns:
bool: True if the node is inside a try/except statement, False otherwise.
"""
context = find_try_except_wrapper_node(node)
return isinstance(context, nodes.TryExcept)
def node_ignores_exception(node: nodes.NodeNG, exception=Exception) -> bool:
"""Check if the node is in a TryExcept which handles the given exception.
If the exception is not given, the function is going to look for bare
excepts.
"""
managing_handlers = get_exception_handlers(node, exception)
if not managing_handlers:
return False
return any(managing_handlers)
def class_is_abstract(node: nodes.ClassDef) -> bool:
"""return true if the given class node should be considered as an abstract
class
"""
# Only check for explicit metaclass=ABCMeta on this specific class
meta = node.declared_metaclass()
if meta is not None:
if meta.name == "ABCMeta" and meta.root().name in ABC_MODULES:
return True
for ancestor in node.ancestors():
if ancestor.name == "ABC" and ancestor.root().name in ABC_MODULES:
# abc.ABC inheritance
return True
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
def _supports_protocol_method(value: nodes.NodeNG, attr: str) -> bool:
try:
attributes = value.getattr(attr)
except astroid.NotFoundError:
return False
first = attributes[0]
if isinstance(first, nodes.AssignName):
if isinstance(first.parent.value, nodes.Const):
return False
return True
def is_comprehension(node: nodes.NodeNG) -> bool:
comprehensions = (
nodes.ListComp,
nodes.SetComp,
nodes.DictComp,
nodes.GeneratorExp,
)
return isinstance(node, comprehensions)
def _supports_mapping_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(
value, GETITEM_METHOD
) and _supports_protocol_method(value, KEYS_METHOD)
def _supports_membership_test_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, CONTAINS_METHOD)
def _supports_iteration_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, ITER_METHOD) or _supports_protocol_method(
value, GETITEM_METHOD
)
def _supports_async_iteration_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, AITER_METHOD)
def _supports_getitem_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, GETITEM_METHOD)
def _supports_setitem_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, SETITEM_METHOD)
def _supports_delitem_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, DELITEM_METHOD)
def _is_abstract_class_name(name: str) -> bool:
lname = name.lower()
is_mixin = lname.endswith("mixin")
is_abstract = lname.startswith("abstract")
is_base = lname.startswith("base") or lname.endswith("base")
return is_mixin or is_abstract or is_base
def is_inside_abstract_class(node: nodes.NodeNG) -> bool:
while node is not None:
if isinstance(node, nodes.ClassDef):
if class_is_abstract(node):
return True
name = getattr(node, "name", None)
if name is not None and _is_abstract_class_name(name):
return True
node = node.parent
return False
def _supports_protocol(
value: nodes.NodeNG, protocol_callback: nodes.FunctionDef
) -> bool:
if isinstance(value, nodes.ClassDef):
if not has_known_bases(value):
return True
# classobj can only be iterable if it has an iterable metaclass
meta = value.metaclass()
if meta is not None:
if protocol_callback(meta):
return True
if isinstance(value, astroid.BaseInstance):
if not has_known_bases(value):
return True
if value.has_dynamic_getattr():
return True
if protocol_callback(value):
return True
if (
isinstance(value, astroid.bases.Proxy)
and isinstance(value._proxied, astroid.BaseInstance)
and has_known_bases(value._proxied)
):
value = value._proxied
return protocol_callback(value)
return False
def is_iterable(value: nodes.NodeNG, check_async: bool = False) -> bool:
if check_async:
protocol_check = _supports_async_iteration_protocol
else:
protocol_check = _supports_iteration_protocol
return _supports_protocol(value, protocol_check)
def is_mapping(value: nodes.NodeNG) -> bool:
return _supports_protocol(value, _supports_mapping_protocol)
def supports_membership_test(value: nodes.NodeNG) -> bool:
supported = _supports_protocol(value, _supports_membership_test_protocol)
return supported or is_iterable(value)
def supports_getitem(value: nodes.NodeNG, node: nodes.NodeNG) -> bool:
if isinstance(value, nodes.ClassDef):
if _supports_protocol_method(value, CLASS_GETITEM_METHOD):
return True
if is_class_subscriptable_pep585_with_postponed_evaluation_enabled(value, node):
return True
return _supports_protocol(value, _supports_getitem_protocol)
def supports_setitem(value: nodes.NodeNG, _: nodes.NodeNG) -> bool:
return _supports_protocol(value, _supports_setitem_protocol)
def supports_delitem(value: nodes.NodeNG, _: nodes.NodeNG) -> bool:
return _supports_protocol(value, _supports_delitem_protocol)
def _get_python_type_of_node(node):
pytype = getattr(node, "pytype", None)
if callable(pytype):
return pytype()
return None
@lru_cache(maxsize=1024)
def safe_infer(node: nodes.NodeNG, context=None) -> Optional[nodes.NodeNG]:
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred of different types).
"""
inferred_types = set()
try:
infer_gen = node.infer(context=context)
value = next(infer_gen)
except astroid.InferenceError:
return None
if value is not astroid.Uninferable:
inferred_types.add(_get_python_type_of_node(value))
try:
for inferred in infer_gen:
inferred_type = _get_python_type_of_node(inferred)
if inferred_type not in inferred_types:
return None # If there is ambiguity on the inferred node.
if (
isinstance(inferred, nodes.FunctionDef)
and inferred.args.args is not None
and value.args.args is not None
and len(inferred.args.args) != len(value.args.args)
):
return None # Different number of arguments indicates ambiguity
except astroid.InferenceError:
return None # There is some kind of ambiguity
except StopIteration:
return value
return value if len(inferred_types) <= 1 else None
@lru_cache(maxsize=512)
def infer_all(
node: nodes.NodeNG, context: InferenceContext = None
) -> List[nodes.NodeNG]:
try:
return list(node.infer(context=context))
except astroid.InferenceError:
return []
def has_known_bases(klass: nodes.ClassDef, context=None) -> bool:
"""Return true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base, context=context)
if (
not isinstance(result, nodes.ClassDef)
or result is klass
or not has_known_bases(result, context=context)
):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def is_none(node: nodes.NodeNG) -> bool:
return (
node is None
or (isinstance(node, nodes.Const) and node.value is None)
or (isinstance(node, nodes.Name) and node.name == "None")
)
def node_type(node: nodes.NodeNG) -> Optional[nodes.NodeNG]:
"""Return the inferred type for `node`
If there is more than one possible type, or if inferred type is Uninferable or None,
return None
"""
# check there is only one possible type for the assign node. Else we
# don't handle it for now
types: Set[nodes.NodeNG] = set()
try:
for var_type in node.infer():
if var_type == astroid.Uninferable or is_none(var_type):
continue
types.add(var_type)
if len(types) > 1:
return None
except astroid.InferenceError:
return None
return types.pop() if types else None
def is_registered_in_singledispatch_function(node: nodes.FunctionDef) -> bool:
"""Check if the given function node is a singledispatch function."""
singledispatch_qnames = (
"functools.singledispatch",
"singledispatch.singledispatch",
)
if not isinstance(node, nodes.FunctionDef):
return False
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
# func.register are function calls
if not isinstance(decorator, nodes.Call):
continue
func = decorator.func
if not isinstance(func, nodes.Attribute) or func.attrname != "register":
continue
try:
func_def = next(func.expr.infer())
except astroid.InferenceError:
continue
if isinstance(func_def, nodes.FunctionDef):
return decorated_with(func_def, singledispatch_qnames)
return False
def get_node_last_lineno(node: nodes.NodeNG) -> int:
"""
Get the last lineno of the given node. For a simple statement this will just be node.lineno,
but for a node that has child statements (e.g. a method) this will be the lineno of the last
child statement recursively.
"""
# 'finalbody' is always the last clause in a try statement, if present
if getattr(node, "finalbody", False):
return get_node_last_lineno(node.finalbody[-1])
# For if, while, and for statements 'orelse' is always the last clause.
# For try statements 'orelse' is the last in the absence of a 'finalbody'
if getattr(node, "orelse", False):
return get_node_last_lineno(node.orelse[-1])
# try statements have the 'handlers' last if there is no 'orelse' or 'finalbody'
if getattr(node, "handlers", False):
return get_node_last_lineno(node.handlers[-1])
# All compound statements have a 'body'
if getattr(node, "body", False):
return get_node_last_lineno(node.body[-1])
# Not a compound statement
return node.lineno
def is_postponed_evaluation_enabled(node: nodes.NodeNG) -> bool:
"""Check if the postponed evaluation of annotations is enabled"""
module = node.root()
return "annotations" in module.future_imports
def is_class_subscriptable_pep585_with_postponed_evaluation_enabled(
value: nodes.ClassDef, node: nodes.NodeNG
) -> bool:
"""Check if class is subscriptable with PEP 585 and
postponed evaluation enabled.
"""
return (
is_postponed_evaluation_enabled(node)
and value.qname() in SUBSCRIPTABLE_CLASSES_PEP585
and is_node_in_type_annotation_context(node)
)
def is_node_in_type_annotation_context(node: nodes.NodeNG) -> bool:
"""Check if node is in type annotation context.
Check for 'AnnAssign', function 'Arguments',
    or part of function return type annotation.
"""
# pylint: disable=too-many-boolean-expressions
current_node, parent_node = node, node.parent
while True:
if (
isinstance(parent_node, nodes.AnnAssign)
and parent_node.annotation == current_node
or isinstance(parent_node, nodes.Arguments)
and current_node
in (
*parent_node.annotations,
*parent_node.posonlyargs_annotations,
*parent_node.kwonlyargs_annotations,
parent_node.varargannotation,
parent_node.kwargannotation,
)
or isinstance(parent_node, nodes.FunctionDef)
and parent_node.returns == current_node
):
return True
current_node, parent_node = parent_node, parent_node.parent
if isinstance(parent_node, nodes.Module):
return False
def is_subclass_of(child: nodes.ClassDef, parent: nodes.ClassDef) -> bool:
"""
Check if first node is a subclass of second node.
:param child: Node to check for subclass.
:param parent: Node to check for superclass.
:returns: True if child is derived from parent. False otherwise.
"""
if not all(isinstance(node, nodes.ClassDef) for node in (child, parent)):
return False
for ancestor in child.ancestors():
try:
if astroid.helpers.is_subtype(ancestor, parent):
return True
except astroid.exceptions._NonDeducibleTypeHierarchy:
continue
return False
@lru_cache(maxsize=1024)
def is_overload_stub(node: nodes.NodeNG) -> bool:
"""Check if a node is a function stub decorated with typing.overload.
:param node: Node to check.
:returns: True if node is an overload function stub. False otherwise.
"""
decorators = getattr(node, "decorators", None)
return bool(decorators and decorated_with(node, ["typing.overload", "overload"]))
def is_protocol_class(cls: nodes.NodeNG) -> bool:
"""Check if the given node represents a protocol class
:param cls: The node to check
:returns: True if the node is a typing protocol class, false otherwise.
"""
if not isinstance(cls, nodes.ClassDef):
return False
# Use .ancestors() since not all protocol classes can have
# their mro deduced.
return any(parent.qname() in TYPING_PROTOCOLS for parent in cls.ancestors())
def is_call_of_name(node: nodes.NodeNG, name: str) -> bool:
"""Checks if node is a function call with the given name"""
return (
isinstance(node, nodes.Call)
and isinstance(node.func, nodes.Name)
and node.func.name == name
)
def is_test_condition(
node: nodes.NodeNG,
parent: Optional[nodes.NodeNG] = None,
) -> bool:
"""Returns true if the given node is being tested for truthiness"""
parent = parent or node.parent
if isinstance(parent, (nodes.While, nodes.If, nodes.IfExp, nodes.Assert)):
return node is parent.test or parent.test.parent_of(node)
if isinstance(parent, nodes.Comprehension):
return node in parent.ifs
return is_call_of_name(parent, "bool") and parent.parent_of(node)
def is_classdef_type(node: nodes.ClassDef) -> bool:
"""Test if ClassDef node is Type."""
if node.name == "type":
return True
return any(isinstance(b, nodes.Name) and b.name == "type" for b in node.bases)
def is_attribute_typed_annotation(
node: Union[nodes.ClassDef, astroid.Instance], attr_name: str
) -> bool:
"""Test if attribute is typed annotation in current node
or any base nodes.
"""
attribute = node.locals.get(attr_name, [None])[0]
if (
attribute
and isinstance(attribute, nodes.AssignName)
and isinstance(attribute.parent, nodes.AnnAssign)
):
return True
for base in node.bases:
inferred = safe_infer(base)
if (
inferred
and isinstance(inferred, nodes.ClassDef)
and is_attribute_typed_annotation(inferred, attr_name)
):
return True
return False
def is_assign_name_annotated_with(node: nodes.AssignName, typing_name: str) -> bool:
"""Test if AssignName node has `typing_name` annotation.
Especially useful to check for `typing._SpecialForm` instances
like: `Union`, `Optional`, `Literal`, `ClassVar`, `Final`.
"""
if not isinstance(node.parent, nodes.AnnAssign):
return False
annotation = node.parent.annotation
if isinstance(annotation, nodes.Subscript):
annotation = annotation.value
if (
isinstance(annotation, nodes.Name)
and annotation.name == typing_name
or isinstance(annotation, nodes.Attribute)
and annotation.attrname == typing_name
):
return True
return False
def get_iterating_dictionary_name(
node: Union[nodes.For, nodes.Comprehension]
) -> Optional[str]:
"""Get the name of the dictionary which keys are being iterated over on
a ``nodes.For`` or ``nodes.Comprehension`` node.
If the iterating object is not either the keys method of a dictionary
or a dictionary itself, this returns None.
"""
# Is it a proper keys call?
if (
isinstance(node.iter, nodes.Call)
and isinstance(node.iter.func, nodes.Attribute)
and node.iter.func.attrname == "keys"
):
inferred = safe_infer(node.iter.func)
if not isinstance(inferred, astroid.BoundMethod):
return None
return node.iter.as_string().rpartition(".keys")[0]
# Is it a dictionary?
if isinstance(node.iter, (nodes.Name, nodes.Attribute)):
inferred = safe_infer(node.iter)
if not isinstance(inferred, nodes.Dict):
return None
return node.iter.as_string()
return None
def get_subscript_const_value(node: nodes.Subscript) -> nodes.Const:
"""
Returns the value 'subscript.slice' of a Subscript node.
:param node: Subscript Node to extract value from
:returns: Const Node containing subscript value
:raises InferredTypeError: if the subscript node cannot be inferred as a Const
"""
inferred = safe_infer(node.slice)
if not isinstance(inferred, nodes.Const):
raise InferredTypeError("Subscript.slice cannot be inferred as a nodes.Const")
return inferred
def get_import_name(
importnode: Union[nodes.Import, nodes.ImportFrom], modname: str
) -> str:
"""Get a prepared module name from the given import node
In the case of relative imports, this will return the
absolute qualified module name, which might be useful
for debugging. Otherwise, the initial module name
is returned unchanged.
:param importnode: node representing import statement.
:param modname: module name from import statement.
:returns: absolute qualified module name of the module
used in import.
"""
if isinstance(importnode, nodes.ImportFrom) and importnode.level:
root = importnode.root()
if isinstance(root, nodes.Module):
try:
return root.relative_to_absolute_name(modname, level=importnode.level)
except TooManyLevelsError:
return modname
return modname
def is_sys_guard(node: nodes.If) -> bool:
"""Return True if IF stmt is a sys.version_info guard.
>>> import sys
>>> if sys.version_info > (3, 8):
>>> from typing import Literal
>>> else:
>>> from typing_extensions import Literal
"""
if isinstance(node.test, nodes.Compare):
value = node.test.left
if isinstance(value, nodes.Subscript):
value = value.value
if (
isinstance(value, nodes.Attribute)
and value.as_string() == "sys.version_info"
):
return True
return False
def is_typing_guard(node: nodes.If) -> bool:
"""Return True if IF stmt is a typing guard.
>>> from typing import TYPE_CHECKING
>>> if TYPE_CHECKING:
>>> from xyz import a
"""
return isinstance(
node.test, (nodes.Name, nodes.Attribute)
) and node.test.as_string().endswith("TYPE_CHECKING")
def is_node_in_guarded_import_block(node: nodes.NodeNG) -> bool:
"""Return True if node is part for guarded if block.
I.e. `sys.version_info` or `typing.TYPE_CHECKING`
"""
return isinstance(node.parent, nodes.If) and (
is_sys_guard(node.parent) or is_typing_guard(node.parent)
)
def is_reassigned_after_current(node: nodes.NodeNG, varname: str) -> bool:
"""Check if the given variable name is reassigned in the same scope after the current node"""
return any(
a.name == varname and a.lineno > node.lineno
for a in node.scope().nodes_of_class((nodes.AssignName, nodes.FunctionDef))
)
def is_function_body_ellipsis(node: nodes.FunctionDef) -> bool:
"""Checks whether a function body only consists of a single Ellipsis"""
return (
len(node.body) == 1
and isinstance(node.body[0], nodes.Expr)
and isinstance(node.body[0].value, nodes.Const)
and node.body[0].value.value == Ellipsis
)
def is_base_container(node: Optional[nodes.NodeNG]) -> bool:
return isinstance(node, nodes.BaseContainer) and not node.elts
def is_empty_dict_literal(node: Optional[nodes.NodeNG]) -> bool:
return isinstance(node, nodes.Dict) and not node.items
def is_empty_str_literal(node: Optional[nodes.NodeNG]) -> bool:
return (
isinstance(node, nodes.Const) and isinstance(node.value, str) and not node.value
)
def returns_bool(node: nodes.NodeNG) -> bool:
"""Returns true if a node is a return that returns a constant boolean"""
return (
isinstance(node, nodes.Return)
and isinstance(node.value, nodes.Const)
and node.value.value in {True, False}
)
def get_node_first_ancestor_of_type(
node: nodes.NodeNG, ancestor_type: Union[Type[T_Node], Tuple[Type[T_Node]]]
) -> Optional[T_Node]:
"""Return the first parent node that is any of the provided types (or None)"""
for ancestor in node.node_ancestors():
if isinstance(ancestor, ancestor_type):
return ancestor
return None
| 1 | 19,468 | I need to spruce up my `mypy` knowledge, but I think this is incorrect. Using `T_Node` twice makes it so that both should be the same type. I think this would be better: `Tuple[Optional[T_Node], Optional[nodes.NodeNG]]`. It might be even better to do `Union[Tuple[None, None], Tuple[T_Node, nodes.NodeNG]]`, as that shows that `None` should always pair with `None`. | PyCQA-pylint | py
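A minimal sketch of the reviewer's second suggestion applied to the helper added by this patch, assuming the module's existing `T_Node` type variable and the same `node_ancestors()` traversal; the `Union` return type makes explicit that `None` always pairs with `None`, while a found ancestor always comes with the child visited last.

```python
# Sketch only: the reviewer's suggested return annotation applied to the new helper.
# Assumes the same T_Node TypeVar and astroid node API used in the patch.
from typing import Tuple, Type, TypeVar, Union

from astroid import nodes

T_Node = TypeVar("T_Node", bound=nodes.NodeNG)


def get_node_first_ancestor_of_type_and_its_child(
    node: nodes.NodeNG, ancestor_type: Union[Type[T_Node], Tuple[Type[T_Node]]]
) -> Union[Tuple[None, None], Tuple[T_Node, nodes.NodeNG]]:
    """Return the first matching ancestor and the child visited just before it."""
    last_ancestor: nodes.NodeNG = node
    for ancestor in node.node_ancestors():
        if isinstance(ancestor, ancestor_type):
            return ancestor, last_ancestor
        last_ancestor = ancestor
    return None, None
```

Either annotation accepts the same function body; the `Union` form simply documents the pairing constraint the reviewer points out.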
@@ -438,10 +438,13 @@ func runTest(t *testing.T, test testT) {
}
controller := certificaterequests.New(apiutil.IssuerCA, ca)
- controller.Register(test.builder.Context)
+ _, _, err := controller.Register(test.builder.Context)
+ if err != nil {
+ t.Errorf("controller.Register failed (%s)", err.Error())
+ }
test.builder.Start()
- err := controller.Sync(context.Background(), test.certificateRequest)
+ err = controller.Sync(context.Background(), test.certificateRequest)
if err != nil && !test.expectedErr {
t.Errorf("expected to not get an error, but got: %v", err)
} | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ca
import (
"bytes"
"context"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"errors"
"math"
"math/big"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientcorev1 "k8s.io/client-go/listers/core/v1"
coretesting "k8s.io/client-go/testing"
fakeclock "k8s.io/utils/clock/testing"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
"github.com/jetstack/cert-manager/pkg/apis/certmanager"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
"github.com/jetstack/cert-manager/pkg/controller"
"github.com/jetstack/cert-manager/pkg/controller/certificaterequests"
"github.com/jetstack/cert-manager/pkg/controller/certificaterequests/util"
controllertest "github.com/jetstack/cert-manager/pkg/controller/test"
testpkg "github.com/jetstack/cert-manager/pkg/controller/test"
"github.com/jetstack/cert-manager/pkg/util/pki"
"github.com/jetstack/cert-manager/test/unit/gen"
"github.com/jetstack/cert-manager/test/unit/listers"
testlisters "github.com/jetstack/cert-manager/test/unit/listers"
)
var (
fixedClockStart = time.Now()
fixedClock = fakeclock.NewFakeClock(fixedClockStart)
)
func generateCSR(t *testing.T, secretKey crypto.Signer) []byte {
asn1Subj, _ := asn1.Marshal(pkix.Name{
CommonName: "test",
}.ToRDNSequence())
template := x509.CertificateRequest{
RawSubject: asn1Subj,
SignatureAlgorithm: x509.SHA256WithRSA,
}
csrBytes, err := x509.CreateCertificateRequest(rand.Reader, &template, secretKey)
if err != nil {
t.Error(err)
t.FailNow()
}
csr := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrBytes})
return csr
}
func generateSelfSignedCertFromCR(t *testing.T, cr *cmapi.CertificateRequest, key crypto.Signer,
duration time.Duration) (*x509.Certificate, []byte) {
template, err := pki.GenerateTemplateFromCertificateRequest(cr)
if err != nil {
t.Errorf("error generating template: %v", err)
}
derBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
if err != nil {
t.Errorf("error signing cert: %v", err)
t.FailNow()
}
pemByteBuffer := bytes.NewBuffer([]byte{})
err = pem.Encode(pemByteBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
if err != nil {
t.Errorf("failed to encode cert: %v", err)
t.FailNow()
}
return template, pemByteBuffer.Bytes()
}
func TestSign(t *testing.T) {
metaFixedClockStart := metav1.NewTime(fixedClockStart)
baseIssuer := gen.Issuer("test-issuer",
gen.SetIssuerCA(cmapi.CAIssuer{SecretName: "root-ca-secret"}),
gen.AddIssuerCondition(cmapi.IssuerCondition{
Type: cmapi.IssuerConditionReady,
Status: cmmeta.ConditionTrue,
}),
)
// Build root RSA CA
skRSA, err := pki.GenerateRSAPrivateKey(2048)
if err != nil {
t.Error(err)
t.FailNow()
}
skRSAPEM := pki.EncodePKCS1PrivateKey(skRSA)
rsaCSR := generateCSR(t, skRSA)
baseCRNotApproved := gen.CertificateRequest("test-cr",
gen.SetCertificateRequestIsCA(true),
gen.SetCertificateRequestCSR(rsaCSR),
gen.SetCertificateRequestIssuer(cmmeta.ObjectReference{
Name: baseIssuer.DeepCopy().Name,
Group: certmanager.GroupName,
Kind: "Issuer",
}),
gen.SetCertificateRequestDuration(&metav1.Duration{Duration: time.Hour * 24 * 60}),
)
baseCRDenied := gen.CertificateRequestFrom(baseCRNotApproved,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionDenied,
Status: cmmeta.ConditionTrue,
Reason: "Foo",
Message: "Certificate request has been denied by cert-manager.io",
LastTransitionTime: &metaFixedClockStart,
}),
)
baseCR := gen.CertificateRequestFrom(baseCRNotApproved,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionApproved,
Status: cmmeta.ConditionTrue,
Reason: "cert-manager.io",
Message: "Certificate request has been approved by cert-manager.io",
LastTransitionTime: &metaFixedClockStart,
}),
)
// generate a self signed root ca valid for 60d
_, rsaPEMCert := generateSelfSignedCertFromCR(t, baseCR, skRSA, time.Hour*24*60)
rsaCASecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "root-ca-secret",
Namespace: gen.DefaultTestNamespace,
},
Data: map[string][]byte{
corev1.TLSPrivateKeyKey: skRSAPEM,
corev1.TLSCertKey: rsaPEMCert,
},
}
badDataSecret := rsaCASecret.DeepCopy()
badDataSecret.Data[corev1.TLSPrivateKeyKey] = []byte("bad key")
template, err := pki.GenerateTemplateFromCertificateRequest(baseCR)
if err != nil {
t.Error(err)
t.FailNow()
}
certPEM, _, err := pki.SignCSRTemplate([]*x509.Certificate{template}, skRSA, template)
if err != nil {
t.Error(err)
t.FailNow()
}
tests := map[string]testT{
"a CertificateRequest without an approved condition should do nothing": {
certificateRequest: baseCRNotApproved.DeepCopy(),
builder: &testpkg.Builder{
KubeObjects: []runtime.Object{},
CertManagerObjects: []runtime.Object{baseCRNotApproved.DeepCopy(), baseIssuer.DeepCopy()},
},
},
"a CertificateRequest with a denied condition should update Ready condition with 'Denied'": {
certificateRequest: baseCRDenied.DeepCopy(),
builder: &testpkg.Builder{
KubeObjects: []runtime.Object{},
CertManagerObjects: []runtime.Object{baseCRDenied.DeepCopy(), baseIssuer.DeepCopy()},
ExpectedEvents: []string{},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(baseCRDenied,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: "Denied",
Message: "The CertificateRequest was denied by an approval controller",
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestFailureTime(metaFixedClockStart),
),
)),
},
},
},
"a missing CA key pair should set the condition to pending and wait for a re-sync": {
certificateRequest: baseCR.DeepCopy(),
builder: &testpkg.Builder{
KubeObjects: []runtime.Object{},
CertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},
ExpectedEvents: []string{
`Normal SecretMissing Referenced secret default-unit-test-ns/root-ca-secret not found: secret "root-ca-secret" not found`,
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(baseCR.DeepCopy(),
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: `Referenced secret default-unit-test-ns/root-ca-secret not found: secret "root-ca-secret" not found`,
LastTransitionTime: &metaFixedClockStart,
}),
),
)),
},
},
},
"a secret with invalid data should set condition to pending and wait for re-sync": {
certificateRequest: baseCR.DeepCopy(),
builder: &testpkg.Builder{
KubeObjects: []runtime.Object{badDataSecret},
CertManagerObjects: []runtime.Object{baseCR.DeepCopy(),
gen.IssuerFrom(baseIssuer.DeepCopy(),
gen.SetIssuerCA(cmapi.CAIssuer{SecretName: badDataSecret.Name}),
),
},
ExpectedEvents: []string{
"Normal SecretInvalidData Failed to parse signing CA keypair from secret default-unit-test-ns/root-ca-secret: error decoding private key PEM block",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(baseCR.DeepCopy(),
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Failed to parse signing CA keypair from secret default-unit-test-ns/root-ca-secret: error decoding private key PEM block",
LastTransitionTime: &metaFixedClockStart,
}),
),
)),
},
},
},
"a CertificateRequest that transiently fails a secret lookup should backoff error to retry": {
certificateRequest: baseCR.DeepCopy(),
builder: &testpkg.Builder{
KubeObjects: []runtime.Object{rsaCASecret},
CertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},
ExpectedEvents: []string{
`Normal SecretGetError Failed to get certificate key pair from secret default-unit-test-ns/root-ca-secret: this is a network error`,
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(baseCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonPending,
Message: "Failed to get certificate key pair from secret default-unit-test-ns/root-ca-secret: this is a network error",
LastTransitionTime: &metaFixedClockStart,
}),
),
)),
},
},
fakeLister: &testlisters.FakeSecretLister{
SecretsFn: func(namespace string) clientcorev1.SecretNamespaceLister {
return &testlisters.FakeSecretNamespaceLister{
GetFn: func(name string) (ret *corev1.Secret, err error) {
return nil, errors.New("this is a network error")
},
}
},
},
expectedErr: true,
},
"should exit nil and set status pending if referenced issuer is not ready": {
certificateRequest: baseCR.DeepCopy(),
builder: &testpkg.Builder{
KubeObjects: []runtime.Object{rsaCASecret},
CertManagerObjects: []runtime.Object{baseCR.DeepCopy(),
gen.Issuer(baseIssuer.DeepCopy().Name,
gen.SetIssuerCA(cmapi.CAIssuer{}),
)},
ExpectedEvents: []string{
"Normal IssuerNotReady Referenced issuer does not have a Ready status condition",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(baseCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: "Pending",
Message: "Referenced issuer does not have a Ready status condition",
LastTransitionTime: &metaFixedClockStart,
}),
),
)),
},
},
},
"a secret that fails to sign due to failing to generate the certificate template should set condition to failed": {
certificateRequest: baseCR.DeepCopy(),
templateGenerator: func(*cmapi.CertificateRequest) (*x509.Certificate, error) {
return nil, errors.New("this is a template generate error")
},
builder: &testpkg.Builder{
KubeObjects: []runtime.Object{rsaCASecret},
CertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},
ExpectedEvents: []string{
"Warning SigningError Error generating certificate template: this is a template generate error",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(baseCR.DeepCopy(),
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionFalse,
Reason: cmapi.CertificateRequestReasonFailed,
Message: "Error generating certificate template: this is a template generate error",
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestFailureTime(metaFixedClockStart),
),
)),
},
},
},
"a successful signing should set condition to Ready": {
certificateRequest: baseCR.DeepCopy(),
templateGenerator: func(cr *cmapi.CertificateRequest) (*x509.Certificate, error) {
_, err := pki.GenerateTemplateFromCertificateRequest(cr)
if err != nil {
return nil, err
}
return template, nil
},
builder: &testpkg.Builder{
KubeObjects: []runtime.Object{rsaCASecret},
CertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},
ExpectedEvents: []string{
"Normal CertificateIssued Certificate fetched from issuer successfully",
},
ExpectedActions: []testpkg.Action{
testpkg.NewAction(coretesting.NewUpdateSubresourceAction(
cmapi.SchemeGroupVersion.WithResource("certificaterequests"),
"status",
gen.DefaultTestNamespace,
gen.CertificateRequestFrom(baseCR,
gen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{
Type: cmapi.CertificateRequestConditionReady,
Status: cmmeta.ConditionTrue,
Reason: cmapi.CertificateRequestReasonIssued,
Message: "Certificate fetched from issuer successfully",
LastTransitionTime: &metaFixedClockStart,
}),
gen.SetCertificateRequestCA(rsaPEMCert),
gen.SetCertificateRequestCertificate(certPEM),
),
)),
},
},
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
fixedClock.SetTime(fixedClockStart)
test.builder.Clock = fixedClock
runTest(t, test)
})
}
}
type testT struct {
builder *testpkg.Builder
certificateRequest *cmapi.CertificateRequest
templateGenerator templateGenerator
expectedErr bool
fakeLister *testlisters.FakeSecretLister
}
func runTest(t *testing.T, test testT) {
test.builder.T = t
test.builder.Init()
defer test.builder.Stop()
ca := NewCA(test.builder.Context)
if test.fakeLister != nil {
ca.secretsLister = test.fakeLister
}
if test.templateGenerator != nil {
ca.templateGenerator = test.templateGenerator
}
controller := certificaterequests.New(apiutil.IssuerCA, ca)
controller.Register(test.builder.Context)
test.builder.Start()
err := controller.Sync(context.Background(), test.certificateRequest)
if err != nil && !test.expectedErr {
t.Errorf("expected to not get an error, but got: %v", err)
}
if err == nil && test.expectedErr {
t.Errorf("expected to get an error but did not get one")
}
test.builder.CheckAndFinish(err)
}
func TestCA_Sign(t *testing.T) {
rsaPair, err := pki.GenerateRSAPrivateKey(2048)
require.NoError(t, err)
rsaCSR := generateCSR(t, rsaPair)
tests := map[string]struct {
givenCASecret *corev1.Secret
givenCAIssuer cmapi.GenericIssuer
givenCR *cmapi.CertificateRequest
assertSignedCert func(t *testing.T, got *x509.Certificate)
wantErr string
}{
"when the CertificateRequest has the duration field set, it should appear as notAfter on the signed ca": {
givenCAIssuer: gen.Issuer("issuer-1", gen.SetIssuerCA(cmapi.CAIssuer{
SecretName: "secret-1",
})),
givenCR: gen.CertificateRequest("cr-1",
gen.SetCertificateRequestCSR(rsaCSR),
gen.SetCertificateRequestIssuer(cmmeta.ObjectReference{
Name: "issuer-1",
Group: certmanager.GroupName,
Kind: "Issuer",
}),
gen.SetCertificateRequestDuration(&metav1.Duration{
Duration: 30 * time.Minute,
}),
),
givenCASecret: gen.SecretFrom(gen.Secret("secret-1"), gen.SetSecretNamespace("default"), gen.SetSecretData(secretDataFor(t, rsaPair,
&x509.Certificate{
SerialNumber: big.NewInt(1234),
IsCA: true,
},
))),
assertSignedCert: func(t *testing.T, got *x509.Certificate) {
// Although there is less than 1µs between the time.Now
// call made by the certificate template func (in the "pki"
// package) and the time.Now below, rounding or truncating
// will always end up with a flaky test. This is due to the
// rounding made to the notAfter value when serializing the
// certificate to ASN.1 [1].
//
// [1]: https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
//
// So instead of using a truncation or rounding in order to
// check the time, we use a delta of 1 second. One entire
// second is totally overkill since, as detailed above, the
// delay is probably less than a microsecond. But that will
// do for now!
//
// Note that we do have a plan to fix this. We want to be
// injecting a time (instead of time.Now) to the template
// functions. This work is being tracked in this issue:
// https://github.com/jetstack/cert-manager/issues/3738
expectNotAfter := time.Now().UTC().Add(30 * time.Minute)
deltaSec := math.Abs(expectNotAfter.Sub(got.NotAfter).Seconds())
assert.LessOrEqualf(t, deltaSec, 1., "expected a time delta lower than 1 second. Time expected='%s', got='%s'", expectNotAfter.String(), got.NotAfter.String())
},
},
"when the CertificateRequest has the isCA field set, it should appear on the signed ca": {
givenCAIssuer: gen.Issuer("issuer-1", gen.SetIssuerCA(cmapi.CAIssuer{
SecretName: "secret-1",
})),
givenCR: gen.CertificateRequest("cr-1",
gen.SetCertificateRequestCSR(rsaCSR),
gen.SetCertificateRequestIssuer(cmmeta.ObjectReference{
Name: "issuer-1",
Group: certmanager.GroupName,
Kind: "Issuer",
}),
gen.SetCertificateRequestIsCA(true),
),
givenCASecret: gen.SecretFrom(gen.Secret("secret-1"), gen.SetSecretNamespace("default"), gen.SetSecretData(secretDataFor(t, rsaPair,
&x509.Certificate{
SerialNumber: big.NewInt(1234),
IsCA: true,
},
))),
assertSignedCert: func(t *testing.T, got *x509.Certificate) {
assert.Equal(t, true, got.IsCA)
},
},
"when the Issuer has ocspServers set, it should appear on the signed ca": {
givenCAIssuer: gen.Issuer("issuer-1", gen.SetIssuerCA(cmapi.CAIssuer{
SecretName: "secret-1",
OCSPServers: []string{"http://ocsp-v3.example.org"},
})),
givenCR: gen.CertificateRequest("cr-1",
gen.SetCertificateRequestCSR(rsaCSR),
gen.SetCertificateRequestIssuer(cmmeta.ObjectReference{
Name: "issuer-1",
Group: certmanager.GroupName,
Kind: "Issuer",
}),
),
givenCASecret: gen.SecretFrom(gen.Secret("secret-1"), gen.SetSecretNamespace("default"), gen.SetSecretData(secretDataFor(t, rsaPair,
&x509.Certificate{
SerialNumber: big.NewInt(1234),
IsCA: true,
},
))),
assertSignedCert: func(t *testing.T, got *x509.Certificate) {
assert.Equal(t, []string{"http://ocsp-v3.example.org"}, got.OCSPServer)
},
},
"when the Issuer has crlDistributionPoints set, it should appear on the signed ca ": {
givenCAIssuer: gen.Issuer("issuer-1", gen.SetIssuerCA(cmapi.CAIssuer{
SecretName: "secret-1",
CRLDistributionPoints: []string{"http://www.example.com/crl/test.crl"},
})),
givenCR: gen.CertificateRequest("cr-1",
gen.SetCertificateRequestIsCA(true),
gen.SetCertificateRequestCSR(rsaCSR),
gen.SetCertificateRequestIssuer(cmmeta.ObjectReference{
Name: "issuer-1",
Group: certmanager.GroupName,
Kind: "Issuer",
}),
),
givenCASecret: gen.SecretFrom(gen.Secret("secret-1"), gen.SetSecretNamespace("default"), gen.SetSecretData(secretDataFor(t, rsaPair,
&x509.Certificate{
SerialNumber: big.NewInt(1234),
IsCA: true,
},
))),
assertSignedCert: func(t *testing.T, gotCA *x509.Certificate) {
assert.Equal(t, []string{"http://www.example.com/crl/test.crl"}, gotCA.CRLDistributionPoints)
},
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
rec := &controllertest.FakeRecorder{}
c := &CA{
issuerOptions: controller.IssuerOptions{
ClusterResourceNamespace: "",
ClusterIssuerAmbientCredentials: false,
IssuerAmbientCredentials: false,
},
reporter: util.NewReporter(fixedClock, rec),
secretsLister: listers.FakeSecretListerFrom(listers.NewFakeSecretLister(),
listers.SetFakeSecretNamespaceListerGet(test.givenCASecret, nil),
),
templateGenerator: pki.GenerateTemplateFromCertificateRequest,
}
gotIssueResp, gotErr := c.Sign(context.Background(), test.givenCR, test.givenCAIssuer)
if test.wantErr != "" {
require.EqualError(t, gotErr, test.wantErr)
} else {
require.NoError(t, gotErr)
require.NotNil(t, gotIssueResp)
gotCert, err := pki.DecodeX509CertificateBytes(gotIssueResp.Certificate)
require.NoError(t, err)
test.assertSignedCert(t, gotCert)
}
})
}
}
// Returns a map that is meant to be used for creating a certificate Secret
// that contains the fields "tls.crt" and "tls.key".
func secretDataFor(t *testing.T, caKey *rsa.PrivateKey, caCrt *x509.Certificate) (secretData map[string][]byte) {
rootCADER, err := x509.CreateCertificate(rand.Reader, caCrt, caCrt, caKey.Public(), caKey)
require.NoError(t, err)
caCrt, err = x509.ParseCertificate(rootCADER)
require.NoError(t, err)
caKeyPEM, err := pki.EncodePKCS8PrivateKey(caKey)
require.NoError(t, err)
caCrtPEM, err := pki.EncodeX509(caCrt)
require.NoError(t, err)
return map[string][]byte{
"tls.key": caKeyPEM,
"tls.crt": caCrtPEM,
}
}
| 1 | 26,676 | Nit: below on line 449 we use `%v` to print an error - perhaps would be nice to do it in the same way in both places (but probably not that important). Similar in other places in this PR. | jetstack-cert-manager | go |
@@ -169,6 +169,8 @@ class Flow(stateobject.StateObject):
self.reply.take()
self.reply.kill(force=True)
self.reply.commit()
+ self.live = False
+ master.addons("kill", self)
def intercept(self, master):
""" | 1 | import time
import copy
import uuid
from mitmproxy import controller # noqa
from mitmproxy import stateobject
from mitmproxy import connections
from mitmproxy import version
import typing # noqa
class Error(stateobject.StateObject):
"""
An Error.
    This is distinct from a protocol error response (say, an HTTP code 500),
which is represented by a normal HTTPResponse object. This class is
responsible for indicating errors that fall outside of normal protocol
communications, like interrupted connections, timeouts, protocol errors.
Exposes the following attributes:
msg: Message describing the error
timestamp: Seconds since the epoch
"""
def __init__(self, msg: str, timestamp=None) -> None:
"""
@type msg: str
@type timestamp: float
"""
self.msg = msg
self.timestamp = timestamp or time.time()
_stateobject_attributes = dict(
msg=str,
timestamp=float
)
def __str__(self):
return self.msg
def __repr__(self):
return self.msg
@classmethod
def from_state(cls, state):
# the default implementation assumes an empty constructor. Override
# accordingly.
f = cls(None)
f.set_state(state)
return f
def copy(self):
c = copy.copy(self)
return c
class Flow(stateobject.StateObject):
"""
A Flow is a collection of objects representing a single transaction.
This class is usually subclassed for each protocol, e.g. HTTPFlow.
"""
def __init__(
self,
type: str,
client_conn: connections.ClientConnection,
server_conn: connections.ServerConnection,
live: bool=None
) -> None:
self.type = type
self.id = str(uuid.uuid4())
self.client_conn = client_conn
self.server_conn = server_conn
self.live = live
self.error = None # type: typing.Optional[Error]
self.intercepted = False # type: bool
self._backup = None # type: typing.Optional[Flow]
self.reply = None # type: typing.Optional[controller.Reply]
self.marked = False # type: bool
self.metadata = dict() # type: typing.Dict[str, str]
_stateobject_attributes = dict(
id=str,
error=Error,
client_conn=connections.ClientConnection,
server_conn=connections.ServerConnection,
type=str,
intercepted=bool,
marked=bool,
metadata=dict,
)
def get_state(self):
d = super().get_state()
d.update(version=version.IVERSION)
if self._backup and self._backup != d:
d.update(backup=self._backup)
return d
def set_state(self, state):
state.pop("version")
if "backup" in state:
self._backup = state.pop("backup")
super().set_state(state)
@classmethod
def from_state(cls, state):
f = cls(None, None)
f.set_state(state)
return f
def copy(self):
f = copy.copy(self)
f.id = str(uuid.uuid4())
f.live = False
f.client_conn = self.client_conn.copy()
f.server_conn = self.server_conn.copy()
f.metadata = self.metadata.copy()
if self.error:
f.error = self.error.copy()
return f
def modified(self):
"""
Has this Flow been modified?
"""
if self._backup:
return self._backup != self.get_state()
else:
return False
def backup(self, force=False):
"""
Save a backup of this Flow, which can be reverted to using a
call to .revert().
"""
if not self._backup:
self._backup = self.get_state()
def revert(self):
"""
Revert to the last backed up state.
"""
if self._backup:
self.set_state(self._backup)
self._backup = None
@property
def killable(self):
return self.reply and self.reply.state in {"handled", "taken"}
def kill(self, master):
"""
Kill this request.
"""
self.error = Error("Connection killed")
self.intercepted = False
# reply.state should only be "handled" or "taken" here.
        # If that is not the case, .take() will raise an exception.
if self.reply.state != "taken":
self.reply.take()
self.reply.kill(force=True)
self.reply.commit()
def intercept(self, master):
"""
Intercept this Flow. Processing will stop until resume is
called.
"""
if self.intercepted:
return
self.intercepted = True
self.reply.take()
master.addons("intercept", self)
def resume(self, master):
"""
Continue with the flow - called after an intercept().
"""
if not self.intercepted:
return
self.intercepted = False
self.reply.ack()
self.reply.commit()
master.addons("intercept", self)
| 1 | 12,464 | I'm not against adding a kill event, but we need to do it properly, please. That means listing it in events.py, adding docs for it, and so forth. | mitmproxy-mitmproxy | py |
@@ -2083,9 +2083,15 @@ class CppGenerator : public BaseGenerator {
if (!field.deprecated) {
code_.SetValue("FIELD_NAME", Name(field));
if (field.value.type.base_type == BASE_TYPE_STRING) {
- code_ +=
- " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? "
- "_fbb.CreateString({{FIELD_NAME}}) : 0;";
+ if (!field.native_shared) {
+ code_ +=
+ " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? "
+ "_fbb.CreateString({{FIELD_NAME}}) : 0;";
+ } else {
+ code_ +=
+ " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? "
+ "_fbb.CreateSharedString({{FIELD_NAME}}) : 0;";
+ }
} else if (field.value.type.base_type == BASE_TYPE_VECTOR) {
code_ += " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? \\";
const auto vtype = field.value.type.VectorType(); | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// independent from idl_parser, since this code is not needed for most clients
#include "flatbuffers/code_generators.h"
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include <unordered_set>
namespace flatbuffers {
// Pedantic warning free version of toupper().
inline char ToUpper(char c) { return static_cast<char>(::toupper(c)); }
static std::string GeneratedFileName(const std::string &path,
const std::string &file_name) {
return path + file_name + "_generated.h";
}
namespace cpp {
class CppGenerator : public BaseGenerator {
public:
CppGenerator(const Parser &parser, const std::string &path,
const std::string &file_name)
: BaseGenerator(parser, path, file_name, "", "::"),
cur_name_space_(nullptr),
float_const_gen_("std::numeric_limits<double>::",
"std::numeric_limits<float>::", "quiet_NaN()",
"infinity()") {
static const char * const keywords[] = {
"alignas",
"alignof",
"and",
"and_eq",
"asm",
"atomic_cancel",
"atomic_commit",
"atomic_noexcept",
"auto",
"bitand",
"bitor",
"bool",
"break",
"case",
"catch",
"char",
"char16_t",
"char32_t",
"class",
"compl",
"concept",
"const",
"constexpr",
"const_cast",
"continue",
"co_await",
"co_return",
"co_yield",
"decltype",
"default",
"delete",
"do",
"double",
"dynamic_cast",
"else",
"enum",
"explicit",
"export",
"extern",
"false",
"float",
"for",
"friend",
"goto",
"if",
"import",
"inline",
"int",
"long",
"module",
"mutable",
"namespace",
"new",
"noexcept",
"not",
"not_eq",
"nullptr",
"operator",
"or",
"or_eq",
"private",
"protected",
"public",
"register",
"reinterpret_cast",
"requires",
"return",
"short",
"signed",
"sizeof",
"static",
"static_assert",
"static_cast",
"struct",
"switch",
"synchronized",
"template",
"this",
"thread_local",
"throw",
"true",
"try",
"typedef",
"typeid",
"typename",
"union",
"unsigned",
"using",
"virtual",
"void",
"volatile",
"wchar_t",
"while",
"xor",
"xor_eq",
nullptr };
for (auto kw = keywords; *kw; kw++) keywords_.insert(*kw);
}
std::string GenIncludeGuard() const {
// Generate include guard.
std::string guard = file_name_;
// Remove any non-alpha-numeric characters that may appear in a filename.
struct IsAlnum {
bool operator()(char c) const { return !is_alnum(c); }
};
guard.erase(std::remove_if(guard.begin(), guard.end(), IsAlnum()),
guard.end());
guard = "FLATBUFFERS_GENERATED_" + guard;
guard += "_";
// For further uniqueness, also add the namespace.
auto name_space = parser_.current_namespace_;
for (auto it = name_space->components.begin();
it != name_space->components.end(); ++it) {
guard += *it + "_";
}
guard += "H_";
std::transform(guard.begin(), guard.end(), guard.begin(), ToUpper);
return guard;
}
void GenIncludeDependencies() {
int num_includes = 0;
for (auto it = parser_.native_included_files_.begin();
it != parser_.native_included_files_.end(); ++it) {
code_ += "#include \"" + *it + "\"";
num_includes++;
}
for (auto it = parser_.included_files_.begin();
it != parser_.included_files_.end(); ++it) {
if (it->second.empty()) continue;
auto noext = flatbuffers::StripExtension(it->second);
auto basename = flatbuffers::StripPath(noext);
code_ += "#include \"" + parser_.opts.include_prefix +
(parser_.opts.keep_include_path ? noext : basename) +
"_generated.h\"";
num_includes++;
}
if (num_includes) code_ += "";
}
std::string EscapeKeyword(const std::string &name) const {
return keywords_.find(name) == keywords_.end() ? name : name + "_";
}
std::string Name(const Definition &def) const {
return EscapeKeyword(def.name);
}
std::string Name(const EnumVal &ev) const { return EscapeKeyword(ev.name); }
// Iterate through all definitions we haven't generate code for (enums,
// structs, and tables) and output them to a single file.
bool generate() {
code_.Clear();
code_ += "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n";
const auto include_guard = GenIncludeGuard();
code_ += "#ifndef " + include_guard;
code_ += "#define " + include_guard;
code_ += "";
if (parser_.opts.gen_nullable) {
code_ += "#pragma clang system_header\n\n";
}
code_ += "#include \"flatbuffers/flatbuffers.h\"";
if (parser_.uses_flexbuffers_) {
code_ += "#include \"flatbuffers/flexbuffers.h\"";
}
code_ += "";
if (parser_.opts.include_dependence_headers) { GenIncludeDependencies(); }
FLATBUFFERS_ASSERT(!cur_name_space_);
// Generate forward declarations for all structs/tables, since they may
// have circular references.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
code_ += "struct " + Name(struct_def) + ";";
if (parser_.opts.generate_object_based_api) {
auto nativeName = NativeName(Name(struct_def), &struct_def, parser_.opts);
if (!struct_def.fixed) {
code_ += "struct " + nativeName + ";";
}
}
code_ += "";
}
}
// Generate forward declarations for all equal operators
if (parser_.opts.generate_object_based_api && parser_.opts.gen_compare) {
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
auto nativeName = NativeName(Name(struct_def), &struct_def, parser_.opts);
code_ += "bool operator==(const " + nativeName + " &lhs, const " + nativeName + " &rhs);";
}
}
code_ += "";
}
    // Generate preamble code for mini reflection.
if (parser_.opts.mini_reflect != IDLOptions::kNone) {
// To break cyclic dependencies, first pre-declare all tables/structs.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenMiniReflectPre(&struct_def);
}
}
}
// Generate code for all the enum declarations.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (!enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenEnum(enum_def);
}
}
// Generate code for all structs, then all tables.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenStruct(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTable(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTablePost(struct_def);
}
}
// Generate code for union verifiers.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (enum_def.is_union && !enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenUnionPost(enum_def);
}
}
// Generate code for mini reflection.
if (parser_.opts.mini_reflect != IDLOptions::kNone) {
// Then the unions/enums that may refer to them.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (!enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenMiniReflect(nullptr, &enum_def);
}
}
// Then the full tables/structs.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenMiniReflect(&struct_def, nullptr);
}
}
}
// Generate convenient global helper functions:
if (parser_.root_struct_def_) {
auto &struct_def = *parser_.root_struct_def_;
SetNameSpace(struct_def.defined_namespace);
auto name = Name(struct_def);
auto qualified_name = cur_name_space_->GetFullyQualifiedName(name);
auto cpp_name = TranslateNameSpace(qualified_name);
code_.SetValue("STRUCT_NAME", name);
code_.SetValue("CPP_NAME", cpp_name);
code_.SetValue("NULLABLE_EXT", NullableExtension());
// The root datatype accessor:
code_ += "inline \\";
code_ +=
"const {{CPP_NAME}} *{{NULLABLE_EXT}}Get{{STRUCT_NAME}}(const void "
"*buf) {";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(buf);";
code_ += "}";
code_ += "";
code_ += "inline \\";
code_ +=
"const {{CPP_NAME}} *{{NULLABLE_EXT}}GetSizePrefixed{{STRUCT_NAME}}(const void "
"*buf) {";
code_ += " return flatbuffers::GetSizePrefixedRoot<{{CPP_NAME}}>(buf);";
code_ += "}";
code_ += "";
if (parser_.opts.mutable_buffer) {
code_ += "inline \\";
code_ += "{{STRUCT_NAME}} *GetMutable{{STRUCT_NAME}}(void *buf) {";
code_ += " return flatbuffers::GetMutableRoot<{{STRUCT_NAME}}>(buf);";
code_ += "}";
code_ += "";
}
if (parser_.file_identifier_.length()) {
// Return the identifier
code_ += "inline const char *{{STRUCT_NAME}}Identifier() {";
code_ += " return \"" + parser_.file_identifier_ + "\";";
code_ += "}";
code_ += "";
// Check if a buffer has the identifier.
code_ += "inline \\";
code_ += "bool {{STRUCT_NAME}}BufferHasIdentifier(const void *buf) {";
code_ += " return flatbuffers::BufferHasIdentifier(";
code_ += " buf, {{STRUCT_NAME}}Identifier());";
code_ += "}";
code_ += "";
}
// The root verifier.
if (parser_.file_identifier_.length()) {
code_.SetValue("ID", name + "Identifier()");
} else {
code_.SetValue("ID", "nullptr");
}
code_ += "inline bool Verify{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::Verifier &verifier) {";
code_ += " return verifier.VerifyBuffer<{{CPP_NAME}}>({{ID}});";
code_ += "}";
code_ += "";
code_ += "inline bool VerifySizePrefixed{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::Verifier &verifier) {";
code_ += " return verifier.VerifySizePrefixedBuffer<{{CPP_NAME}}>({{ID}});";
code_ += "}";
code_ += "";
if (parser_.file_extension_.length()) {
// Return the extension
code_ += "inline const char *{{STRUCT_NAME}}Extension() {";
code_ += " return \"" + parser_.file_extension_ + "\";";
code_ += "}";
code_ += "";
}
// Finish a buffer with a given root object:
code_ += "inline void Finish{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::FlatBufferBuilder &fbb,";
code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {";
if (parser_.file_identifier_.length())
code_ += " fbb.Finish(root, {{STRUCT_NAME}}Identifier());";
else
code_ += " fbb.Finish(root);";
code_ += "}";
code_ += "";
code_ += "inline void FinishSizePrefixed{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::FlatBufferBuilder &fbb,";
code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {";
if (parser_.file_identifier_.length())
code_ += " fbb.FinishSizePrefixed(root, {{STRUCT_NAME}}Identifier());";
else
code_ += " fbb.FinishSizePrefixed(root);";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// A convenient root unpack function.
auto native_name =
NativeName(WrapInNameSpace(struct_def), &struct_def, parser_.opts);
code_.SetValue("UNPACK_RETURN",
GenTypeNativePtr(native_name, nullptr, false));
code_.SetValue("UNPACK_TYPE",
GenTypeNativePtr(native_name, nullptr, true));
code_ += "inline {{UNPACK_RETURN}} UnPack{{STRUCT_NAME}}(";
code_ += " const void *buf,";
code_ += " const flatbuffers::resolver_function_t *res = nullptr) {";
code_ += " return {{UNPACK_TYPE}}\\";
code_ += "(Get{{STRUCT_NAME}}(buf)->UnPack(res));";
code_ += "}";
code_ += "";
}
}
if (cur_name_space_) SetNameSpace(nullptr);
// Close the include guard.
code_ += "#endif // " + include_guard;
const auto file_path = GeneratedFileName(path_, file_name_);
const auto final_code = code_.ToString();
return SaveFile(file_path.c_str(), final_code, false);
}
private:
CodeWriter code_;
std::unordered_set<std::string> keywords_;
// This tracks the current namespace so we can insert namespace declarations.
const Namespace *cur_name_space_;
const Namespace *CurrentNameSpace() const { return cur_name_space_; }
// Translates a qualified name in flatbuffer text format to the same name in
// the equivalent C++ namespace.
static std::string TranslateNameSpace(const std::string &qualified_name) {
std::string cpp_qualified_name = qualified_name;
size_t start_pos = 0;
while ((start_pos = cpp_qualified_name.find(".", start_pos)) !=
std::string::npos) {
cpp_qualified_name.replace(start_pos, 1, "::");
}
return cpp_qualified_name;
}
void GenComment(const std::vector<std::string> &dc, const char *prefix = "") {
std::string text;
::flatbuffers::GenComment(dc, &text, nullptr, prefix);
code_ += text + "\\";
}
// Return a C++ type from the table in idl.h
std::string GenTypeBasic(const Type &type, bool user_facing_type) const {
static const char * const ctypename[] = {
// clang-format off
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, \
RTYPE) \
#CTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
// clang-format on
};
if (user_facing_type) {
if (type.enum_def) return WrapInNameSpace(*type.enum_def);
if (type.base_type == BASE_TYPE_BOOL) return "bool";
}
return ctypename[type.base_type];
}
// Return a C++ pointer type, specialized to the actual struct/table types,
// and vector element types.
std::string GenTypePointer(const Type &type) const {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return "flatbuffers::String";
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeWire(type.VectorType(), "", false);
return "flatbuffers::Vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
return WrapInNameSpace(*type.struct_def);
}
case BASE_TYPE_UNION:
// fall through
default: { return "void"; }
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// building a flatbuffer.
std::string GenTypeWire(const Type &type, const char *postfix,
bool user_facing_type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + postfix;
} else if (IsStruct(type)) {
return "const " + GenTypePointer(type) + " *";
} else {
return "flatbuffers::Offset<" + GenTypePointer(type) + ">" + postfix;
}
}
// Return a C++ type for any type (scalar/pointer) that reflects its
// serialized size.
std::string GenTypeSize(const Type &type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, false);
} else if (IsStruct(type)) {
return GenTypePointer(type);
} else {
return "flatbuffers::uoffset_t";
}
}
std::string NullableExtension() {
return parser_.opts.gen_nullable ? " _Nullable " : "";
}
static std::string NativeName(const std::string &name, const StructDef *sd,
const IDLOptions &opts) {
return sd && !sd->fixed ? opts.object_prefix + name + opts.object_suffix
: name;
}
const std::string &PtrType(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_ptr_type") : nullptr;
return attr ? attr->constant : parser_.opts.cpp_object_api_pointer_type;
}
const std::string NativeString(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_str_type") : nullptr;
auto &ret = attr ? attr->constant : parser_.opts.cpp_object_api_string_type;
if (ret.empty()) { return "std::string"; }
return ret;
}
std::string GenTypeNativePtr(const std::string &type, const FieldDef *field,
bool is_constructor) {
auto &ptr_type = PtrType(field);
if (ptr_type != "naked") {
return (ptr_type != "default_ptr_type" ? ptr_type :
parser_.opts.cpp_object_api_pointer_type) + "<" + type + ">";
} else if (is_constructor) {
return "";
} else {
return type + " *";
}
}
std::string GenPtrGet(const FieldDef &field) {
auto cpp_ptr_type_get = field.attributes.Lookup("cpp_ptr_type_get");
if (cpp_ptr_type_get)
return cpp_ptr_type_get->constant;
auto &ptr_type = PtrType(&field);
return ptr_type == "naked" ? "" : ".get()";
}
std::string GenTypeNative(const Type &type, bool invector,
const FieldDef &field) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return NativeString(&field);
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeNative(type.VectorType(), true, field);
if (type.struct_def &&
type.struct_def->attributes.Lookup("native_custom_alloc")) {
auto native_custom_alloc =
type.struct_def->attributes.Lookup("native_custom_alloc");
return "std::vector<" + type_name + "," +
native_custom_alloc->constant + "<" + type_name + ">>";
} else
return "std::vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
auto type_name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) { type_name = native_type->constant; }
if (invector || field.native_inline) {
return type_name;
} else {
return GenTypeNativePtr(type_name, &field, false);
}
} else {
return GenTypeNativePtr(
NativeName(type_name, type.struct_def, parser_.opts), &field,
false);
}
}
case BASE_TYPE_UNION: {
return type.enum_def->name + "Union";
}
default: { return GenTypeBasic(type, true); }
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// using a flatbuffer.
std::string GenTypeGet(const Type &type, const char *afterbasic,
const char *beforeptr, const char *afterptr,
bool user_facing_type) {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + afterbasic;
} else {
return beforeptr + GenTypePointer(type) + afterptr;
}
}
std::string GenEnumDecl(const EnumDef &enum_def) const {
const IDLOptions &opts = parser_.opts;
return (opts.scoped_enums ? "enum class " : "enum ") + Name(enum_def);
}
std::string GenEnumValDecl(const EnumDef &enum_def,
const std::string &enum_val) const {
const IDLOptions &opts = parser_.opts;
return opts.prefixed_enums ? Name(enum_def) + "_" + enum_val : enum_val;
}
std::string GetEnumValUse(const EnumDef &enum_def,
const EnumVal &enum_val) const {
const IDLOptions &opts = parser_.opts;
if (opts.scoped_enums) {
return Name(enum_def) + "::" + Name(enum_val);
} else if (opts.prefixed_enums) {
return Name(enum_def) + "_" + Name(enum_val);
} else {
return Name(enum_val);
}
}
std::string StripUnionType(const std::string &name) {
return name.substr(0, name.size() - strlen(UnionTypeFieldSuffix()));
}
std::string GetUnionElement(const EnumVal &ev, bool wrap, bool actual_type,
bool native_type = false) {
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
auto name = actual_type ? ev.union_type.struct_def->name : Name(ev);
return wrap ? WrapInNameSpace(ev.union_type.struct_def->defined_namespace,
name)
: name;
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
return actual_type ? (native_type ? "std::string" : "flatbuffers::String")
: Name(ev);
} else {
FLATBUFFERS_ASSERT(false);
return Name(ev);
}
}
std::string UnionVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + Name(enum_def) +
"(flatbuffers::Verifier &verifier, const void *obj, " +
Name(enum_def) + " type)";
}
std::string UnionVectorVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + Name(enum_def) + "Vector" +
"(flatbuffers::Verifier &verifier, " +
"const flatbuffers::Vector<flatbuffers::Offset<void>> *values, " +
"const flatbuffers::Vector<uint8_t> *types)";
}
std::string UnionUnPackSignature(const EnumDef &enum_def, bool inclass) {
return (inclass ? "static " : "") + std::string("void *") +
(inclass ? "" : Name(enum_def) + "Union::") +
"UnPack(const void *obj, " + Name(enum_def) +
" type, const flatbuffers::resolver_function_t *resolver)";
}
std::string UnionPackSignature(const EnumDef &enum_def, bool inclass) {
return "flatbuffers::Offset<void> " +
(inclass ? "" : Name(enum_def) + "Union::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ") const";
}
std::string TableCreateSignature(const StructDef &struct_def, bool predecl,
const IDLOptions &opts) {
return "flatbuffers::Offset<" + Name(struct_def) + "> Create" +
Name(struct_def) + "(flatbuffers::FlatBufferBuilder &_fbb, const " +
NativeName(Name(struct_def), &struct_def, opts) +
" *_o, const flatbuffers::rehasher_function_t *_rehasher" +
(predecl ? " = nullptr" : "") + ")";
}
std::string TablePackSignature(const StructDef &struct_def, bool inclass,
const IDLOptions &opts) {
return std::string(inclass ? "static " : "") + "flatbuffers::Offset<" +
Name(struct_def) + "> " + (inclass ? "" : Name(struct_def) + "::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " + "const " +
NativeName(Name(struct_def), &struct_def, opts) + "* _o, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ")";
}
std::string TableUnPackSignature(const StructDef &struct_def, bool inclass,
const IDLOptions &opts) {
return NativeName(Name(struct_def), &struct_def, opts) + " *" +
(inclass ? "" : Name(struct_def) + "::") +
"UnPack(const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
std::string TableUnPackToSignature(const StructDef &struct_def, bool inclass,
const IDLOptions &opts) {
return "void " + (inclass ? "" : Name(struct_def) + "::") + "UnPackTo(" +
NativeName(Name(struct_def), &struct_def, opts) + " *" +
"_o, const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
void GenMiniReflectPre(const StructDef *struct_def) {
code_.SetValue("NAME", struct_def->name);
code_ += "inline const flatbuffers::TypeTable *{{NAME}}TypeTable();";
code_ += "";
}
void GenMiniReflect(const StructDef *struct_def, const EnumDef *enum_def) {
code_.SetValue("NAME", struct_def ? struct_def->name : enum_def->name);
code_.SetValue("SEQ_TYPE",
struct_def ? (struct_def->fixed ? "ST_STRUCT" : "ST_TABLE")
: (enum_def->is_union ? "ST_UNION" : "ST_ENUM"));
auto num_fields =
struct_def ? struct_def->fields.vec.size() : enum_def->vals.vec.size();
code_.SetValue("NUM_FIELDS", NumToString(num_fields));
std::vector<std::string> names;
std::vector<Type> types;
bool consecutive_enum_from_zero = true;
if (struct_def) {
for (auto it = struct_def->fields.vec.begin();
it != struct_def->fields.vec.end(); ++it) {
const auto &field = **it;
names.push_back(Name(field));
types.push_back(field.value.type);
}
} else {
for (auto it = enum_def->vals.vec.begin(); it != enum_def->vals.vec.end();
++it) {
const auto &ev = **it;
names.push_back(Name(ev));
types.push_back(enum_def->is_union ? ev.union_type
: Type(enum_def->underlying_type));
if (static_cast<int64_t>(it - enum_def->vals.vec.begin()) != ev.value) {
consecutive_enum_from_zero = false;
}
}
}
std::string ts;
std::vector<std::string> type_refs;
for (auto it = types.begin(); it != types.end(); ++it) {
auto &type = *it;
if (!ts.empty()) ts += ",\n ";
auto is_vector = type.base_type == BASE_TYPE_VECTOR;
auto bt = is_vector ? type.element : type.base_type;
auto et = IsScalar(bt) || bt == BASE_TYPE_STRING
? bt - BASE_TYPE_UTYPE + ET_UTYPE
: ET_SEQUENCE;
int ref_idx = -1;
std::string ref_name =
type.struct_def
? WrapInNameSpace(*type.struct_def)
: type.enum_def ? WrapInNameSpace(*type.enum_def) : "";
if (!ref_name.empty()) {
auto rit = type_refs.begin();
for (; rit != type_refs.end(); ++rit) {
if (*rit == ref_name) {
ref_idx = static_cast<int>(rit - type_refs.begin());
break;
}
}
if (rit == type_refs.end()) {
ref_idx = static_cast<int>(type_refs.size());
type_refs.push_back(ref_name);
}
}
ts += "{ flatbuffers::" + std::string(ElementaryTypeNames()[et]) + ", " +
NumToString(is_vector) + ", " + NumToString(ref_idx) + " }";
}
std::string rs;
for (auto it = type_refs.begin(); it != type_refs.end(); ++it) {
if (!rs.empty()) rs += ",\n ";
rs += *it + "TypeTable";
}
std::string ns;
for (auto it = names.begin(); it != names.end(); ++it) {
if (!ns.empty()) ns += ",\n ";
ns += "\"" + *it + "\"";
}
std::string vs;
if (enum_def && !consecutive_enum_from_zero) {
for (auto it = enum_def->vals.vec.begin(); it != enum_def->vals.vec.end();
++it) {
const auto &ev = **it;
if (!vs.empty()) vs += ", ";
vs += NumToString(ev.value);
}
} else if (struct_def && struct_def->fixed) {
for (auto it = struct_def->fields.vec.begin();
it != struct_def->fields.vec.end(); ++it) {
const auto &field = **it;
vs += NumToString(field.value.offset);
vs += ", ";
}
vs += NumToString(struct_def->bytesize);
}
code_.SetValue("TYPES", ts);
code_.SetValue("REFS", rs);
code_.SetValue("NAMES", ns);
code_.SetValue("VALUES", vs);
code_ += "inline const flatbuffers::TypeTable *{{NAME}}TypeTable() {";
if (num_fields) {
code_ += " static const flatbuffers::TypeCode type_codes[] = {";
code_ += " {{TYPES}}";
code_ += " };";
}
if (!type_refs.empty()) {
code_ += " static const flatbuffers::TypeFunction type_refs[] = {";
code_ += " {{REFS}}";
code_ += " };";
}
if (!vs.empty()) {
code_ += " static const int64_t values[] = { {{VALUES}} };";
}
auto has_names =
num_fields && parser_.opts.mini_reflect == IDLOptions::kTypesAndNames;
if (has_names) {
code_ += " static const char * const names[] = {";
code_ += " {{NAMES}}";
code_ += " };";
}
code_ += " static const flatbuffers::TypeTable tt = {";
code_ += std::string(" flatbuffers::{{SEQ_TYPE}}, {{NUM_FIELDS}}, ") +
(num_fields ? "type_codes, " : "nullptr, ") +
(!type_refs.empty() ? "type_refs, " : "nullptr, ") +
(!vs.empty() ? "values, " : "nullptr, ") +
(has_names ? "names" : "nullptr");
code_ += " };";
code_ += " return &tt;";
code_ += "}";
code_ += "";
}
// Generate an enum declaration,
// an enum string lookup table,
// and an enum array of values
void GenEnum(const EnumDef &enum_def) {
code_.SetValue("ENUM_NAME", Name(enum_def));
code_.SetValue("BASE_TYPE", GenTypeBasic(enum_def.underlying_type, false));
code_.SetValue("SEP", "");
GenComment(enum_def.doc_comment);
code_ += GenEnumDecl(enum_def) + "\\";
if (parser_.opts.scoped_enums) code_ += " : {{BASE_TYPE}}\\";
code_ += " {";
int64_t anyv = 0;
const EnumVal *minv = nullptr, *maxv = nullptr;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
GenComment(ev.doc_comment, " ");
code_.SetValue("KEY", GenEnumValDecl(enum_def, Name(ev)));
code_.SetValue("VALUE", NumToString(ev.value));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("SEP", ",\n");
minv = !minv || minv->value > ev.value ? &ev : minv;
maxv = !maxv || maxv->value < ev.value ? &ev : maxv;
anyv |= ev.value;
}
if (parser_.opts.scoped_enums || parser_.opts.prefixed_enums) {
FLATBUFFERS_ASSERT(minv && maxv);
code_.SetValue("SEP", ",\n");
if (enum_def.attributes.Lookup("bit_flags")) {
code_.SetValue("KEY", GenEnumValDecl(enum_def, "NONE"));
code_.SetValue("VALUE", "0");
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY", GenEnumValDecl(enum_def, "ANY"));
code_.SetValue("VALUE", NumToString(anyv));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
} else { // MIN & MAX are useless for bit_flags
code_.SetValue("KEY", GenEnumValDecl(enum_def, "MIN"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, minv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY", GenEnumValDecl(enum_def, "MAX"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, maxv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
}
}
code_ += "";
code_ += "};";
if (parser_.opts.scoped_enums && enum_def.attributes.Lookup("bit_flags")) {
code_ += "FLATBUFFERS_DEFINE_BITMASK_OPERATORS({{ENUM_NAME}}, {{BASE_TYPE}})";
}
code_ += "";
// Generate an array of all enumeration values
auto num_fields = NumToString(enum_def.vals.vec.size());
code_ += "inline const {{ENUM_NAME}} (&EnumValues{{ENUM_NAME}}())[" + num_fields +
"] {";
code_ += " static const {{ENUM_NAME}} values[] = {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
auto value = GetEnumValUse(enum_def, ev);
auto suffix = *it != enum_def.vals.vec.back() ? "," : "";
code_ += " " + value + suffix;
}
code_ += " };";
code_ += " return values;";
code_ += "}";
code_ += "";
    // Generate a string table for enum values.
    // Problem is, if values are very sparse, that could generate really big
// tables. Ideally in that case we generate a map lookup instead, but for
// the moment we simply don't output a table at all.
auto range =
enum_def.vals.vec.back()->value - enum_def.vals.vec.front()->value + 1;
// Average distance between values above which we consider a table
// "too sparse". Change at will.
static const int kMaxSparseness = 5;
if (range / static_cast<int64_t>(enum_def.vals.vec.size()) <
kMaxSparseness) {
code_ += "inline const char * const *EnumNames{{ENUM_NAME}}() {";
code_ += " static const char * const names[] = {";
auto val = enum_def.vals.vec.front()->value;
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
while (val++ != ev.value) { code_ += " \"\","; }
code_ += " \"" + Name(ev) + "\",";
}
code_ += " nullptr";
code_ += " };";
code_ += " return names;";
code_ += "}";
code_ += "";
code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {";
code_ += " if (e < " + GetEnumValUse(enum_def, *enum_def.vals.vec.front()) +
" || e > " + GetEnumValUse(enum_def, *enum_def.vals.vec.back()) +
") return \"\";";
code_ += " const size_t index = static_cast<int>(e)\\";
if (enum_def.vals.vec.front()->value) {
auto vals = GetEnumValUse(enum_def, *enum_def.vals.vec.front());
code_ += " - static_cast<int>(" + vals + ")\\";
}
code_ += ";";
code_ += " return EnumNames{{ENUM_NAME}}()[index];";
code_ += "}";
code_ += "";
} else {
code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {";
code_ += " switch (e) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
code_ += " case " + GetEnumValUse(enum_def, ev) + ": return \"" +
Name(ev) + "\";";
}
code_ += " default: return \"\";";
code_ += " }";
code_ += "}";
code_ += "";
}
// Generate type traits for unions to map from a type to union enum value.
if (enum_def.is_union && !enum_def.uses_multiple_type_instances) {
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (it == enum_def.vals.vec.begin()) {
code_ += "template<typename T> struct {{ENUM_NAME}}Traits {";
} else {
auto name = GetUnionElement(ev, true, true);
code_ += "template<> struct {{ENUM_NAME}}Traits<" + name + "> {";
}
auto value = GetEnumValUse(enum_def, ev);
code_ += " static const {{ENUM_NAME}} enum_value = " + value + ";";
code_ += "};";
code_ += "";
}
}
if (parser_.opts.generate_object_based_api && enum_def.is_union) {
// Generate a union type
code_.SetValue("NAME", Name(enum_def));
code_.SetValue("NONE",
GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE")));
code_ += "struct {{NAME}}Union {";
code_ += " {{NAME}} type;";
code_ += " void *value;";
code_ += "";
code_ += " {{NAME}}Union() : type({{NONE}}), value(nullptr) {}";
code_ += " {{NAME}}Union({{NAME}}Union&& u) FLATBUFFERS_NOEXCEPT :";
code_ += " type({{NONE}}), value(nullptr)";
code_ += " { std::swap(type, u.type); std::swap(value, u.value); }";
code_ += " {{NAME}}Union(const {{NAME}}Union &) FLATBUFFERS_NOEXCEPT;";
code_ +=
" {{NAME}}Union &operator=(const {{NAME}}Union &u) "
"FLATBUFFERS_NOEXCEPT";
code_ +=
" { {{NAME}}Union t(u); std::swap(type, t.type); std::swap(value, "
"t.value); return *this; }";
code_ +=
" {{NAME}}Union &operator=({{NAME}}Union &&u) FLATBUFFERS_NOEXCEPT";
code_ +=
" { std::swap(type, u.type); std::swap(value, u.value); return "
"*this; }";
code_ += " ~{{NAME}}Union() { Reset(); }";
code_ += "";
code_ += " void Reset();";
code_ += "";
if (!enum_def.uses_multiple_type_instances) {
code_ += "#ifndef FLATBUFFERS_CPP98_STL";
code_ += " template <typename T>";
code_ += " void Set(T&& val) {";
code_ += " Reset();";
code_ +=
" type = {{NAME}}Traits<typename T::TableType>::enum_value;";
code_ += " if (type != {{NONE}}) {";
code_ += " value = new T(std::forward<T>(val));";
code_ += " }";
code_ += " }";
code_ += "#endif // FLATBUFFERS_CPP98_STL";
code_ += "";
}
code_ += " " + UnionUnPackSignature(enum_def, true) + ";";
code_ += " " + UnionPackSignature(enum_def, true) + ";";
code_ += "";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) { continue; }
const auto native_type =
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def, parser_.opts);
code_.SetValue("NATIVE_TYPE", native_type);
code_.SetValue("NATIVE_NAME", Name(ev));
code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev));
code_ += " {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() {";
code_ += " return type == {{NATIVE_ID}} ?";
code_ += " reinterpret_cast<{{NATIVE_TYPE}} *>(value) : nullptr;";
code_ += " }";
code_ += " const {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() const {";
code_ += " return type == {{NATIVE_ID}} ?";
code_ +=
" reinterpret_cast<const {{NATIVE_TYPE}} *>(value) : nullptr;";
code_ += " }";
}
code_ += "};";
code_ += "";
if (parser_.opts.gen_compare) {
code_ += "";
code_ += "inline bool operator==(const {{NAME}}Union &lhs, const {{NAME}}Union &rhs) {";
code_ += " if (lhs.type != rhs.type) return false;";
code_ += " switch (lhs.type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev));
if (ev.value) {
const auto native_type =
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def, parser_.opts);
code_.SetValue("NATIVE_TYPE", native_type);
code_ += " case {{NATIVE_ID}}: {";
code_ += " return *(reinterpret_cast<const {{NATIVE_TYPE}} *>(lhs.value)) ==";
code_ += " *(reinterpret_cast<const {{NATIVE_TYPE}} *>(rhs.value));";
code_ += " }";
} else {
code_ += " case {{NATIVE_ID}}: {";
code_ += " return true;"; // "NONE" enum value.
code_ += " }";
}
}
code_ += " default: {";
code_ += " return false;";
code_ += " }";
code_ += " }";
code_ += "}";
}
}
if (enum_def.is_union) {
code_ += UnionVerifySignature(enum_def) + ";";
code_ += UnionVectorVerifySignature(enum_def) + ";";
code_ += "";
}
}
void GenUnionPost(const EnumDef &enum_def) {
// Generate a verifier function for this union that can be called by the
// table verifier functions. It uses a switch case to select a specific
    // verifier function to call; this should be safe even if the union type
// has been corrupted, since the verifiers will simply fail when called
// on the wrong type.
code_.SetValue("ENUM_NAME", Name(enum_def));
code_ += "inline " + UnionVerifySignature(enum_def) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
if (ev.value) {
code_.SetValue("TYPE", GetUnionElement(ev, true, true));
code_ += " case {{LABEL}}: {";
auto getptr =
" auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
if (ev.union_type.struct_def->fixed) {
code_ += " return true;";
} else {
code_ += getptr;
code_ += " return verifier.VerifyTable(ptr);";
}
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += getptr;
code_ += " return verifier.VerifyString(ptr);";
} else {
FLATBUFFERS_ASSERT(false);
}
code_ += " }";
} else {
code_ += " case {{LABEL}}: {";
code_ += " return true;"; // "NONE" enum value.
code_ += " }";
}
}
code_ += " default: return false;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionVectorVerifySignature(enum_def) + " {";
code_ += " if (!values || !types) return !values && !types;";
code_ += " if (values->size() != types->size()) return false;";
code_ += " for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {";
code_ += " if (!Verify" + Name(enum_def) + "(";
code_ += " verifier, values->Get(i), types->GetEnum<" +
Name(enum_def) + ">(i))) {";
code_ += " return false;";
code_ += " }";
code_ += " }";
code_ += " return true;";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// Generate union Unpack() and Pack() functions.
code_ += "inline " + UnionUnPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) { continue; }
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", GetUnionElement(ev, true, true));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
if (ev.union_type.struct_def->fixed) {
code_ += " return new " +
WrapInNameSpace(*ev.union_type.struct_def) + "(*ptr);";
} else {
code_ += " return ptr->UnPack(resolver);";
}
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += " return new std::string(ptr->c_str(), ptr->size());";
} else {
FLATBUFFERS_ASSERT(false);
}
code_ += " }";
}
code_ += " default: return nullptr;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
auto &ev = **it;
if (!ev.value) { continue; }
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE",
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def, parser_.opts));
code_.SetValue("NAME", GetUnionElement(ev, false, true));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(value);";
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
if (ev.union_type.struct_def->fixed) {
code_ += " return _fbb.CreateStruct(*ptr).Union();";
} else {
code_ +=
" return Create{{NAME}}(_fbb, ptr, _rehasher).Union();";
}
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += " return _fbb.CreateString(*ptr).Union();";
} else {
FLATBUFFERS_ASSERT(false);
}
code_ += " }";
}
code_ += " default: return 0;";
code_ += " }";
code_ += "}";
code_ += "";
// Union copy constructor
code_ +=
"inline {{ENUM_NAME}}Union::{{ENUM_NAME}}Union(const "
"{{ENUM_NAME}}Union &u) FLATBUFFERS_NOEXCEPT : type(u.type), "
"value(nullptr) {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) { continue; }
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE",
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def, parser_.opts));
code_ += " case {{LABEL}}: {";
bool copyable = true;
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
// Don't generate code to copy if table is not copyable.
// TODO(wvo): make tables copyable instead.
for (auto fit = ev.union_type.struct_def->fields.vec.begin();
fit != ev.union_type.struct_def->fields.vec.end(); ++fit) {
const auto &field = **fit;
if (!field.deprecated && field.value.type.struct_def &&
!field.native_inline) {
copyable = false;
break;
}
}
}
if (copyable) {
code_ +=
" value = new {{TYPE}}(*reinterpret_cast<{{TYPE}} *>"
"(u.value));";
} else {
code_ += " FLATBUFFERS_ASSERT(false); // {{TYPE}} not copyable.";
}
code_ += " break;";
code_ += " }";
}
code_ += " default:";
code_ += " break;";
code_ += " }";
code_ += "}";
code_ += "";
// Union Reset() function.
code_.SetValue("NONE",
GetEnumValUse(enum_def, *enum_def.vals.Lookup("NONE")));
code_ += "inline void {{ENUM_NAME}}Union::Reset() {";
code_ += " switch (type) {";
for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end();
++it) {
const auto &ev = **it;
if (!ev.value) { continue; }
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE",
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def, parser_.opts));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<{{TYPE}} *>(value);";
code_ += " delete ptr;";
code_ += " break;";
code_ += " }";
}
code_ += " default: break;";
code_ += " }";
code_ += " value = nullptr;";
code_ += " type = {{NONE}};";
code_ += "}";
code_ += "";
}
}
  // Generates a value, optionally with a cast applied, if the field has a
  // different underlying type from its interface type (currently only the
  // case for enums). "from" specifies the direction: true means from the
  // underlying type to the interface type.
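  // For example, a hypothetical "Color" enum stored as int8_t on the wire is
  // read as static_cast<Color>(val) (from == true) and written back as
  // static_cast<int8_t>(val) (from == false).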
std::string GenUnderlyingCast(const FieldDef &field, bool from,
const std::string &val) {
if (from && field.value.type.base_type == BASE_TYPE_BOOL) {
return val + " != 0";
} else if ((field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) ||
field.value.type.base_type == BASE_TYPE_BOOL) {
return "static_cast<" + GenTypeBasic(field.value.type, from) + ">(" +
val + ")";
} else {
return val;
}
}
std::string GenFieldOffsetName(const FieldDef &field) {
std::string uname = Name(field);
std::transform(uname.begin(), uname.end(), uname.begin(), ToUpper);
return "VT_" + uname;
}
void GenFullyQualifiedNameGetter(const StructDef &struct_def,
const std::string &name) {
if (!parser_.opts.generate_name_strings) { return; }
auto fullname = struct_def.defined_namespace->GetFullyQualifiedName(name);
code_.SetValue("NAME", fullname);
code_.SetValue("CONSTEXPR", "FLATBUFFERS_CONSTEXPR");
code_ += " static {{CONSTEXPR}} const char *GetFullyQualifiedName() {";
code_ += " return \"{{NAME}}\";";
code_ += " }";
}
std::string GenDefaultConstant(const FieldDef &field) {
if(IsFloat(field.value.type.base_type))
return float_const_gen_.GenFloatConstant(field);
else
return field.value.constant;
}
std::string GetDefaultScalarValue(const FieldDef &field, bool is_ctor) {
if (field.value.type.enum_def && IsScalar(field.value.type.base_type)) {
auto ev = field.value.type.enum_def->ReverseLookup(
StringToInt(field.value.constant.c_str()), false);
if (ev) {
return WrapInNameSpace(field.value.type.enum_def->defined_namespace,
GetEnumValUse(*field.value.type.enum_def, *ev));
} else {
return GenUnderlyingCast(field, true, field.value.constant);
}
} else if (field.value.type.base_type == BASE_TYPE_BOOL) {
return field.value.constant == "0" ? "false" : "true";
} else if (field.attributes.Lookup("cpp_type")) {
if (is_ctor) {
if (PtrType(&field) == "naked") {
return "nullptr";
} else {
return "";
}
} else {
return "0";
}
} else {
return GenDefaultConstant(field);
}
}
void GenParam(const FieldDef &field, bool direct, const char *prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("PARAM_NAME", Name(field));
if (direct && field.value.type.base_type == BASE_TYPE_STRING) {
code_.SetValue("PARAM_TYPE", "const char *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else if (direct && field.value.type.base_type == BASE_TYPE_VECTOR) {
const auto vtype = field.value.type.VectorType();
std::string type;
if (IsStruct(vtype)) {
type = WrapInNameSpace(*vtype.struct_def);
} else {
type = GenTypeWire(vtype, "", false);
}
code_.SetValue("PARAM_TYPE", "const std::vector<" + type + "> *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else {
code_.SetValue("PARAM_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("PARAM_VALUE", GetDefaultScalarValue(field, false));
}
code_ += "{{PRE}}{{PARAM_TYPE}}{{PARAM_NAME}} = {{PARAM_VALUE}}\\";
}
// Generate a member, including a default value for scalars and raw pointers.
void GenMember(const FieldDef &field) {
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE &&
(field.value.type.base_type != BASE_TYPE_VECTOR ||
field.value.type.element != BASE_TYPE_UTYPE)) {
auto type = GenTypeNative(field.value.type, false, field);
auto cpp_type = field.attributes.Lookup("cpp_type");
auto full_type =
(cpp_type ? (field.value.type.base_type == BASE_TYPE_VECTOR
? "std::vector<" + GenTypeNativePtr(cpp_type->constant, &field, false) + "> "
: GenTypeNativePtr(cpp_type->constant, &field, false))
: type + " ");
code_.SetValue("FIELD_TYPE", full_type);
code_.SetValue("FIELD_NAME", Name(field));
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}};";
}
}
// Generate the default constructor for this struct. Properly initialize all
// scalar members with default values.
void GenDefaultConstructor(const StructDef &struct_def) {
std::string initializer_list;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE) {
auto cpp_type = field.attributes.Lookup("cpp_type");
auto native_default = field.attributes.Lookup("native_default");
// Scalar types get parsed defaults, raw pointers get nullptrs.
if (IsScalar(field.value.type.base_type)) {
if (!initializer_list.empty()) { initializer_list += ",\n "; }
initializer_list += Name(field);
initializer_list += "(" + (native_default ? std::string(native_default->constant) : GetDefaultScalarValue(field, true)) + ")";
} else if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
if (native_default) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list +=
Name(field) + "(" + native_default->constant + ")";
}
}
} else if (cpp_type && field.value.type.base_type != BASE_TYPE_VECTOR) {
if (!initializer_list.empty()) { initializer_list += ",\n "; }
initializer_list += Name(field) + "(0)";
}
}
}
if (!initializer_list.empty()) {
initializer_list = "\n : " + initializer_list;
}
code_.SetValue("NATIVE_NAME",
NativeName(Name(struct_def), &struct_def, parser_.opts));
code_.SetValue("INIT_LIST", initializer_list);
code_ += " {{NATIVE_NAME}}(){{INIT_LIST}} {";
code_ += " }";
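    // Sketch of the result for a hypothetical "MonsterT" with scalar defaults
    // hp = 100 and mana = 150 (names and values are illustrative only):
    //   MonsterT()
    //       : hp(100),
    //         mana(150) {
    //   }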
}
void GenCompareOperator(const StructDef &struct_def, std::string accessSuffix = "") {
std::string compare_op;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE &&
(field.value.type.base_type != BASE_TYPE_VECTOR ||
field.value.type.element != BASE_TYPE_UTYPE)) {
if (!compare_op.empty()) {
compare_op += " &&\n ";
}
auto accessor = Name(field) + accessSuffix;
compare_op += "(lhs." + accessor + " == rhs." + accessor + ")";
}
}
std::string cmp_lhs;
std::string cmp_rhs;
if (compare_op.empty()) {
cmp_lhs = "";
cmp_rhs = "";
compare_op = " return true;";
} else {
cmp_lhs = "lhs";
cmp_rhs = "rhs";
compare_op = " return\n " + compare_op + ";";
}
code_.SetValue("CMP_OP", compare_op);
code_.SetValue("CMP_LHS", cmp_lhs);
code_.SetValue("CMP_RHS", cmp_rhs);
code_ += "";
code_ += "inline bool operator==(const {{NATIVE_NAME}} &{{CMP_LHS}}, const {{NATIVE_NAME}} &{{CMP_RHS}}) {";
code_ += "{{CMP_OP}}";
code_ += "}";
}
void GenOperatorNewDelete(const StructDef &struct_def) {
if (auto native_custom_alloc =
struct_def.attributes.Lookup("native_custom_alloc")) {
code_ += " inline void *operator new (std::size_t count) {";
code_ += " return " + native_custom_alloc->constant +
"<{{NATIVE_NAME}}>().allocate(count / sizeof({{NATIVE_NAME}}));";
code_ += " }";
code_ += " inline void operator delete (void *ptr) {";
code_ += " return " + native_custom_alloc->constant +
"<{{NATIVE_NAME}}>().deallocate(static_cast<{{NATIVE_NAME}}*>("
"ptr),1);";
code_ += " }";
}
}
void GenNativeTable(const StructDef &struct_def) {
const auto native_name =
NativeName(Name(struct_def), &struct_def, parser_.opts);
code_.SetValue("STRUCT_NAME", Name(struct_def));
code_.SetValue("NATIVE_NAME", native_name);
// Generate a C++ object that can hold an unpacked version of this table.
code_ += "struct {{NATIVE_NAME}} : public flatbuffers::NativeTable {";
code_ += " typedef {{STRUCT_NAME}} TableType;";
GenFullyQualifiedNameGetter(struct_def, native_name);
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
GenMember(**it);
}
GenOperatorNewDelete(struct_def);
GenDefaultConstructor(struct_def);
code_ += "};";
if (parser_.opts.gen_compare) GenCompareOperator(struct_def);
code_ += "";
}
// Generate the code to call the appropriate Verify function(s) for a field.
void GenVerifyCall(const FieldDef &field, const char *prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("NAME", Name(field));
code_.SetValue("REQUIRED", field.required ? "Required" : "");
code_.SetValue("SIZE", GenTypeSize(field.value.type));
code_.SetValue("OFFSET", GenFieldOffsetName(field));
if (IsScalar(field.value.type.base_type) || IsStruct(field.value.type)) {
code_ +=
"{{PRE}}VerifyField{{REQUIRED}}<{{SIZE}}>(verifier, {{OFFSET}})\\";
} else {
code_ += "{{PRE}}VerifyOffset{{REQUIRED}}(verifier, {{OFFSET}})\\";
}
switch (field.value.type.base_type) {
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_.SetValue("SUFFIX", UnionTypeFieldSuffix());
code_ +=
"{{PRE}}Verify{{ENUM_NAME}}(verifier, {{NAME}}(), "
"{{NAME}}{{SUFFIX}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyTable({{NAME}}())\\";
}
break;
}
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.VerifyString({{NAME}}())\\";
break;
}
case BASE_TYPE_VECTOR: {
code_ += "{{PRE}}verifier.VerifyVector({{NAME}}())\\";
switch (field.value.type.element) {
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.VerifyVectorOfStrings({{NAME}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyVectorOfTables({{NAME}}())\\";
}
break;
}
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_ +=
"{{PRE}}Verify{{ENUM_NAME}}Vector(verifier, {{NAME}}(), "
"{{NAME}}_type())\\";
break;
}
default: break;
}
break;
}
default: { break; }
}
}
// Generate CompareWithValue method for a key field.
void GenKeyFieldMethods(const FieldDef &field) {
FLATBUFFERS_ASSERT(field.key);
const bool is_string = (field.value.type.base_type == BASE_TYPE_STRING);
code_ += " bool KeyCompareLessThan(const {{STRUCT_NAME}} *o) const {";
if (is_string) {
// use operator< of flatbuffers::String
code_ += " return *{{FIELD_NAME}}() < *o->{{FIELD_NAME}}();";
} else {
code_ += " return {{FIELD_NAME}}() < o->{{FIELD_NAME}}();";
}
code_ += " }";
if (is_string) {
code_ += " int KeyCompareWithValue(const char *val) const {";
code_ += " return strcmp({{FIELD_NAME}}()->c_str(), val);";
code_ += " }";
} else {
FLATBUFFERS_ASSERT(IsScalar(field.value.type.base_type));
auto type = GenTypeBasic(field.value.type, false);
if (parser_.opts.scoped_enums && field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) {
type = GenTypeGet(field.value.type, " ", "const ", " *", true);
}
// Returns {field<val: -1, field==val: 0, field>val: +1}.
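      // e.g. for a hypothetical uint64_t key field "id" this emits:
      //   int KeyCompareWithValue(uint64_t val) const {
      //     return static_cast<int>(id() > val) - static_cast<int>(id() < val);
      //   }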
code_.SetValue("KEY_TYPE", type);
code_ += " int KeyCompareWithValue({{KEY_TYPE}} val) const {";
code_ +=
" return static_cast<int>({{FIELD_NAME}}() > val) - "
"static_cast<int>({{FIELD_NAME}}() < val);";
code_ += " }";
}
}
// Generate an accessor struct, builder structs & function for a table.
void GenTable(const StructDef &struct_def) {
if (parser_.opts.generate_object_based_api) { GenNativeTable(struct_def); }
// Generate an accessor struct, with methods of the form:
// type name() const { return GetField<type>(offset, defaultval); }
GenComment(struct_def.doc_comment);
code_.SetValue("STRUCT_NAME", Name(struct_def));
code_ +=
"struct {{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS"
" : private flatbuffers::Table {";
if (parser_.opts.generate_object_based_api) {
code_ += " typedef {{NATIVE_NAME}} NativeTableType;";
}
if (parser_.opts.mini_reflect != IDLOptions::kNone) {
code_ += " static const flatbuffers::TypeTable *MiniReflectTypeTable() {";
code_ += " return {{STRUCT_NAME}}TypeTable();";
code_ += " }";
}
GenFullyQualifiedNameGetter(struct_def, Name(struct_def));
// Generate field id constants.
if (struct_def.fields.vec.size() > 0) {
// We need to add a trailing comma to all elements except the last one as
// older versions of gcc complain about this.
code_.SetValue("SEP", "");
code_ += " enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_.SetValue("OFFSET_VALUE", NumToString(field.value.offset));
code_ += "{{SEP}} {{OFFSET_NAME}} = {{OFFSET_VALUE}}\\";
code_.SetValue("SEP", ",\n");
}
code_ += "";
code_ += " };";
}
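    // e.g. a hypothetical table with fields "hp" and "name" yields roughly:
    //   enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    //     VT_HP = 4,
    //     VT_NAME = 6
    //   };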
// Generate the accessors.
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
const bool is_struct = IsStruct(field.value.type);
const bool is_scalar = IsScalar(field.value.type.base_type);
code_.SetValue("FIELD_NAME", Name(field));
// Call a different accessor for pointers, that indirects.
std::string accessor = "";
if (is_scalar) {
accessor = "GetField<";
} else if (is_struct) {
accessor = "GetStruct<";
} else {
accessor = "GetPointer<";
}
auto offset_str = GenFieldOffsetName(field);
auto offset_type =
GenTypeGet(field.value.type, "", "const ", " *", false);
auto call = accessor + offset_type + ">(" + offset_str;
// Default value as second arg for non-pointer types.
if (is_scalar) { call += ", " + GenDefaultConstant(field); }
call += ")";
std::string afterptr = " *" + NullableExtension();
GenComment(field.doc_comment, " ");
code_.SetValue("FIELD_TYPE", GenTypeGet(field.value.type, " ", "const ",
afterptr.c_str(), true));
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, call));
code_.SetValue("NULLABLE_EXT", NullableExtension());
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (field.value.type.base_type == BASE_TYPE_UNION) {
auto u = field.value.type.enum_def;
if (!field.value.type.enum_def->uses_multiple_type_instances)
code_ +=
" template<typename T> "
"const T *{{NULLABLE_EXT}}{{FIELD_NAME}}_as() const;";
for (auto u_it = u->vals.vec.begin(); u_it != u->vals.vec.end();
++u_it) {
auto &ev = **u_it;
if (ev.union_type.base_type == BASE_TYPE_NONE) { continue; }
auto full_struct_name = GetUnionElement(ev, true, true);
          // @TODO: Maybe make these decisions more universal? How?
code_.SetValue(
"U_GET_TYPE",
EscapeKeyword(field.name + UnionTypeFieldSuffix()));
code_.SetValue(
"U_ELEMENT_TYPE",
WrapInNameSpace(u->defined_namespace, GetEnumValUse(*u, ev)));
code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *");
code_.SetValue("U_FIELD_NAME", Name(field) + "_as_" + Name(ev));
code_.SetValue("U_NULLABLE", NullableExtension());
// `const Type *union_name_asType() const` accessor.
code_ += " {{U_FIELD_TYPE}}{{U_NULLABLE}}{{U_FIELD_NAME}}() const {";
code_ +=
" return {{U_GET_TYPE}}() == {{U_ELEMENT_TYPE}} ? "
"static_cast<{{U_FIELD_TYPE}}>({{FIELD_NAME}}()) "
": nullptr;";
code_ += " }";
}
}
if (parser_.opts.mutable_buffer) {
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("SET_FN", "SetField<" + type + ">");
code_.SetValue("OFFSET_NAME", offset_str);
code_.SetValue("FIELD_TYPE", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + Name(field)));
code_.SetValue("DEFAULT_VALUE", GenDefaultConstant(field));
code_ +=
" bool mutate_{{FIELD_NAME}}({{FIELD_TYPE}} "
"_{{FIELD_NAME}}) {";
code_ +=
" return {{SET_FN}}({{OFFSET_NAME}}, {{FIELD_VALUE}}, "
"{{DEFAULT_VALUE}});";
code_ += " }";
} else {
auto postptr = " *" + NullableExtension();
auto type =
GenTypeGet(field.value.type, " ", "", postptr.c_str(), true);
auto underlying = accessor + type + ">(" + offset_str + ")";
code_.SetValue("FIELD_TYPE", type);
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, true, underlying));
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
}
}
auto nested = field.attributes.Lookup("nested_flatbuffer");
if (nested) {
std::string qualified_name = nested->constant;
auto nested_root = parser_.LookupStruct(nested->constant);
if (nested_root == nullptr) {
qualified_name = parser_.current_namespace_->GetFullyQualifiedName(
nested->constant);
nested_root = parser_.LookupStruct(qualified_name);
}
FLATBUFFERS_ASSERT(nested_root); // Guaranteed to exist by parser.
(void)nested_root;
code_.SetValue("CPP_NAME", TranslateNameSpace(qualified_name));
code_ += " const {{CPP_NAME}} *{{FIELD_NAME}}_nested_root() const {";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>({{FIELD_NAME}}()->Data());";
code_ += " }";
}
if (field.flexbuffer) {
code_ +=
" flexbuffers::Reference {{FIELD_NAME}}_flexbuffer_root()"
" const {";
// Both Data() and size() are const-methods, therefore call order doesn't matter.
code_ +=
" return flexbuffers::GetRoot({{FIELD_NAME}}()->Data(), "
"{{FIELD_NAME}}()->size());";
code_ += " }";
}
// Generate a comparison function for this field if it is a key.
if (field.key) {
GenKeyFieldMethods(field);
}
}
// Generate a verifier function that can check a buffer from an untrusted
// source will never cause reads outside the buffer.
code_ += " bool Verify(flatbuffers::Verifier &verifier) const {";
code_ += " return VerifyTableStart(verifier)\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) { continue; }
GenVerifyCall(field, " &&\n ");
}
code_ += " &&\n verifier.EndTable();";
code_ += " }";
if (parser_.opts.generate_object_based_api) {
// Generate the UnPack() pre declaration.
code_ +=
" " + TableUnPackSignature(struct_def, true, parser_.opts) + ";";
code_ +=
" " + TableUnPackToSignature(struct_def, true, parser_.opts) + ";";
code_ += " " + TablePackSignature(struct_def, true, parser_.opts) + ";";
}
code_ += "};"; // End of table.
code_ += "";
// Explicit specializations for union accessors
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated || field.value.type.base_type != BASE_TYPE_UNION) {
continue;
}
auto u = field.value.type.enum_def;
if (u->uses_multiple_type_instances) continue;
code_.SetValue("FIELD_NAME", Name(field));
for (auto u_it = u->vals.vec.begin(); u_it != u->vals.vec.end(); ++u_it) {
auto &ev = **u_it;
if (ev.union_type.base_type == BASE_TYPE_NONE) { continue; }
auto full_struct_name = GetUnionElement(ev, true, true);
code_.SetValue(
"U_ELEMENT_TYPE",
WrapInNameSpace(u->defined_namespace, GetEnumValUse(*u, ev)));
code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *");
code_.SetValue("U_ELEMENT_NAME", full_struct_name);
code_.SetValue("U_FIELD_NAME", Name(field) + "_as_" + Name(ev));
// `template<> const T *union_name_as<T>() const` accessor.
code_ +=
"template<> "
"inline {{U_FIELD_TYPE}}{{STRUCT_NAME}}::{{FIELD_NAME}}_as"
"<{{U_ELEMENT_NAME}}>() const {";
code_ += " return {{U_FIELD_NAME}}();";
code_ += "}";
code_ += "";
}
}
GenBuilders(struct_def);
if (parser_.opts.generate_object_based_api) {
// Generate a pre-declaration for a CreateX method that works with an
// unpacked C++ object.
code_ += TableCreateSignature(struct_def, true, parser_.opts) + ";";
code_ += "";
}
}
void GenBuilders(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", Name(struct_def));
// Generate a builder struct:
code_ += "struct {{STRUCT_NAME}}Builder {";
code_ += " flatbuffers::FlatBufferBuilder &fbb_;";
code_ += " flatbuffers::uoffset_t start_;";
bool has_string_or_vector_fields = false;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
const bool is_scalar = IsScalar(field.value.type.base_type);
const bool is_string = field.value.type.base_type == BASE_TYPE_STRING;
const bool is_vector = field.value.type.base_type == BASE_TYPE_VECTOR;
if (is_string || is_vector) { has_string_or_vector_fields = true; }
std::string offset = GenFieldOffsetName(field);
std::string name = GenUnderlyingCast(field, false, Name(field));
std::string value = is_scalar ? GenDefaultConstant(field) : "";
// Generate accessor functions of the form:
// void add_name(type name) {
// fbb_.AddElement<type>(offset, name, default);
// }
code_.SetValue("FIELD_NAME", Name(field));
code_.SetValue("FIELD_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("ADD_OFFSET", Name(struct_def) + "::" + offset);
code_.SetValue("ADD_NAME", name);
code_.SetValue("ADD_VALUE", value);
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("ADD_FN", "AddElement<" + type + ">");
} else if (IsStruct(field.value.type)) {
code_.SetValue("ADD_FN", "AddStruct");
} else {
code_.SetValue("ADD_FN", "AddOffset");
}
code_ += " void add_{{FIELD_NAME}}({{FIELD_TYPE}}{{FIELD_NAME}}) {";
code_ += " fbb_.{{ADD_FN}}(\\";
if (is_scalar) {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}}, {{ADD_VALUE}});";
} else {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}});";
}
code_ += " }";
}
}
// Builder constructor
code_ +=
" explicit {{STRUCT_NAME}}Builder(flatbuffers::FlatBufferBuilder "
"&_fbb)";
code_ += " : fbb_(_fbb) {";
code_ += " start_ = fbb_.StartTable();";
code_ += " }";
// Assignment operator;
code_ +=
" {{STRUCT_NAME}}Builder &operator="
"(const {{STRUCT_NAME}}Builder &);";
// Finish() function.
code_ += " flatbuffers::Offset<{{STRUCT_NAME}}> Finish() {";
code_ += " const auto end = fbb_.EndTable(start_);";
code_ += " auto o = flatbuffers::Offset<{{STRUCT_NAME}}>(end);";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && field.required) {
code_.SetValue("FIELD_NAME", Name(field));
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_ += " fbb_.Required(o, {{STRUCT_NAME}}::{{OFFSET_NAME}});";
}
}
code_ += " return o;";
code_ += " }";
code_ += "};";
code_ += "";
// Generate a convenient CreateX function that uses the above builder
// to create a table in one go.
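    // Sketch for a hypothetical table "Monster { hp: short = 100; name: string; }"
    // (schema and defaults are illustrative only):
    //   inline flatbuffers::Offset<Monster> CreateMonster(
    //       flatbuffers::FlatBufferBuilder &_fbb,
    //       int16_t hp = 100,
    //       flatbuffers::Offset<flatbuffers::String> name = 0) {
    //     MonsterBuilder builder_(_fbb);
    //     builder_.add_name(name);
    //     builder_.add_hp(hp);
    //     return builder_.Finish();
    //   }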
code_ +=
"inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) { GenParam(field, false, ",\n "); }
}
code_ += ") {";
code_ += " {{STRUCT_NAME}}Builder builder_(_fbb);";
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1;
size; size /= 2) {
for (auto it = struct_def.fields.vec.rbegin();
it != struct_def.fields.vec.rend(); ++it) {
const auto &field = **it;
if (!field.deprecated && (!struct_def.sortbysize ||
size == SizeOf(field.value.type.base_type))) {
code_.SetValue("FIELD_NAME", Name(field));
code_ += " builder_.add_{{FIELD_NAME}}({{FIELD_NAME}});";
}
}
}
code_ += " return builder_.Finish();";
code_ += "}";
code_ += "";
// Generate a CreateXDirect function with vector types as parameters
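    // Sketch (same hypothetical "Monster"): the Direct overload accepts
    // "const char *name" and "const std::vector<T> *" parameters, wraps them
    // via _fbb.CreateString() / _fbb.CreateVector*(), and forwards the
    // resulting offsets to CreateMonster().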
if (has_string_or_vector_fields) {
code_ += "inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}Direct(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) { GenParam(field, true, ",\n "); }
}
// Need to call "Create" with the struct namespace.
const auto qualified_create_name =
struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += ") {";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
code_.SetValue("FIELD_NAME", Name(field));
if (field.value.type.base_type == BASE_TYPE_STRING) {
code_ +=
" auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? "
"_fbb.CreateString({{FIELD_NAME}}) : 0;";
} else if (field.value.type.base_type == BASE_TYPE_VECTOR) {
code_ += " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? \\";
const auto vtype = field.value.type.VectorType();
if (IsStruct(vtype)) {
const auto type = WrapInNameSpace(*vtype.struct_def);
code_ += "_fbb.CreateVectorOfStructs<" + type + ">\\";
} else {
const auto type = GenTypeWire(vtype, "", false);
code_ += "_fbb.CreateVector<" + type + ">\\";
}
code_ += "(*{{FIELD_NAME}}) : 0;";
}
}
}
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
code_.SetValue("FIELD_NAME", Name(field));
code_ += ",\n {{FIELD_NAME}}\\";
if (field.value.type.base_type == BASE_TYPE_STRING ||
field.value.type.base_type == BASE_TYPE_VECTOR) {
code_ += "__\\";
}
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
std::string GenUnionUnpackVal(const FieldDef &afield,
const char *vec_elem_access,
const char *vec_type_access) {
return afield.value.type.enum_def->name +
"Union::UnPack(" + "_e" + vec_elem_access + ", " +
EscapeKeyword(afield.name + UnionTypeFieldSuffix()) +
"()" + vec_type_access + ", _resolver)";
}
std::string GenUnpackVal(const Type &type, const std::string &val,
bool invector, const FieldDef &afield) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return val + "->str()";
}
case BASE_TYPE_STRUCT: {
const auto name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) {
return "flatbuffers::UnPack(*" + val + ")";
} else if (invector || afield.native_inline) {
return "*" + val;
} else {
const auto ptype = GenTypeNativePtr(name, &afield, true);
return ptype + "(new " + name + "(*" + val + "))";
}
} else {
const auto ptype = GenTypeNativePtr(
NativeName(name, type.struct_def, parser_.opts), &afield, true);
return ptype + "(" + val + "->UnPack(_resolver))";
}
}
case BASE_TYPE_UNION: {
return GenUnionUnpackVal(
afield, invector ? "->Get(_i)" : "",
invector ? ("->GetEnum<" + type.enum_def->name + ">(_i)").c_str()
: "");
}
default: {
return val;
break;
}
}
};
std::string GenUnpackFieldStatement(const FieldDef &field,
const FieldDef *union_field) {
std::string code;
switch (field.value.type.base_type) {
case BASE_TYPE_VECTOR: {
auto cpp_type = field.attributes.Lookup("cpp_type");
std::string indexing;
if (field.value.type.enum_def) {
indexing += "static_cast<" +
WrapInNameSpace(*field.value.type.enum_def) + ">(";
}
indexing += "_e->Get(_i)";
if (field.value.type.enum_def) { indexing += ")"; }
if (field.value.type.element == BASE_TYPE_BOOL) { indexing += " != 0"; }
// Generate code that pushes data from _e to _o in the form:
// for (uoffset_t i = 0; i < _e->size(); ++i) {
// _o->field.push_back(_e->Get(_i));
// }
auto name = Name(field);
if (field.value.type.element == BASE_TYPE_UTYPE) {
name = StripUnionType(Name(field));
}
auto access =
field.value.type.element == BASE_TYPE_UTYPE
? ".type"
: (field.value.type.element == BASE_TYPE_UNION ? ".value" : "");
code += "{ _o->" + name + ".resize(_e->size()); ";
code += "for (flatbuffers::uoffset_t _i = 0;";
code += " _i < _e->size(); _i++) { ";
if (cpp_type) {
// Generate code that resolves the cpp pointer type, of the form:
// if (resolver)
// (*resolver)(&_o->field, (hash_value_t)(_e));
// else
// _o->field = nullptr;
code += "//vector resolver, " + PtrType(&field) + "\n";
code += "if (_resolver) ";
code += "(*_resolver)";
code += "(reinterpret_cast<void **>(&_o->" + name + "[_i]" + access + "), ";
code += "static_cast<flatbuffers::hash_value_t>(" + indexing + "));";
if (PtrType(&field) == "naked") {
code += " else ";
code += "_o->" + name + "[_i]" + access + " = nullptr";
} else {
//code += " else ";
//code += "_o->" + name + "[_i]" + access + " = " + GenTypeNativePtr(cpp_type->constant, &field, true) + "();";
code += "/* else do nothing */";
}
} else {
code += "_o->" + name + "[_i]" + access + " = ";
code +=
GenUnpackVal(field.value.type.VectorType(), indexing, true, field);
}
code += "; } }";
break;
}
case BASE_TYPE_UTYPE: {
FLATBUFFERS_ASSERT(union_field->value.type.base_type == BASE_TYPE_UNION);
// Generate code that sets the union type, of the form:
// _o->field.type = _e;
code += "_o->" + union_field->name + ".type = _e;";
break;
}
case BASE_TYPE_UNION: {
// Generate code that sets the union value, of the form:
// _o->field.value = Union::Unpack(_e, field_type(), resolver);
code += "_o->" + Name(field) + ".value = ";
code += GenUnionUnpackVal(field, "", "");
code += ";";
break;
}
default: {
auto cpp_type = field.attributes.Lookup("cpp_type");
if (cpp_type) {
// Generate code that resolves the cpp pointer type, of the form:
// if (resolver)
// (*resolver)(&_o->field, (hash_value_t)(_e));
// else
// _o->field = nullptr;
code += "//scalar resolver, " + PtrType(&field) + " \n";
code += "if (_resolver) ";
code += "(*_resolver)";
code += "(reinterpret_cast<void **>(&_o->" + Name(field) + "), ";
code += "static_cast<flatbuffers::hash_value_t>(_e));";
if (PtrType(&field) == "naked") {
code += " else ";
code += "_o->" + Name(field) + " = nullptr;";
} else {
//code += " else ";
//code += "_o->" + Name(field) + " = " + GenTypeNativePtr(cpp_type->constant, &field, true) + "();";
code += "/* else do nothing */;";
}
} else {
// Generate code for assigning the value, of the form:
// _o->field = value;
code += "_o->" + Name(field) + " = ";
code += GenUnpackVal(field.value.type, "_e", false, field) + ";";
}
break;
}
}
return code;
}
std::string GenCreateParam(const FieldDef &field) {
const IDLOptions &opts = parser_.opts;
std::string value = "_o->";
if (field.value.type.base_type == BASE_TYPE_UTYPE) {
value += StripUnionType(Name(field));
value += ".type";
} else {
value += Name(field);
}
if (field.value.type.base_type != BASE_TYPE_VECTOR && field.attributes.Lookup("cpp_type")) {
auto type = GenTypeBasic(field.value.type, false);
value =
"_rehasher ? "
"static_cast<" +
type + ">((*_rehasher)(" + value + GenPtrGet(field) + ")) : 0";
}
std::string code;
switch (field.value.type.base_type) {
// String fields are of the form:
// _fbb.CreateString(_o->field)
case BASE_TYPE_STRING: {
code += "_fbb.CreateString(" + value + ")";
// For optional fields, check to see if there actually is any data
// in _o->field before attempting to access it. If there isn't,
// depending on set_empty_to_null either set it to 0 or an empty string.
if (!field.required) {
auto empty_value =
opts.set_empty_to_null ? "0" : "_fbb.CreateSharedString(\"\")";
code = value + ".empty() ? " + empty_value + " : " + code;
}
break;
}
// Vector fields come in several flavours, of the forms:
// _fbb.CreateVector(_o->field);
// _fbb.CreateVector((const utype*)_o->field.data(), _o->field.size());
// _fbb.CreateVectorOfStrings(_o->field)
// _fbb.CreateVectorOfStructs(_o->field)
      //   _fbb.CreateVector<Offset<T>>(_o->field.size(), [&](size_t i) {
// return CreateT(_fbb, _o->Get(i), rehasher);
// });
case BASE_TYPE_VECTOR: {
auto vector_type = field.value.type.VectorType();
switch (vector_type.base_type) {
case BASE_TYPE_STRING: {
code += "_fbb.CreateVectorOfStrings(" + value + ")";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(vector_type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
code += "_fbb.CreateVectorOfNativeStructs<";
code += WrapInNameSpace(*vector_type.struct_def) + ">";
} else {
code += "_fbb.CreateVectorOfStructs";
}
code += "(" + value + ")";
} else {
code += "_fbb.CreateVector<flatbuffers::Offset<";
code += WrapInNameSpace(*vector_type.struct_def) + ">> ";
code += "(" + value + ".size(), ";
code += "[](size_t i, _VectorArgs *__va) { ";
code += "return Create" + vector_type.struct_def->name;
code += "(*__va->__fbb, __va->_" + value + "[i]" +
GenPtrGet(field) + ", ";
code += "__va->__rehasher); }, &_va )";
}
break;
}
case BASE_TYPE_BOOL: {
code += "_fbb.CreateVector(" + value + ")";
break;
}
case BASE_TYPE_UNION: {
code +=
"_fbb.CreateVector<flatbuffers::"
"Offset<void>>(" +
value +
".size(), [](size_t i, _VectorArgs *__va) { "
"return __va->_" +
value + "[i].Pack(*__va->__fbb, __va->__rehasher); }, &_va)";
break;
}
case BASE_TYPE_UTYPE: {
value = StripUnionType(value);
code += "_fbb.CreateVector<uint8_t>(" + value +
".size(), [](size_t i, _VectorArgs *__va) { "
"return static_cast<uint8_t>(__va->_" +
value + "[i].type); }, &_va)";
break;
}
default: {
if (field.value.type.enum_def) {
// For enumerations, we need to get access to the array data for
// the underlying storage type (eg. uint8_t).
const auto basetype = GenTypeBasic(
field.value.type.enum_def->underlying_type, false);
code += "_fbb.CreateVectorScalarCast<" + basetype +
">(flatbuffers::data(" + value + "), " + value +
".size())";
} else if (field.attributes.Lookup("cpp_type")) {
auto type = GenTypeBasic(vector_type, false);
code += "_fbb.CreateVector<" + type + ">(" + value + ".size(), ";
code += "[](size_t i, _VectorArgs *__va) { ";
code += "return __va->__rehasher ? ";
code += "static_cast<" + type + ">((*__va->__rehasher)";
code += "(__va->_" + value + "[i]" + GenPtrGet(field) + ")) : 0";
code += "; }, &_va )";
} else {
code += "_fbb.CreateVector(" + value + ")";
}
break;
}
}
// If set_empty_to_null option is enabled, for optional fields, check to
// see if there actually is any data in _o->field before attempting to
// access it.
if (opts.set_empty_to_null && !field.required) {
code = value + ".size() ? " + code + " : 0";
}
break;
}
case BASE_TYPE_UNION: {
// _o->field.Pack(_fbb);
code += value + ".Pack(_fbb)";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
code += "flatbuffers::Pack(" + value + ")";
} else if (field.native_inline) {
code += "&" + value;
} else {
code += value + " ? " + value + GenPtrGet(field) + " : 0";
}
} else {
// _o->field ? CreateT(_fbb, _o->field.get(), _rehasher);
const auto type = field.value.type.struct_def->name;
code += value + " ? Create" + type;
code += "(_fbb, " + value + GenPtrGet(field) + ", _rehasher)";
code += " : 0";
}
break;
}
default: {
code += value;
break;
}
}
return code;
}
// Generate code for tables that needs to come after the regular definition.
void GenTablePost(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", Name(struct_def));
code_.SetValue("NATIVE_NAME",
NativeName(Name(struct_def), &struct_def, parser_.opts));
if (parser_.opts.generate_object_based_api) {
// Generate the X::UnPack() method.
code_ += "inline " +
TableUnPackSignature(struct_def, false, parser_.opts) + " {";
code_ += " auto _o = new {{NATIVE_NAME}}();";
code_ += " UnPackTo(_o, _resolver);";
code_ += " return _o;";
code_ += "}";
code_ += "";
code_ += "inline " +
TableUnPackToSignature(struct_def, false, parser_.opts) + " {";
code_ += " (void)_o;";
code_ += " (void)_resolver;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) { continue; }
// Assign a value from |this| to |_o|. Values from |this| are stored
// in a variable |_e| by calling this->field_type(). The value is then
// assigned to |_o| using the GenUnpackFieldStatement.
const bool is_union = field.value.type.base_type == BASE_TYPE_UTYPE;
const auto statement =
GenUnpackFieldStatement(field, is_union ? *(it + 1) : nullptr);
code_.SetValue("FIELD_NAME", Name(field));
auto prefix = " { auto _e = {{FIELD_NAME}}(); ";
auto check = IsScalar(field.value.type.base_type) ? "" : "if (_e) ";
auto postfix = " };";
code_ += std::string(prefix) + check + statement + postfix;
}
code_ += "}";
code_ += "";
// Generate the X::Pack member function that simply calls the global
// CreateX function.
code_ += "inline " + TablePackSignature(struct_def, false, parser_.opts) +
" {";
code_ += " return Create{{STRUCT_NAME}}(_fbb, _o, _rehasher);";
code_ += "}";
code_ += "";
// Generate a CreateX method that works with an unpacked C++ object.
code_ += "inline " +
TableCreateSignature(struct_def, false, parser_.opts) + " {";
code_ += " (void)_rehasher;";
code_ += " (void)_o;";
code_ +=
" struct _VectorArgs "
"{ flatbuffers::FlatBufferBuilder *__fbb; "
"const " +
NativeName(Name(struct_def), &struct_def, parser_.opts) +
"* __o; "
"const flatbuffers::rehasher_function_t *__rehasher; } _va = { "
"&_fbb, _o, _rehasher}; (void)_va;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) { continue; }
code_ += " auto _" + Name(field) + " = " + GenCreateParam(field) + ";";
}
// Need to call "Create" with the struct namespace.
const auto qualified_create_name =
struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) { continue; }
bool pass_by_address = false;
if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) { pass_by_address = true; }
}
}
// Call the CreateX function using values from |_o|.
if (pass_by_address) {
code_ += ",\n &_" + Name(field) + "\\";
} else {
code_ += ",\n _" + Name(field) + "\\";
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
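  // field.padding holds the number of pad bytes needed after the field (at
  // most 15, enforced by the assert below). GenPadding decomposes it into
  // power-of-two chunks and invokes the callback once per chunk; e.g.
  // padding == 6 triggers f(16, ...) and f(32, ...), which PaddingDefinition
  // turns into one int16_t and one int32_t padding member.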
static void GenPadding(
const FieldDef &field, std::string *code_ptr, int *id,
const std::function<void(int bits, std::string *code_ptr, int *id)> &f) {
if (field.padding) {
for (int i = 0; i < 4; i++) {
if (static_cast<int>(field.padding) & (1 << i)) {
f((1 << i) * 8, code_ptr, id);
}
}
FLATBUFFERS_ASSERT(!(field.padding & ~0xF));
}
}
static void PaddingDefinition(int bits, std::string *code_ptr, int *id) {
*code_ptr += " int" + NumToString(bits) + "_t padding" +
NumToString((*id)++) + "__;";
}
static void PaddingInitializer(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += ",\n padding" + NumToString((*id)++) + "__(0)";
}
static void PaddingNoop(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += " (void)padding" + NumToString((*id)++) + "__;";
}
// Generate an accessor struct with constructor for a flatbuffers struct.
void GenStruct(const StructDef &struct_def) {
// Generate an accessor struct, with private variables of the form:
// type name_;
// Generates manual padding and alignment.
// Variables are private because they contain little endian data on all
// platforms.
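    // Sketch for a hypothetical "struct Vec3 { x: float; y: float; z: float; }"
    // (name and fields are illustrative only):
    //   FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Vec3 FLATBUFFERS_FINAL_CLASS {
    //    private:
    //     float x_;
    //     float y_;
    //     float z_;
    //    public:
    //     Vec3() { memset(this, 0, sizeof(Vec3)); }
    //     float x() const { return flatbuffers::EndianScalar(x_); }
    //     // ... y(), z(), and an all-fields constructor ...
    //   };
    //   FLATBUFFERS_STRUCT_END(Vec3, 12);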
GenComment(struct_def.doc_comment);
code_.SetValue("ALIGN", NumToString(struct_def.minalign));
code_.SetValue("STRUCT_NAME", Name(struct_def));
code_ +=
"FLATBUFFERS_MANUALLY_ALIGNED_STRUCT({{ALIGN}}) "
"{{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS {";
code_ += " private:";
int padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
code_.SetValue("FIELD_TYPE",
GenTypeGet(field.value.type, " ", "", " ", false));
code_.SetValue("FIELD_NAME", Name(field));
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}_;";
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingDefinition);
code_ += padding;
}
}
// Generate GetFullyQualifiedName
code_ += "";
code_ += " public:";
GenFullyQualifiedNameGetter(struct_def, Name(struct_def));
// Generate a default constructor.
code_ += " {{STRUCT_NAME}}() {";
code_ += " memset(this, 0, sizeof({{STRUCT_NAME}}));";
code_ += " }";
// Generate a constructor that takes all fields as arguments.
std::string arg_list;
std::string init_list;
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
const auto member_name = Name(field) + "_";
const auto arg_name = "_" + Name(field);
const auto arg_type =
GenTypeGet(field.value.type, " ", "const ", " &", true);
if (it != struct_def.fields.vec.begin()) {
arg_list += ", ";
init_list += ",\n ";
}
arg_list += arg_type;
arg_list += arg_name;
init_list += member_name;
if (IsScalar(field.value.type.base_type)) {
auto type = GenUnderlyingCast(field, false, arg_name);
init_list += "(flatbuffers::EndianScalar(" + type + "))";
} else {
init_list += "(" + arg_name + ")";
}
if (field.padding) {
GenPadding(field, &init_list, &padding_id, PaddingInitializer);
}
}
if (!arg_list.empty()) {
code_.SetValue("ARG_LIST", arg_list);
code_.SetValue("INIT_LIST", init_list);
code_ += " {{STRUCT_NAME}}({{ARG_LIST}})";
code_ += " : {{INIT_LIST}} {";
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingNoop);
code_ += padding;
}
}
code_ += " }";
}
// Generate accessor methods of the form:
// type name() const { return flatbuffers::EndianScalar(name_); }
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
auto field_type = GenTypeGet(field.value.type, " ", "const ", " &", true);
auto is_scalar = IsScalar(field.value.type.base_type);
auto member = Name(field) + "_";
auto value =
is_scalar ? "flatbuffers::EndianScalar(" + member + ")" : member;
code_.SetValue("FIELD_NAME", Name(field));
code_.SetValue("FIELD_TYPE", field_type);
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, value));
GenComment(field.doc_comment, " ");
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (parser_.opts.mutable_buffer) {
auto mut_field_type = GenTypeGet(field.value.type, " ", "", " &", true);
code_.SetValue("FIELD_TYPE", mut_field_type);
if (is_scalar) {
code_.SetValue("ARG", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + Name(field)));
code_ += " void mutate_{{FIELD_NAME}}({{ARG}} _{{FIELD_NAME}}) {";
code_ +=
" flatbuffers::WriteScalar(&{{FIELD_NAME}}_, "
"{{FIELD_VALUE}});";
code_ += " }";
} else {
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_NAME}}_;";
code_ += " }";
}
}
// Generate a comparison function for this field if it is a key.
if (field.key) {
GenKeyFieldMethods(field);
}
}
code_.SetValue("NATIVE_NAME", Name(struct_def));
GenOperatorNewDelete(struct_def);
code_ += "};";
code_.SetValue("STRUCT_BYTE_SIZE", NumToString(struct_def.bytesize));
code_ += "FLATBUFFERS_STRUCT_END({{STRUCT_NAME}}, {{STRUCT_BYTE_SIZE}});";
if (parser_.opts.gen_compare) GenCompareOperator(struct_def, "()");
code_ += "";
}
// Set up the correct namespace. Only open a namespace if the existing one is
// different (closing/opening only what is necessary).
//
// The file must start and end with an empty (or null) namespace so that
// namespaces are properly opened and closed.
void SetNameSpace(const Namespace *ns) {
if (cur_name_space_ == ns) { return; }
// Compute the size of the longest common namespace prefix.
// If cur_name_space is A::B::C::D and ns is A::B::E::F::G,
// the common prefix is A::B:: and we have old_size = 4, new_size = 5
// and common_prefix_size = 2
size_t old_size = cur_name_space_ ? cur_name_space_->components.size() : 0;
size_t new_size = ns ? ns->components.size() : 0;
size_t common_prefix_size = 0;
while (common_prefix_size < old_size && common_prefix_size < new_size &&
ns->components[common_prefix_size] ==
cur_name_space_->components[common_prefix_size]) {
common_prefix_size++;
}
// Close cur_name_space in reverse order to reach the common prefix.
// In the previous example, D then C are closed.
for (size_t j = old_size; j > common_prefix_size; --j) {
code_ += "} // namespace " + cur_name_space_->components[j - 1];
}
if (old_size != common_prefix_size) { code_ += ""; }
// open namespace parts to reach the ns namespace
// in the previous example, E, then F, then G are opened
for (auto j = common_prefix_size; j != new_size; ++j) {
code_ += "namespace " + ns->components[j] + " {";
}
if (new_size != common_prefix_size) { code_ += ""; }
cur_name_space_ = ns;
}
const TypedFloatConstantGenerator float_const_gen_;
};
} // namespace cpp
bool GenerateCPP(const Parser &parser, const std::string &path,
const std::string &file_name) {
cpp::CppGenerator generator(parser, path, file_name);
return generator.generate();
}
std::string CPPMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name) {
const auto filebase =
flatbuffers::StripPath(flatbuffers::StripExtension(file_name));
const auto included_files = parser.GetIncludedFilesRecursive(file_name);
std::string make_rule = GeneratedFileName(path, filebase) + ": ";
for (auto it = included_files.begin(); it != included_files.end(); ++it) {
make_rule += " " + *it;
}
return make_rule;
}
} // namespace flatbuffers
| 1 | 14,993 | this line can be lifted outside of the if-then? In fact only CreateString / CreateSharedString needs to be inside of it. | google-flatbuffers | java |
@@ -386,8 +386,8 @@ public class EpisodesApplyActionFragment extends Fragment implements Toolbar.OnM
mListView.setItemChecked(i, checked);
}
refreshToolbarState();
- toolbar.setTitle(getResources().getQuantityString(R.plurals.num_selected_label,
- checkedIds.size(), checkedIds.size()));
+// toolbar.setTitle(getResources().getQuantityString(R.plurals.num_selected_label,
+// checkedIds.size(), checkedIds.size()));
}
private void queueChecked() { | 1 | package de.danoeh.antennapod.dialog;
import android.os.Bundle;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ArrayAdapter;
import android.widget.ListView;
import androidx.annotation.IdRes;
import androidx.annotation.NonNull;
import androidx.annotation.PluralsRes;
import androidx.annotation.StringRes;
import androidx.appcompat.app.AlertDialog;
import androidx.appcompat.widget.Toolbar;
import androidx.collection.ArrayMap;
import androidx.fragment.app.Fragment;
import com.google.android.material.snackbar.Snackbar;
import com.leinardi.android.speeddial.SpeedDialView;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.activity.MainActivity;
import de.danoeh.antennapod.core.dialog.DownloadRequestErrorDialogCreator;
import de.danoeh.antennapod.model.feed.FeedItem;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.storage.DownloadRequestException;
import de.danoeh.antennapod.core.storage.DownloadRequester;
import de.danoeh.antennapod.core.util.FeedItemPermutors;
import de.danoeh.antennapod.core.util.LongList;
import de.danoeh.antennapod.model.feed.SortOrder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
public class EpisodesApplyActionFragment extends Fragment implements Toolbar.OnMenuItemClickListener {
public static final String TAG = "EpisodeActionFragment";
public static final int ACTION_ADD_TO_QUEUE = 1;
public static final int ACTION_REMOVE_FROM_QUEUE = 2;
private static final int ACTION_MARK_PLAYED = 4;
private static final int ACTION_MARK_UNPLAYED = 8;
public static final int ACTION_DOWNLOAD = 16;
public static final int ACTION_DELETE = 32;
public static final int ACTION_ALL = ACTION_ADD_TO_QUEUE | ACTION_REMOVE_FROM_QUEUE
| ACTION_MARK_PLAYED | ACTION_MARK_UNPLAYED | ACTION_DOWNLOAD | ACTION_DELETE;
/**
     * Specifies the UI bindings of an action (identified by #flag).
     *
     * Includes the menu / action item and the actual logic.
*/
private static class ActionBinding {
int flag;
@IdRes
final int actionItemId;
@NonNull
final Runnable action;
ActionBinding(int flag, @IdRes int actionItemId, @NonNull Runnable action) {
this.flag = flag;
this.actionItemId = actionItemId;
this.action = action;
}
}
private final List<? extends ActionBinding> actionBindings;
private final Map<Long, FeedItem> idMap = new ArrayMap<>();
private final List<FeedItem> episodes = new ArrayList<>();
private int actions;
private final List<String> titles = new ArrayList<>();
private final LongList checkedIds = new LongList();
private ListView mListView;
private ArrayAdapter<String> mAdapter;
private SpeedDialView mSpeedDialView;
private Toolbar toolbar;
public EpisodesApplyActionFragment() {
actionBindings = Arrays.asList(
new ActionBinding(ACTION_ADD_TO_QUEUE,
R.id.add_to_queue_batch, this::queueChecked),
new ActionBinding(ACTION_REMOVE_FROM_QUEUE,
R.id.remove_from_queue_batch, this::removeFromQueueChecked),
new ActionBinding(ACTION_MARK_PLAYED,
R.id.mark_read_batch, this::markedCheckedPlayed),
new ActionBinding(ACTION_MARK_UNPLAYED,
R.id.mark_unread_batch, this::markedCheckedUnplayed),
new ActionBinding(ACTION_DOWNLOAD,
R.id.download_batch, this::downloadChecked),
new ActionBinding(ACTION_DELETE,
R.id.delete_batch, this::deleteChecked)
);
}
public static EpisodesApplyActionFragment newInstance(List<FeedItem> items, int actions) {
EpisodesApplyActionFragment f = new EpisodesApplyActionFragment();
f.episodes.addAll(items);
for (FeedItem episode : items) {
f.idMap.put(episode.getId(), episode);
}
f.actions = actions;
return f;
}
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setRetainInstance(true);
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
View view = inflater.inflate(R.layout.episodes_apply_action_fragment, container, false);
toolbar = view.findViewById(R.id.toolbar);
toolbar.inflateMenu(R.menu.episodes_apply_action_options);
toolbar.setNavigationOnClickListener(v -> getParentFragmentManager().popBackStack());
toolbar.setOnMenuItemClickListener(this);
refreshToolbarState();
mListView = view.findViewById(android.R.id.list);
mListView.setChoiceMode(ListView.CHOICE_MODE_MULTIPLE);
mListView.setOnItemClickListener((listView, view1, position, rowId) -> {
long id = episodes.get(position).getId();
if (checkedIds.contains(id)) {
checkedIds.remove(id);
} else {
checkedIds.add(id);
}
refreshCheckboxes();
});
mListView.setOnItemLongClickListener((adapterView, view12, position, id) -> {
new AlertDialog.Builder(getActivity())
.setItems(R.array.batch_long_press_options, (dialogInterface, item) -> {
int direction;
if (item == 0) {
direction = -1;
} else {
direction = 1;
}
int currentPosition = position + direction;
while (currentPosition >= 0 && currentPosition < episodes.size()) {
long id1 = episodes.get(currentPosition).getId();
if (!checkedIds.contains(id1)) {
checkedIds.add(id1);
}
currentPosition += direction;
}
refreshCheckboxes();
}).show();
return true;
});
titles.clear();
for (FeedItem episode : episodes) {
titles.add(episode.getTitle());
}
mAdapter = new ArrayAdapter<>(getActivity(),
R.layout.simple_list_item_multiple_choice_on_start, titles);
mListView.setAdapter(mAdapter);
// Init action UI (via a FAB Speed Dial)
mSpeedDialView = view.findViewById(R.id.fabSD);
mSpeedDialView.inflate(R.menu.episodes_apply_action_speeddial);
// show only specified actions, and bind speed dial UIs to the actual logic
for (ActionBinding binding : actionBindings) {
if ((actions & binding.flag) == 0) {
mSpeedDialView.removeActionItemById(binding.actionItemId);
}
}
mSpeedDialView.setOnChangeListener(new SpeedDialView.OnChangeListener() {
@Override
public boolean onMainActionSelected() {
return false;
}
@Override
public void onToggleChanged(boolean open) {
if (open && checkedIds.size() == 0) {
((MainActivity) getActivity()).showSnackbarAbovePlayer(R.string.no_items_selected,
Snackbar.LENGTH_SHORT);
mSpeedDialView.close();
}
}
});
mSpeedDialView.setOnActionSelectedListener(actionItem -> {
ActionBinding selectedBinding = null;
for (ActionBinding binding : actionBindings) {
if (actionItem.getId() == binding.actionItemId) {
selectedBinding = binding;
break;
}
}
if (selectedBinding != null) {
selectedBinding.action.run();
} else {
Log.e(TAG, "Unrecognized speed dial action item. Do nothing. id=" + actionItem.getId());
}
return true;
});
refreshCheckboxes();
return view;
}
public void refreshToolbarState() {
MenuItem selectAllItem = toolbar.getMenu().findItem(R.id.select_toggle);
if (checkedIds.size() == episodes.size()) {
selectAllItem.setIcon(R.drawable.ic_select_none);
selectAllItem.setTitle(R.string.deselect_all_label);
} else {
selectAllItem.setIcon(R.drawable.ic_select_all);
selectAllItem.setTitle(R.string.select_all_label);
}
}
private static final Map<Integer, SortOrder> menuItemIdToSortOrder;
static {
Map<Integer, SortOrder> map = new ArrayMap<>();
map.put(R.id.sort_title_a_z, SortOrder.EPISODE_TITLE_A_Z);
map.put(R.id.sort_title_z_a, SortOrder.EPISODE_TITLE_Z_A);
map.put(R.id.sort_date_new_old, SortOrder.DATE_NEW_OLD);
map.put(R.id.sort_date_old_new, SortOrder.DATE_OLD_NEW);
map.put(R.id.sort_duration_long_short, SortOrder.DURATION_LONG_SHORT);
map.put(R.id.sort_duration_short_long, SortOrder.DURATION_SHORT_LONG);
menuItemIdToSortOrder = Collections.unmodifiableMap(map);
}
@Override
public boolean onMenuItemClick(MenuItem item) {
@StringRes int resId = 0;
switch (item.getItemId()) {
case R.id.select_options:
return true;
case R.id.select_toggle:
if (checkedIds.size() == episodes.size()) {
checkNone();
} else {
checkAll();
}
return true;
case R.id.check_all:
checkAll();
resId = R.string.selected_all_label;
break;
case R.id.check_none:
checkNone();
resId = R.string.deselected_all_label;
break;
case R.id.check_played:
checkPlayed(true);
resId = R.string.selected_played_label;
break;
case R.id.check_unplayed:
checkPlayed(false);
resId = R.string.selected_unplayed_label;
break;
case R.id.check_downloaded:
checkDownloaded(true);
resId = R.string.selected_downloaded_label;
break;
case R.id.check_not_downloaded:
checkDownloaded(false);
resId = R.string.selected_not_downloaded_label;
break;
case R.id.check_queued:
checkQueued(true);
resId = R.string.selected_queued_label;
break;
case R.id.check_not_queued:
checkQueued(false);
resId = R.string.selected_not_queued_label;
break;
case R.id.check_has_media:
checkWithMedia();
resId = R.string.selected_has_media_label;
break;
default: // handle various sort options
SortOrder sortOrder = menuItemIdToSortOrder.get(item.getItemId());
if (sortOrder != null) {
sort(sortOrder);
return true;
}
}
if (resId != 0) {
((MainActivity) getActivity()).showSnackbarAbovePlayer(resId, Snackbar.LENGTH_SHORT);
return true;
} else {
return false;
}
}
private void sort(@NonNull SortOrder sortOrder) {
FeedItemPermutors.getPermutor(sortOrder)
.reorder(episodes);
refreshTitles();
refreshCheckboxes();
}
private void checkAll() {
for (FeedItem episode : episodes) {
if (!checkedIds.contains(episode.getId())) {
checkedIds.add(episode.getId());
}
}
refreshCheckboxes();
}
private void checkNone() {
checkedIds.clear();
refreshCheckboxes();
}
private void checkPlayed(boolean isPlayed) {
for (FeedItem episode : episodes) {
if (episode.isPlayed() == isPlayed) {
if (!checkedIds.contains(episode.getId())) {
checkedIds.add(episode.getId());
}
} else {
if (checkedIds.contains(episode.getId())) {
checkedIds.remove(episode.getId());
}
}
}
refreshCheckboxes();
}
private void checkDownloaded(boolean isDownloaded) {
for (FeedItem episode : episodes) {
if (episode.hasMedia() && episode.getMedia().isDownloaded() == isDownloaded) {
if (!checkedIds.contains(episode.getId())) {
checkedIds.add(episode.getId());
}
} else {
if (checkedIds.contains(episode.getId())) {
checkedIds.remove(episode.getId());
}
}
}
refreshCheckboxes();
}
private void checkQueued(boolean isQueued) {
for (FeedItem episode : episodes) {
if (episode.isTagged(FeedItem.TAG_QUEUE) == isQueued) {
checkedIds.add(episode.getId());
} else {
checkedIds.remove(episode.getId());
}
}
refreshCheckboxes();
}
private void checkWithMedia() {
for (FeedItem episode : episodes) {
if (episode.hasMedia()) {
checkedIds.add(episode.getId());
} else {
checkedIds.remove(episode.getId());
}
}
refreshCheckboxes();
}
private void refreshTitles() {
titles.clear();
for (FeedItem episode : episodes) {
titles.add(episode.getTitle());
}
mAdapter.notifyDataSetChanged();
}
private void refreshCheckboxes() {
for (int i = 0; i < episodes.size(); i++) {
FeedItem episode = episodes.get(i);
boolean checked = checkedIds.contains(episode.getId());
mListView.setItemChecked(i, checked);
}
refreshToolbarState();
toolbar.setTitle(getResources().getQuantityString(R.plurals.num_selected_label,
checkedIds.size(), checkedIds.size()));
}
private void queueChecked() {
        // Check if an episode actually contains any media files before adding it to the queue
LongList toQueue = new LongList(checkedIds.size());
for (FeedItem episode : episodes) {
if (checkedIds.contains(episode.getId()) && episode.hasMedia()) {
toQueue.add(episode.getId());
}
}
DBWriter.addQueueItem(getActivity(), true, toQueue.toArray());
close(R.plurals.added_to_queue_batch_label, toQueue.size());
}
private void removeFromQueueChecked() {
DBWriter.removeQueueItem(getActivity(), true, checkedIds.toArray());
close(R.plurals.removed_from_queue_batch_label, checkedIds.size());
}
private void markedCheckedPlayed() {
DBWriter.markItemPlayed(FeedItem.PLAYED, checkedIds.toArray());
close(R.plurals.marked_read_batch_label, checkedIds.size());
}
private void markedCheckedUnplayed() {
DBWriter.markItemPlayed(FeedItem.UNPLAYED, checkedIds.toArray());
close(R.plurals.marked_unread_batch_label, checkedIds.size());
}
private void downloadChecked() {
        // download the checked episodes in the same order as they are currently displayed
List<FeedItem> toDownload = new ArrayList<>(checkedIds.size());
for (FeedItem episode : episodes) {
if (checkedIds.contains(episode.getId()) && episode.hasMedia() && !episode.getFeed().isLocalFeed()) {
toDownload.add(episode);
}
}
try {
DownloadRequester.getInstance().downloadMedia(getActivity(), true, toDownload.toArray(new FeedItem[0]));
} catch (DownloadRequestException e) {
e.printStackTrace();
DownloadRequestErrorDialogCreator.newRequestErrorDialog(getActivity(), e.getMessage());
}
close(R.plurals.downloading_batch_label, toDownload.size());
}
private void deleteChecked() {
int countHasMedia = 0;
int countNoMedia = 0;
for (long id : checkedIds.toArray()) {
FeedItem episode = idMap.get(id);
if (episode.hasMedia() && episode.getMedia().isDownloaded()) {
countHasMedia++;
DBWriter.deleteFeedMediaOfItem(getActivity(), episode.getMedia().getId());
} else {
countNoMedia++;
}
}
closeMore(R.plurals.deleted_multi_episode_batch_label, countNoMedia, countHasMedia);
}
private void close(@PluralsRes int msgId, int numItems) {
((MainActivity) getActivity()).showSnackbarAbovePlayer(
getResources().getQuantityString(msgId, numItems, numItems), Snackbar.LENGTH_LONG);
getActivity().getSupportFragmentManager().popBackStack();
}
private void closeMore(@PluralsRes int msgId, int countNoMedia, int countHasMedia) {
((MainActivity) getActivity()).showSnackbarAbovePlayer(
getResources().getQuantityString(msgId,
(countHasMedia + countNoMedia),
(countHasMedia + countNoMedia), countHasMedia),
Snackbar.LENGTH_LONG);
getActivity().getSupportFragmentManager().popBackStack();
}
}
| 1 | 20,033 | I think the `EpisodesApplyActionFragment` is unused now. So please delete it :) | AntennaPod-AntennaPod | java |
@@ -863,7 +863,7 @@ module Bolt
end
define('--log-level LEVEL',
"Set the log level for the console. Available options are",
- "debug, info, notice, warn, error, fatal, any.") do |level|
+ "trace, debug, info, warn, error, fatal, any.") do |level|
@options[:log] = { 'console' => { 'level' => level } }
end
define('--plugin PLUGIN', 'Select the plugin to use') do |plug| | 1 | # frozen_string_literal: true
# Note this file includes very few 'requires' because it expects to be used from the CLI.
require 'optparse'
module Bolt
class BoltOptionParser < OptionParser
OPTIONS = { inventory: %w[targets query rerun description],
authentication: %w[user password password-prompt private-key host-key-check ssl ssl-verify],
escalation: %w[run-as sudo-password sudo-password-prompt sudo-executable],
run_context: %w[concurrency inventoryfile save-rerun cleanup],
global_config_setters: %w[modulepath project configfile],
transports: %w[transport connect-timeout tty native-ssh ssh-command copy-command],
display: %w[format color verbose trace],
global: %w[help version debug log-level] }.freeze
ACTION_OPTS = OPTIONS.values.flatten.freeze
def get_help_text(subcommand, action = nil)
case subcommand
when 'apply'
{ flags: ACTION_OPTS + %w[noop execute compile-concurrency hiera-config],
banner: APPLY_HELP }
when 'command'
case action
when 'run'
{ flags: ACTION_OPTS + %w[env-var],
banner: COMMAND_RUN_HELP }
else
{ flags: OPTIONS[:global],
banner: COMMAND_HELP }
end
when 'file'
case action
when 'upload'
{ flags: ACTION_OPTS + %w[tmpdir],
banner: FILE_UPLOAD_HELP }
when 'download'
{ flags: ACTION_OPTS,
banner: FILE_DOWNLOAD_HELP }
else
{ flags: OPTIONS[:global],
banner: FILE_HELP }
end
when 'inventory'
case action
when 'show'
{ flags: OPTIONS[:inventory] + OPTIONS[:global] + %w[format inventoryfile boltdir configfile detail],
banner: INVENTORY_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: INVENTORY_HELP }
end
when 'group'
case action
when 'show'
{ flags: OPTIONS[:global] + %w[format inventoryfile boltdir configfile],
banner: GROUP_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: GROUP_HELP }
end
when 'plan'
case action
when 'convert'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters],
banner: PLAN_CONVERT_HELP }
when 'new'
{ flags: OPTIONS[:global] + %w[configfile project],
banner: PLAN_NEW_HELP }
when 'run'
{ flags: ACTION_OPTS + %w[params compile-concurrency tmpdir hiera-config],
banner: PLAN_RUN_HELP }
when 'show'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[filter format],
banner: PLAN_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: PLAN_HELP }
end
when 'project'
case action
when 'init'
{ flags: OPTIONS[:global] + %w[modules],
banner: PROJECT_INIT_HELP }
when 'migrate'
{ flags: OPTIONS[:global] + %w[inventoryfile boltdir configfile],
banner: PROJECT_MIGRATE_HELP }
else
{ flags: OPTIONS[:global],
banner: PROJECT_HELP }
end
when 'puppetfile'
case action
when 'install'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[puppetfile],
banner: PUPPETFILE_INSTALL_HELP }
when 'show-modules'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters],
banner: PUPPETFILE_SHOWMODULES_HELP }
when 'generate-types'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters],
banner: PUPPETFILE_GENERATETYPES_HELP }
else
{ flags: OPTIONS[:global],
banner: PUPPETFILE_HELP }
end
when 'script'
case action
when 'run'
{ flags: ACTION_OPTS + %w[tmpdir env-var],
banner: SCRIPT_RUN_HELP }
else
{ flags: OPTIONS[:global],
banner: SCRIPT_HELP }
end
when 'secret'
case action
when 'createkeys'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[plugin force],
banner: SECRET_CREATEKEYS_HELP }
when 'decrypt'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[plugin],
banner: SECRET_DECRYPT_HELP }
when 'encrypt'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[plugin],
banner: SECRET_ENCRYPT_HELP }
else
{ flags: OPTIONS[:global],
banner: SECRET_HELP }
end
when 'task'
case action
when 'run'
{ flags: ACTION_OPTS + %w[params tmpdir noop],
banner: TASK_RUN_HELP }
when 'show'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[filter format],
banner: TASK_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: TASK_HELP }
end
else
{ flags: OPTIONS[:global],
banner: BANNER }
end
end
BANNER = <<~HELP
NAME
bolt
USAGE
bolt <subcommand> [action] [options]
DESCRIPTION
Bolt is an orchestration tool that automates the manual work it takes to
maintain your infrastructure.
SUBCOMMANDS
apply Apply Puppet manifest code
command Run a command remotely
file Copy files between the controller and targets
group Show the list of groups in the inventory
inventory Show the list of targets an action would run on
plan Convert, create, show, and run Bolt plans
project Create and migrate Bolt projects
puppetfile Install and list modules and generate type references
script Upload a local script and run it remotely
secret Create encryption keys and encrypt and decrypt values
task Show and run Bolt tasks
HELP
APPLY_HELP = <<~HELP
NAME
apply
USAGE
bolt apply [manifest.pp] [options]
DESCRIPTION
Apply Puppet manifest code on the specified targets.
EXAMPLES
bolt apply manifest.pp -t target
bolt apply -e "file { '/etc/puppetlabs': ensure => present }" -t target
HELP
COMMAND_HELP = <<~HELP
NAME
command
USAGE
bolt command <action> [options]
DESCRIPTION
Run a command on the specified targets.
ACTIONS
run Run a command on the specified targets.
HELP
COMMAND_RUN_HELP = <<~HELP
NAME
run
USAGE
bolt command run <command> [options]
DESCRIPTION
Run a command on the specified targets.
EXAMPLES
bolt command run 'uptime' -t target1,target2
HELP
FILE_HELP = <<~HELP
NAME
file
USAGE
bolt file <action> [options]
DESCRIPTION
Copy files and directories between the controller and targets
ACTIONS
download Download a file or directory to the controller
upload Upload a local file or directory from the controller
HELP
FILE_DOWNLOAD_HELP = <<~HELP
NAME
download
USAGE
bolt file download <src> <dest> [options]
DESCRIPTION
Download a file or directory from one or more targets.
        Downloaded files and directories are saved to a subdirectory
matching the target's name under the destination directory. The
destination directory is expanded relative to the downloads
subdirectory of the project directory.
EXAMPLES
bolt file download /etc/ssh_config ssh_config -t all
HELP
FILE_UPLOAD_HELP = <<~HELP
NAME
upload
USAGE
bolt file upload <src> <dest> [options]
DESCRIPTION
Upload a local file or directory.
EXAMPLES
bolt file upload /tmp/source /etc/profile.d/login.sh -t target1
HELP
GROUP_HELP = <<~HELP
NAME
group
USAGE
bolt group <action> [options]
DESCRIPTION
Show the list of groups in the inventory.
ACTIONS
show Show the list of groups in the inventory
HELP
GROUP_SHOW_HELP = <<~HELP
NAME
show
USAGE
bolt group show [options]
DESCRIPTION
Show the list of groups in the inventory.
HELP
INVENTORY_HELP = <<~HELP
NAME
inventory
USAGE
bolt inventory <action> [options]
DESCRIPTION
Show the list of targets an action would run on.
ACTIONS
show Show the list of targets an action would run on
HELP
INVENTORY_SHOW_HELP = <<~HELP
NAME
show
USAGE
bolt inventory show [options]
DESCRIPTION
Show the list of targets an action would run on.
HELP
PLAN_HELP = <<~HELP
NAME
plan
USAGE
bolt plan <action> [parameters] [options]
DESCRIPTION
Convert, create, show, and run Bolt plans.
ACTIONS
convert Convert a YAML plan to a Bolt plan
new Create a new plan in the current project
run Run a plan on the specified targets
show Show available plans and plan documentation
HELP
PLAN_CONVERT_HELP = <<~HELP
NAME
convert
USAGE
bolt plan convert <path> [options]
DESCRIPTION
Convert a YAML plan to a Bolt plan.
Converting a YAML plan may result in a plan that is syntactically
correct but has different behavior. Always verify a converted plan's
functionality.
EXAMPLES
bolt plan convert path/to/plan/myplan.yaml
HELP
PLAN_NEW_HELP = <<~HELP
NAME
new
USAGE
bolt plan new <plan> [options]
DESCRIPTION
Create a new plan in the current project.
EXAMPLES
bolt plan new myproject::myplan
HELP
PLAN_RUN_HELP = <<~HELP
NAME
run
USAGE
bolt plan run <plan> [parameters] [options]
DESCRIPTION
Run a plan on the specified targets.
EXAMPLES
bolt plan run canary --targets target1,target2 command=hostname
HELP
PLAN_SHOW_HELP = <<~HELP
NAME
show
USAGE
bolt plan show [plan] [options]
DESCRIPTION
Show available plans and plan documentation.
Omitting the name of a plan will display a list of plans available
in the Bolt project.
Providing the name of a plan will display detailed documentation for
the plan, including a list of available parameters.
EXAMPLES
        Display a list of available plans
bolt plan show
        Display documentation for the aggregate::count plan
bolt plan show aggregate::count
HELP
PROJECT_HELP = <<~HELP
NAME
project
USAGE
bolt project <action> [options]
DESCRIPTION
Create and migrate Bolt projects
ACTIONS
init Create a new Bolt project
migrate Migrate a Bolt project to the latest version
HELP
PROJECT_INIT_HELP = <<~HELP
NAME
init
USAGE
bolt project init [name] [options]
DESCRIPTION
Create a new Bolt project in the current working directory.
Specify a name for the Bolt project. Defaults to the basename of the current working directory.
EXAMPLES
Create a new Bolt project using the directory as the project name.
bolt project init
Create a new Bolt project with a specified name.
bolt project init myproject
Create a new Bolt project with existing modules.
bolt project init --modules puppetlabs-apt,puppetlabs-ntp
HELP
PROJECT_MIGRATE_HELP = <<~HELP
NAME
migrate
USAGE
bolt project migrate [options]
DESCRIPTION
Migrate a Bolt project to the latest version.
Loads a Bolt project's inventory file and migrates it to the latest version. The
inventory file is modified in place and will not preserve comments or formatting.
HELP
PUPPETFILE_HELP = <<~HELP
NAME
puppetfile
USAGE
bolt puppetfile <action> [options]
DESCRIPTION
Install and list modules and generate type references
ACTIONS
generate-types Generate type references to register in plans
install Install modules from a Puppetfile into a project
show-modules List modules available to the Bolt project
HELP
PUPPETFILE_GENERATETYPES_HELP = <<~HELP
NAME
generate-types
USAGE
bolt puppetfile generate-types [options]
DESCRIPTION
Generate type references to register in plans.
HELP
PUPPETFILE_INSTALL_HELP = <<~HELP
NAME
install
USAGE
bolt puppetfile install [options]
DESCRIPTION
Install modules from a Puppetfile into a project
HELP
PUPPETFILE_SHOWMODULES_HELP = <<~HELP
NAME
show-modules
USAGE
bolt puppetfile show-modules [options]
DESCRIPTION
List modules available to the Bolt project.
HELP
SCRIPT_HELP = <<~HELP
NAME
script
USAGE
bolt script <action> [options]
DESCRIPTION
Run a script on the specified targets.
ACTIONS
run Run a script on the specified targets.
HELP
SCRIPT_RUN_HELP = <<~HELP
NAME
run
USAGE
bolt script run <script> [arguments] [options]
DESCRIPTION
Run a script on the specified targets.
Arguments passed to a script are passed literally and are not interpolated
by the shell. Any arguments containing spaces or special characters should
be quoted.
EXAMPLES
bolt script run myscript.sh 'echo hello' --targets target1,target2
HELP
SECRET_HELP = <<~HELP
NAME
secret
USAGE
bolt secret <action> [options]
DESCRIPTION
Create encryption keys and encrypt and decrypt values.
ACTIONS
createkeys Create new encryption keys
encrypt Encrypt a value
decrypt Decrypt a value
HELP
SECRET_CREATEKEYS_HELP = <<~HELP
NAME
createkeys
USAGE
bolt secret createkeys [options]
DESCRIPTION
Create new encryption keys.
HELP
SECRET_DECRYPT_HELP = <<~HELP
NAME
decrypt
USAGE
bolt secret decrypt <ciphertext> [options]
DESCRIPTION
Decrypt a value.
HELP
SECRET_ENCRYPT_HELP = <<~HELP
NAME
encrypt
USAGE
bolt secret encrypt <plaintext> [options]
DESCRIPTION
Encrypt a value.
HELP
TASK_HELP = <<~HELP
NAME
task
USAGE
bolt task <action> [options]
DESCRIPTION
Show and run Bolt tasks.
ACTIONS
run Run a Bolt task
show Show available tasks and task documentation
HELP
TASK_RUN_HELP = <<~HELP
NAME
run
USAGE
bolt task run <task> [parameters] [options]
DESCRIPTION
Run a task on the specified targets.
Parameters take the form parameter=value.
EXAMPLES
bolt task run package --targets target1,target2 action=status name=bash
HELP
TASK_SHOW_HELP = <<~HELP
NAME
show
USAGE
bolt task show [task] [options]
DESCRIPTION
Show available tasks and task documentation.
Omitting the name of a task will display a list of tasks available
in the Bolt project.
Providing the name of a task will display detailed documentation for
the task, including a list of available parameters.
EXAMPLES
Display a list of available tasks
bolt task show
Display documentation for the canary task
bolt task show canary
HELP
attr_reader :deprecations
def initialize(options)
super()
@options = options
@deprecations = []
separator "\nINVENTORY OPTIONS"
define('-t', '--targets TARGETS',
             'Identifies the targets of the command.',
'Enter a comma-separated list of target URIs or group names.',
"Or read a target list from an input file '@<file>' or stdin '-'.",
'Example: --targets localhost,target_group,ssh://nix.com:23,winrm://windows.puppet.com',
'URI format is [protocol://]host[:port]',
"SSH is the default protocol; may be #{TRANSPORTS.keys.join(', ')}",
             'For Windows targets, specify the winrm:// protocol if it has not been configured',
'For SSH, port defaults to `22`',
'For WinRM, port defaults to `5985` or `5986` based on the --[no-]ssl setting') do |targets|
@options[:targets] ||= []
@options[:targets] << get_arg_input(targets)
end
define('-q', '--query QUERY', 'Query PuppetDB to determine the targets') do |query|
@options[:query] = query
end
define('--rerun FILTER', 'Retry on targets from the last run',
"'all' all targets that were part of the last run.",
"'failure' targets that failed in the last run.",
"'success' targets that succeeded in the last run.") do |rerun|
@options[:rerun] = rerun
end
define('--noop', 'See what changes Bolt will make without actually executing the changes') do |_|
@options[:noop] = true
end
define('--description DESCRIPTION',
'Description to use for the job') do |description|
@options[:description] = description
end
define('--params PARAMETERS',
"Parameters to a task or plan as json, a json file '@<file>', or on stdin '-'") do |params|
@options[:task_options] = parse_params(params)
end
define('-e', '--execute CODE',
"Puppet manifest code to apply to the targets") do |code|
@options[:code] = code
end
define('--detail', 'Show resolved configuration for the targets') do |detail|
@options[:detail] = detail
end
separator "\nAUTHENTICATION OPTIONS"
define('-u', '--user USER', 'User to authenticate as') do |user|
@options[:user] = user
end
define('-p', '--password PASSWORD',
'Password to authenticate with') do |password|
@options[:password] = password
end
define('--password-prompt', 'Prompt for user to input password') do |_password|
$stderr.print "Please enter your password: "
@options[:password] = $stdin.noecho(&:gets).chomp
$stderr.puts
end
define('--private-key KEY', 'Path to private ssh key to authenticate with') do |key|
@options[:'private-key'] = File.expand_path(key)
end
define('--[no-]host-key-check', 'Check host keys with SSH') do |host_key_check|
@options[:'host-key-check'] = host_key_check
end
define('--[no-]ssl', 'Use SSL with WinRM') do |ssl|
@options[:ssl] = ssl
end
define('--[no-]ssl-verify', 'Verify remote host SSL certificate with WinRM') do |ssl_verify|
@options[:'ssl-verify'] = ssl_verify
end
separator "\nESCALATION OPTIONS"
define('--run-as USER', 'User to run as using privilege escalation') do |user|
@options[:'run-as'] = user
end
define('--sudo-password PASSWORD',
'Password for privilege escalation') do |password|
@options[:'sudo-password'] = password
end
define('--sudo-password-prompt', 'Prompt for user to input escalation password') do |_password|
$stderr.print "Please enter your privilege escalation password: "
@options[:'sudo-password'] = $stdin.noecho(&:gets).chomp
$stderr.puts
end
define('--sudo-executable EXEC', "Specify an executable for running as another user.",
"This option is experimental.") do |exec|
@options[:'sudo-executable'] = exec
end
separator "\nRUN CONTEXT OPTIONS"
define('-c', '--concurrency CONCURRENCY', Integer,
'Maximum number of simultaneous connections') do |concurrency|
@options[:concurrency] = concurrency
end
define('--compile-concurrency CONCURRENCY', Integer,
'Maximum number of simultaneous manifest block compiles (default: number of cores)') do |concurrency|
@options[:'compile-concurrency'] = concurrency
end
define('--[no-]cleanup',
'Whether to clean up temporary files created on targets') do |cleanup|
@options[:cleanup] = cleanup
end
define('-m', '--modulepath MODULES',
"List of directories containing modules, separated by '#{File::PATH_SEPARATOR}'",
'Directories are case-sensitive') do |modulepath|
# When specified from the CLI, modulepath entries are relative to pwd
@options[:modulepath] = modulepath.split(File::PATH_SEPARATOR).map do |moduledir|
File.expand_path(moduledir)
end
end
define('--project PATH', '--boltdir PATH',
'Specify what project to load config from (default: autodiscovered from current working dir)') do |path|
@options[:boltdir] = path
end
define('--configfile PATH',
'Specify where to load config from (default: ~/.puppetlabs/bolt/bolt.yaml).',
'Directory containing bolt.yaml will be used as the project directory.') do |path|
@options[:configfile] = path
end
define('--hiera-config PATH',
'Specify where to load Hiera config from (default: ~/.puppetlabs/bolt/hiera.yaml)') do |path|
@options[:'hiera-config'] = File.expand_path(path)
end
define('-i', '--inventoryfile PATH',
'Specify where to load inventory from (default: ~/.puppetlabs/bolt/inventory.yaml)') do |path|
if ENV.include?(Bolt::Inventory::ENVIRONMENT_VAR)
raise Bolt::CLIError, "Cannot pass inventory file when #{Bolt::Inventory::ENVIRONMENT_VAR} is set"
end
@options[:inventoryfile] = Pathname.new(File.expand_path(path))
end
define('--puppetfile PATH',
'Specify a Puppetfile to use when installing modules. (default: ~/.puppetlabs/bolt/Puppetfile)',
'Modules are installed in the current project.') do |path|
@options[:puppetfile_path] = Pathname.new(File.expand_path(path))
end
define('--[no-]save-rerun', 'Whether to update the rerun file after this command.') do |save|
@options[:'save-rerun'] = save
end
separator "\nREMOTE ENVIRONMENT OPTIONS"
define('--env-var ENVIRONMENT_VARIABLES', 'Environment variables to set on the target') do |envvar|
unless envvar.include?('=')
raise Bolt::CLIError, "Environment variables must be specified using 'myenvvar=key' format"
end
@options[:env_vars] ||= {}
@options[:env_vars].store(*envvar.split('=', 2))
end
separator "\nTRANSPORT OPTIONS"
define('--transport TRANSPORT', TRANSPORTS.keys.map(&:to_s),
"Specify a default transport: #{TRANSPORTS.keys.join(', ')}") do |t|
@options[:transport] = t
end
define('--[no-]native-ssh', 'Whether to shell out to native SSH or use the net-ssh Ruby library.',
'This option is experimental') do |bool|
@options[:'native-ssh'] = bool
end
define('--ssh-command EXEC', "Executable to use instead of the net-ssh Ruby library. ",
"This option is experimental.") do |exec|
@options[:'ssh-command'] = exec
end
define('--copy-command EXEC', "Command to copy files to remote hosts if using native SSH. ",
"This option is experimental.") do |exec|
@options[:'copy-command'] = exec
end
define('--connect-timeout TIMEOUT', Integer, 'Connection timeout (defaults vary)') do |timeout|
@options[:'connect-timeout'] = timeout
end
define('--[no-]tty', 'Request a pseudo TTY on targets that support it') do |tty|
@options[:tty] = tty
end
define('--tmpdir DIR', 'The directory to upload and execute temporary files on the target') do |tmpdir|
@options[:tmpdir] = tmpdir
end
separator "\nDISPLAY OPTIONS"
define('--filter FILTER', 'Filter tasks and plans by a matching substring') do |filter|
unless /^[a-z0-9_:]+$/.match(filter)
msg = "Illegal characters in filter string '#{filter}'. Filters must match a legal "\
"task or plan name."
raise Bolt::CLIError, msg
end
@options[:filter] = filter
end
define('--format FORMAT', 'Output format to use: human or json') do |format|
@options[:format] = format
end
define('--[no-]color', 'Whether to show output in color') do |color|
@options[:color] = color
end
define('-v', '--[no-]verbose', 'Display verbose logging') do |value|
@options[:verbose] = value
end
define('--trace', 'Display error stack traces') do |_|
@options[:trace] = true
end
separator "\nADDITIONAL OPTIONS"
define('--modules MODULES',
'A comma-separated list of modules to install from the Puppet Forge',
'when initializing a project. Resolves and installs all dependencies.') do |modules|
@options[:modules] = modules.split(',')
end
define('--force', 'Overwrite existing key pairs') do |_force|
@options[:force] = true
end
separator "\nGLOBAL OPTIONS"
define('-h', '--help', 'Display help') do |_|
@options[:help] = true
end
define('--version', 'Display the version') do |_|
puts Bolt::VERSION
raise Bolt::CLIExit
end
define('--debug', 'Display debug logging') do |_|
@options[:debug] = true
# We don't actually set '--log-level debug' here, but once the options are evaluated by
# the config class the end result is the same.
msg = "Command line option '--debug' is deprecated, set '--log-level debug' instead."
@deprecations << { type: 'Using --debug instead of --log-level debug', msg: msg }
end
define('--log-level LEVEL',
"Set the log level for the console. Available options are",
"debug, info, notice, warn, error, fatal, any.") do |level|
@options[:log] = { 'console' => { 'level' => level } }
end
define('--plugin PLUGIN', 'Select the plugin to use') do |plug|
@options[:plugin] = plug
end
end
def remove_excluded_opts(option_list)
# Remove any options that are not available for the specified subcommand
top.list.delete_if do |opt|
opt.respond_to?(:switch_name) && !option_list.include?(opt.switch_name)
end
# Remove any separators if all options of that type have been removed
top.list.delete_if do |opt|
i = top.list.index(opt)
opt.is_a?(String) && top.list[i + 1].is_a?(String)
end
end
def update
help_text = get_help_text(@options[:subcommand], @options[:action])
# Update the banner according to the subcommand
self.banner = help_text[:banner]
# Builds the option list for the specified subcommand and removes all excluded
# options from the help text
remove_excluded_opts(help_text[:flags])
end
def parse_params(params)
json = get_arg_input(params)
JSON.parse(json)
rescue JSON::ParserError => e
raise Bolt::CLIError, "Unable to parse --params value as JSON: #{e}"
end
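    # Argument values prefixed with '@' are read from the named file, and '-' reads the value from stdin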
def get_arg_input(value)
if value.start_with?('@')
file = value.sub(/^@/, '')
read_arg_file(file)
elsif value == '-'
$stdin.read
else
value
end
end
def read_arg_file(file)
File.read(File.expand_path(file))
rescue StandardError => e
raise Bolt::FileError.new("Error attempting to read #{file}: #{e}", file)
end
end
end
| 1 | 15,709 | It seems like we should leave `notice` here since it can be configured in Bolt config files, and that's what sets the console config for messages from Puppet. For example, this will break plans that call `notice()` in their apply blocks. | puppetlabs-bolt | rb |
@@ -31,6 +31,7 @@ public enum CommonMetrics {
private Meter dbConnectionMeter;
private Meter flowFailMeter;
+ private Meter OOMwaitingJobMeter;
private AtomicLong dbConnectionTime = new AtomicLong(0L);
private MetricRegistry registry; | 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.metrics;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import java.util.concurrent.atomic.AtomicLong;
/**
* This singleton class CommonMetrics is in charge of collecting varieties of metrics
* which are accessed in both web and exec modules. That said, these metrics will be
* exposed in both Web server and executor.
*/
public enum CommonMetrics {
INSTANCE;
private Meter dbConnectionMeter;
private Meter flowFailMeter;
private AtomicLong dbConnectionTime = new AtomicLong(0L);
private MetricRegistry registry;
CommonMetrics() {
registry = MetricsManager.INSTANCE.getRegistry();
setupAllMetrics();
}
private void setupAllMetrics() {
dbConnectionMeter = MetricsUtility.addMeter("DB-Connection-meter", registry);
flowFailMeter = MetricsUtility.addMeter("flow-fail-meter", registry);
MetricsUtility.addGauge("dbConnectionTime", registry, dbConnectionTime::get);
}
/**
   * Mark the occurrence of a DB query event.
*/
public void markDBConnection() {
/*
     * This method should be thread safe.
     * Two reasons why we don't make this method synchronized:
     * 1) Dropwizard metrics handles concurrency internally;
     * 2) mark is essentially an addition operation, which should not cause a race condition.
*/
dbConnectionMeter.mark();
}
/**
   * Mark flowFailMeter when a flow is considered FAILED.
   * This method can be called by either the web server or the executor, as both detect flow failures.
*/
public void markFlowFail() {
flowFailMeter.mark();
}
public void setDBConnectionTime(long milliseconds) {
dbConnectionTime.set(milliseconds);
}
}
| 1 | 12,633 | This metrics is only exposed in executors. Should it be defined in azkaban.execapp.ExecMetrics instead? | azkaban-azkaban | java |
@@ -64,3 +64,14 @@ export function getEffectiveMaxDate ({ maxDate, includeDates }) {
return maxDate
}
}
+
+export function parseDate (value, { dateFormat, locale }) {
+ const m = moment(value, dateFormat, locale || moment.locale(), true)
+ return m.isValid() ? m : null
+}
+
+export function safeDateFormat (date, { dateFormat, locale }) {
+ return date && date.clone()
+ .locale(locale || moment.locale())
+ .format(Array.isArray(dateFormat) ? dateFormat[0] : dateFormat) || ''
+} | 1 | import moment from 'moment'
export function isSameDay (moment1, moment2) {
if (moment1 && moment2) {
return moment1.isSame(moment2, 'day')
} else {
return !moment1 && !moment2
}
}
export function isSameUtcOffset (moment1, moment2) {
if (moment1 && moment2) {
return moment1.utcOffset() === moment2.utcOffset()
} else {
return !moment1 && !moment2
}
}
export function isDayInRange (day, startDate, endDate) {
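  // pad the range by one second on each side so the start and end days themselves count as in range (isBetween is exclusive by default)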
const before = startDate.clone().startOf('day').subtract(1, 'seconds')
const after = endDate.clone().startOf('day').add(1, 'seconds')
return day.clone().startOf('day').isBetween(before, after)
}
export function isDayDisabled (day, { minDate, maxDate, excludeDates, includeDates, filterDate } = {}) {
return (minDate && day.isBefore(minDate, 'day')) ||
(maxDate && day.isAfter(maxDate, 'day')) ||
(excludeDates && excludeDates.some(excludeDate => isSameDay(day, excludeDate))) ||
(includeDates && !includeDates.some(includeDate => isSameDay(day, includeDate))) ||
(filterDate && !filterDate(day.clone())) ||
false
}
export function allDaysDisabledBefore (day, unit, { minDate, includeDates } = {}) {
const dateBefore = day.clone().subtract(1, unit)
return (minDate && dateBefore.isBefore(minDate, unit)) ||
(includeDates && includeDates.every(includeDate => dateBefore.isBefore(includeDate, unit))) ||
false
}
export function allDaysDisabledAfter (day, unit, { maxDate, includeDates } = {}) {
const dateAfter = day.clone().add(1, unit)
return (maxDate && dateAfter.isAfter(maxDate, unit)) ||
(includeDates && includeDates.every(includeDate => dateAfter.isAfter(includeDate, unit))) ||
false
}
export function getEffectiveMinDate ({ minDate, includeDates }) {
if (includeDates && minDate) {
return moment.min(includeDates.filter(includeDate => minDate.isSameOrBefore(includeDate, 'day')))
} else if (includeDates) {
return moment.min(includeDates)
} else {
return minDate
}
}
export function getEffectiveMaxDate ({ maxDate, includeDates }) {
if (includeDates && maxDate) {
return moment.max(includeDates.filter(includeDate => maxDate.isSameOrAfter(includeDate, 'day')))
} else if (includeDates) {
return moment.max(includeDates)
} else {
return maxDate
}
}
| 1 | 5,970 | I originally factored out these functions from date_input. Currently, they are only used in datepicker, but they seemed more general purpose. If you decide we should keep the current date_input API (in case anyone is using it directly), then I think it makes sense to factor these out in order to be able to maintain consistency, but otherwise they could be inlined into datepicker.jsx if you prefer. | Hacker0x01-react-datepicker | js |
@@ -204,9 +204,10 @@ func (r *mutableStateTaskGeneratorImpl) generateDelayedWorkflowTasks(
switch startAttr.GetInitiator() {
case enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY:
workflowBackoffType = enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY
- case enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE,
- enumspb.CONTINUE_AS_NEW_INITIATOR_WORKFLOW:
+ case enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE, enumspb.CONTINUE_AS_NEW_INITIATOR_WORKFLOW:
workflowBackoffType = enumsspb.WORKFLOW_BACKOFF_TYPE_CRON
+ case enumspb.CONTINUE_AS_NEW_INITIATOR_UNSPECIFIED:
+ workflowBackoffType = enumsspb.WORKFLOW_BACKOFF_TYPE_UNSPECIFIED
default:
return serviceerror.NewInternal(fmt.Sprintf("unknown initiator: %v", startAttr.GetInitiator()))
} | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination mutableStateTaskGenerator_mock.go
package history
import (
"fmt"
"time"
enumspb "go.temporal.io/api/enums/v1"
historypb "go.temporal.io/api/history/v1"
"go.temporal.io/api/serviceerror"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/common/cache"
"go.temporal.io/server/common/clock"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
)
type (
mutableStateTaskGenerator interface {
generateWorkflowStartTasks(
now time.Time,
startEvent *historypb.HistoryEvent,
) error
generateWorkflowCloseTasks(
now time.Time,
) error
generateRecordWorkflowStartedTasks(
now time.Time,
startEvent *historypb.HistoryEvent,
) error
generateDelayedWorkflowTasks(
now time.Time,
startEvent *historypb.HistoryEvent,
) error
generateScheduleWorkflowTaskTasks(
now time.Time,
workflowTaskScheduleID int64,
) error
generateStartWorkflowTaskTasks(
now time.Time,
workflowTaskScheduleID int64,
) error
generateActivityTransferTasks(
now time.Time,
event *historypb.HistoryEvent,
) error
generateActivityRetryTasks(
activityScheduleID int64,
) error
generateChildWorkflowTasks(
now time.Time,
event *historypb.HistoryEvent,
) error
generateRequestCancelExternalTasks(
now time.Time,
event *historypb.HistoryEvent,
) error
generateSignalExternalTasks(
now time.Time,
event *historypb.HistoryEvent,
) error
generateWorkflowSearchAttrTasks(
now time.Time,
) error
generateWorkflowResetTasks(
now time.Time,
) error
		// these 2 APIs should only be called when the mutable state transaction is being closed
generateActivityTimerTasks(
now time.Time,
) error
generateUserTimerTasks(
now time.Time,
) error
}
mutableStateTaskGeneratorImpl struct {
namespaceCache cache.NamespaceCache
logger log.Logger
mutableState mutableState
}
)
const defaultWorkflowRetentionInDays int32 = 1
var _ mutableStateTaskGenerator = (*mutableStateTaskGeneratorImpl)(nil)
func newMutableStateTaskGenerator(
namespaceCache cache.NamespaceCache,
logger log.Logger,
mutableState mutableState,
) *mutableStateTaskGeneratorImpl {
return &mutableStateTaskGeneratorImpl{
namespaceCache: namespaceCache,
logger: logger,
mutableState: mutableState,
}
}
func (r *mutableStateTaskGeneratorImpl) generateWorkflowStartTasks(
now time.Time,
startEvent *historypb.HistoryEvent,
) error {
attr := startEvent.GetWorkflowExecutionStartedEventAttributes()
firstWorkflowTaskDelayDuration := timestamp.DurationValue(attr.GetFirstWorkflowTaskBackoff())
executionInfo := r.mutableState.GetExecutionInfo()
startVersion := startEvent.GetVersion()
runTimeoutDuration := timestamp.DurationValue(executionInfo.WorkflowRunTimeout)
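	// extend the run timeout window by the first workflow task backoff so the backoff delay does not eat into the run timeout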
runTimeoutDuration = runTimeoutDuration + firstWorkflowTaskDelayDuration
workflowExpirationTimestamp := now.Add(runTimeoutDuration)
wfExpTime := timestamp.TimeValue(executionInfo.WorkflowExpirationTime)
if !wfExpTime.IsZero() && workflowExpirationTimestamp.After(wfExpTime) {
workflowExpirationTimestamp = wfExpTime
}
r.mutableState.AddTimerTasks(&persistence.WorkflowTimeoutTask{
// TaskID is set by shard
VisibilityTimestamp: workflowExpirationTimestamp,
Version: startVersion,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateWorkflowCloseTasks(
now time.Time,
) error {
currentVersion := r.mutableState.GetCurrentVersion()
executionInfo := r.mutableState.GetExecutionInfo()
r.mutableState.AddTransferTasks(&persistence.CloseExecutionTask{
// TaskID is set by shard
VisibilityTimestamp: now,
Version: currentVersion,
})
retentionInDays := defaultWorkflowRetentionInDays
namespaceEntry, err := r.namespaceCache.GetNamespaceByID(executionInfo.NamespaceId)
switch err.(type) {
case nil:
retentionInDays = namespaceEntry.GetRetentionDays(executionInfo.WorkflowId)
case *serviceerror.NotFound:
// namespace is not accessible, use default value above
default:
return err
}
retentionDuration := time.Duration(retentionInDays) * time.Hour * 24
r.mutableState.AddTimerTasks(&persistence.DeleteHistoryEventTask{
// TaskID is set by shard
VisibilityTimestamp: now.Add(retentionDuration),
Version: currentVersion,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateDelayedWorkflowTasks(
now time.Time,
startEvent *historypb.HistoryEvent,
) error {
startVersion := startEvent.GetVersion()
startAttr := startEvent.GetWorkflowExecutionStartedEventAttributes()
workflowTaskBackoffDuration := timestamp.DurationValue(startAttr.GetFirstWorkflowTaskBackoff())
executionTimestamp := now.Add(workflowTaskBackoffDuration)
var workflowBackoffType enumsspb.WorkflowBackoffType
switch startAttr.GetInitiator() {
case enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY:
workflowBackoffType = enumsspb.WORKFLOW_BACKOFF_TYPE_RETRY
case enumspb.CONTINUE_AS_NEW_INITIATOR_CRON_SCHEDULE,
enumspb.CONTINUE_AS_NEW_INITIATOR_WORKFLOW:
workflowBackoffType = enumsspb.WORKFLOW_BACKOFF_TYPE_CRON
default:
return serviceerror.NewInternal(fmt.Sprintf("unknown initiator: %v", startAttr.GetInitiator()))
}
r.mutableState.AddTimerTasks(&persistence.WorkflowBackoffTimerTask{
// TaskID is set by shard
// TODO EventID seems not used at all
VisibilityTimestamp: executionTimestamp,
WorkflowBackoffType: workflowBackoffType,
Version: startVersion,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateRecordWorkflowStartedTasks(
now time.Time,
startEvent *historypb.HistoryEvent,
) error {
startVersion := startEvent.GetVersion()
r.mutableState.AddTransferTasks(&persistence.RecordWorkflowStartedTask{
// TaskID is set by shard
VisibilityTimestamp: now,
Version: startVersion,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateScheduleWorkflowTaskTasks(
now time.Time,
workflowTaskScheduleID int64,
) error {
executionInfo := r.mutableState.GetExecutionInfo()
workflowTask, ok := r.mutableState.GetWorkflowTaskInfo(
workflowTaskScheduleID,
)
if !ok {
return serviceerror.NewInternal(fmt.Sprintf("it could be a bug, cannot get pending workflow task: %v", workflowTaskScheduleID))
}
r.mutableState.AddTransferTasks(&persistence.WorkflowTask{
// TaskID is set by shard
VisibilityTimestamp: now,
NamespaceID: executionInfo.NamespaceId,
TaskQueue: workflowTask.TaskQueue.GetName(),
ScheduleID: workflowTask.ScheduleID,
Version: workflowTask.Version,
})
if r.mutableState.IsStickyTaskQueueEnabled() {
scheduledTime := timestamp.TimeValue(workflowTask.ScheduledTimestamp)
scheduleToStartTimeout := timestamp.DurationValue(r.mutableState.GetExecutionInfo().StickyScheduleToStartTimeout)
r.mutableState.AddTimerTasks(&persistence.WorkflowTaskTimeoutTask{
// TaskID is set by shard
VisibilityTimestamp: scheduledTime.Add(scheduleToStartTimeout),
TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START,
EventID: workflowTask.ScheduleID,
ScheduleAttempt: workflowTask.Attempt,
Version: workflowTask.Version,
})
}
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateStartWorkflowTaskTasks(
_ time.Time,
workflowTaskScheduleID int64,
) error {
workflowTask, ok := r.mutableState.GetWorkflowTaskInfo(
workflowTaskScheduleID,
)
if !ok {
return serviceerror.NewInternal(fmt.Sprintf("it could be a bug, cannot get pending workflowTaskInfo: %v", workflowTaskScheduleID))
}
startedTime := timestamp.TimeValue(workflowTask.StartedTimestamp)
workflowTaskTimeout := timestamp.DurationValue(workflowTask.WorkflowTaskTimeout)
r.mutableState.AddTimerTasks(&persistence.WorkflowTaskTimeoutTask{
// TaskID is set by shard
VisibilityTimestamp: startedTime.Add(workflowTaskTimeout),
TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
EventID: workflowTask.ScheduleID,
ScheduleAttempt: workflowTask.Attempt,
Version: workflowTask.Version,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateActivityTransferTasks(
now time.Time,
event *historypb.HistoryEvent,
) error {
attr := event.GetActivityTaskScheduledEventAttributes()
activityScheduleID := event.GetEventId()
activityInfo, ok := r.mutableState.GetActivityInfo(activityScheduleID)
if !ok {
return serviceerror.NewInternal(fmt.Sprintf("it could be a bug, cannot get pending activity: %v", activityScheduleID))
}
var targetNamespaceID string
var err error
if activityInfo.NamespaceId != "" {
targetNamespaceID = activityInfo.NamespaceId
} else {
		// TODO remove this block after March 1st, 2020
		// previously, NamespaceID in activity info was not used, so we need to get
		// the schedule event from the DB to check whether the activity to be scheduled
		// belongs to this namespace
targetNamespaceID, err = r.getTargetNamespaceID(attr.GetNamespace())
if err != nil {
return err
}
}
r.mutableState.AddTransferTasks(&persistence.ActivityTask{
// TaskID is set by shard
VisibilityTimestamp: now,
NamespaceID: targetNamespaceID,
TaskQueue: activityInfo.TaskQueue,
ScheduleID: activityInfo.ScheduleId,
Version: activityInfo.Version,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateActivityRetryTasks(
activityScheduleID int64,
) error {
ai, ok := r.mutableState.GetActivityInfo(activityScheduleID)
if !ok {
return serviceerror.NewInternal(fmt.Sprintf("it could be a bug, cannot get pending activity: %v", activityScheduleID))
}
r.mutableState.AddTimerTasks(&persistence.ActivityRetryTimerTask{
// TaskID is set by shard
Version: ai.Version,
VisibilityTimestamp: *ai.ScheduledTime,
EventID: ai.ScheduleId,
Attempt: ai.Attempt,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateChildWorkflowTasks(
now time.Time,
event *historypb.HistoryEvent,
) error {
attr := event.GetStartChildWorkflowExecutionInitiatedEventAttributes()
childWorkflowScheduleID := event.GetEventId()
childWorkflowTargetNamespace := attr.GetNamespace()
childWorkflowInfo, ok := r.mutableState.GetChildExecutionInfo(childWorkflowScheduleID)
if !ok {
return serviceerror.NewInternal(fmt.Sprintf("it could be a bug, cannot get pending child workflow: %v", childWorkflowScheduleID))
}
targetNamespaceID, err := r.getTargetNamespaceID(childWorkflowTargetNamespace)
if err != nil {
return err
}
r.mutableState.AddTransferTasks(&persistence.StartChildExecutionTask{
// TaskID is set by shard
VisibilityTimestamp: now,
TargetNamespaceID: targetNamespaceID,
TargetWorkflowID: childWorkflowInfo.StartedWorkflowId,
InitiatedID: childWorkflowInfo.InitiatedId,
Version: childWorkflowInfo.Version,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateRequestCancelExternalTasks(
now time.Time,
event *historypb.HistoryEvent,
) error {
attr := event.GetRequestCancelExternalWorkflowExecutionInitiatedEventAttributes()
scheduleID := event.GetEventId()
version := event.GetVersion()
targetNamespace := attr.GetNamespace()
targetWorkflowID := attr.GetWorkflowExecution().GetWorkflowId()
targetRunID := attr.GetWorkflowExecution().GetRunId()
targetChildOnly := attr.GetChildWorkflowOnly()
_, ok := r.mutableState.GetRequestCancelInfo(scheduleID)
if !ok {
return serviceerror.NewInternal(fmt.Sprintf("it could be a bug, cannot get pending request cancel external workflow: %v", scheduleID))
}
targetNamespaceID, err := r.getTargetNamespaceID(targetNamespace)
if err != nil {
return err
}
r.mutableState.AddTransferTasks(&persistence.CancelExecutionTask{
// TaskID is set by shard
VisibilityTimestamp: now,
TargetNamespaceID: targetNamespaceID,
TargetWorkflowID: targetWorkflowID,
TargetRunID: targetRunID,
TargetChildWorkflowOnly: targetChildOnly,
InitiatedID: scheduleID,
Version: version,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateSignalExternalTasks(
now time.Time,
event *historypb.HistoryEvent,
) error {
attr := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes()
scheduleID := event.GetEventId()
version := event.GetVersion()
targetNamespace := attr.GetNamespace()
targetWorkflowID := attr.GetWorkflowExecution().GetWorkflowId()
targetRunID := attr.GetWorkflowExecution().GetRunId()
targetChildOnly := attr.GetChildWorkflowOnly()
_, ok := r.mutableState.GetSignalInfo(scheduleID)
if !ok {
return serviceerror.NewInternal(fmt.Sprintf("it could be a bug, cannot get pending signal external workflow: %v", scheduleID))
}
targetNamespaceID, err := r.getTargetNamespaceID(targetNamespace)
if err != nil {
return err
}
r.mutableState.AddTransferTasks(&persistence.SignalExecutionTask{
// TaskID is set by shard
VisibilityTimestamp: now,
TargetNamespaceID: targetNamespaceID,
TargetWorkflowID: targetWorkflowID,
TargetRunID: targetRunID,
TargetChildWorkflowOnly: targetChildOnly,
InitiatedID: scheduleID,
Version: version,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateWorkflowSearchAttrTasks(
now time.Time,
) error {
currentVersion := r.mutableState.GetCurrentVersion()
r.mutableState.AddTransferTasks(&persistence.UpsertWorkflowSearchAttributesTask{
// TaskID is set by shard
VisibilityTimestamp: now,
Version: currentVersion, // task processing does not check this version
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateWorkflowResetTasks(
now time.Time,
) error {
currentVersion := r.mutableState.GetCurrentVersion()
r.mutableState.AddTransferTasks(&persistence.ResetWorkflowTask{
// TaskID is set by shard
VisibilityTimestamp: now,
Version: currentVersion,
})
return nil
}
func (r *mutableStateTaskGeneratorImpl) generateActivityTimerTasks(
now time.Time,
) error {
_, err := r.getTimerSequence(now).createNextActivityTimer()
return err
}
func (r *mutableStateTaskGeneratorImpl) generateUserTimerTasks(
now time.Time,
) error {
_, err := r.getTimerSequence(now).createNextUserTimer()
return err
}
func (r *mutableStateTaskGeneratorImpl) getTimerSequence(now time.Time) timerSequence {
timeSource := clock.NewEventTimeSource()
timeSource.Update(now)
return newTimerSequence(timeSource, r.mutableState)
}
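// getTargetNamespaceID resolves a target namespace name to its ID via the namespace cache,
// falling back to the workflow's own namespace ID when no target namespace is given.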
func (r *mutableStateTaskGeneratorImpl) getTargetNamespaceID(
targetNamespace string,
) (string, error) {
targetNamespaceID := r.mutableState.GetExecutionInfo().NamespaceId
if targetNamespace != "" {
targetNamespaceEntry, err := r.namespaceCache.GetNamespace(targetNamespace)
if err != nil {
return "", err
}
targetNamespaceID = targetNamespaceEntry.GetInfo().Id
}
return targetNamespaceID, nil
}
| 1 | 10,234 | I think this should never happen and should return an error also. Having `WorkflowBackoffTimerTask` with `UNSPECIFIED` type looks weird. | temporalio-temporal | go |
@@ -9,7 +9,7 @@ import 'webcomponents';
function onKeyDown(e) {
// Don't submit form on enter
// Real (non-emulator) Tizen does nothing on Space
- if (e.keyCode === 13 || e.keyCode === 32) {
+ if (e.keyCode === 13 || (e.keyCode === 32 && browser.tizen)) {
e.preventDefault();
if (!this.checked) { | 1 | import layoutManager from 'layoutManager';
import 'css!./emby-radio';
import 'webcomponents';
/* eslint-disable indent */
const EmbyRadioPrototype = Object.create(HTMLInputElement.prototype);
function onKeyDown(e) {
// Don't submit form on enter
// Real (non-emulator) Tizen does nothing on Space
if (e.keyCode === 13 || e.keyCode === 32) {
e.preventDefault();
if (!this.checked) {
this.checked = true;
this.dispatchEvent(new CustomEvent('change', {
bubbles: true
}));
}
return false;
}
}
EmbyRadioPrototype.attachedCallback = function () {
const showFocus = !layoutManager.mobile;
if (this.getAttribute('data-radio') === 'true') {
return;
}
this.setAttribute('data-radio', 'true');
this.classList.add('mdl-radio__button');
const labelElement = this.parentNode;
labelElement.classList.add('mdl-radio');
labelElement.classList.add('mdl-js-radio');
labelElement.classList.add('mdl-js-ripple-effect');
if (showFocus) {
labelElement.classList.add('show-focus');
}
const labelTextElement = labelElement.querySelector('span');
labelTextElement.classList.add('radioButtonLabel');
labelTextElement.classList.add('mdl-radio__label');
let html = '';
html += '<div class="mdl-radio__circles">';
html += '<svg>';
html += '<defs>';
html += '<clipPath id="cutoff">';
html += '<circle cx="50%" cy="50%" r="50%" />';
html += '</clipPath>';
html += '</defs>';
html += '<circle class="mdl-radio__outer-circle" cx="50%" cy="50%" r="50%" fill="none" stroke="currentcolor" stroke-width="0.26em" clip-path="url(#cutoff)" />';
html += '<circle class="mdl-radio__inner-circle" cx="50%" cy="50%" r="25%" fill="currentcolor" />';
html += '</svg>';
if (showFocus) {
html += '<div class="mdl-radio__focus-circle"></div>';
}
html += '</div>';
this.insertAdjacentHTML('afterend', html);
this.addEventListener('keydown', onKeyDown);
};
document.registerElement('emby-radio', {
prototype: EmbyRadioPrototype,
extends: 'input'
});
/* eslint-enable indent */
| 1 | 17,335 | Add `browser` import | jellyfin-jellyfin-web | js |
@@ -0,0 +1,18 @@
+// Copyright 2018 Keybase Inc. All rights reserved.
+// Use of this source code is governed by a BSD
+// license that can be found in the LICENSE file.
+
+package config
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+)
+
+// GenerateSHA256PasswordHash generates a SHA-256 password hash from
+// plaintextPassword in the form of "sha256:<hash>". This is what should go
+// into the config file.
+func GenerateSHA256PasswordHash(plaintextPassword string) string {
+ hash := sha256.Sum256([]byte(plaintextPassword))
+ return "sha256:" + hex.EncodeToString(hash[:])
+} | 1 | 1 | 19,545 | Would it be good practice to add a nonce in here (that's also stored in the config), so it's harder to tell when passwords are being reused? | keybase-kbfs | go |
|
@@ -40,4 +40,8 @@ public interface TelemetryRestService {
@Produces(MediaType.APPLICATION_JSON)
TelemetryConfigurationDto getTelemetryConfiguration();
+ @GET
+ @Path("/data")
+ @Produces(MediaType.APPLICATION_JSON)
+ Object getTelemetryData();
} | 1 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.rest;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import org.camunda.bpm.engine.rest.dto.TelemetryConfigurationDto;
@Produces(MediaType.APPLICATION_JSON)
public interface TelemetryRestService {
String PATH = "/telemetry";
@POST
@Path("/configuration")
@Consumes(MediaType.APPLICATION_JSON)
void configureTelemetry(TelemetryConfigurationDto dto);
@GET
@Path("/configuration")
@Produces(MediaType.APPLICATION_JSON)
TelemetryConfigurationDto getTelemetryConfiguration();
}
| 1 | 12,543 | For consistency, the return type could be `TelemetryDataDto` here. This would be more consistent with the other REST API interfaces. From a functional perspective, the current code is perfectly fine though. | camunda-camunda-bpm-platform | java |
@@ -133,7 +133,7 @@ PySGDSolver::PySGDSolver(const string& param_file) {
// as in PyNet, (as a convenience, not a guarantee), create a Python
// exception if param_file can't be opened
CheckFile(param_file);
- solver_.reset(new SGDSolver<float>(param_file));
+ solver_ = boost::make_shared<SolvingDriver<float> >(param_file);
// we need to explicitly store the net wrapper, rather than constructing
// it on the fly, so that it can hold references to Python objects
net_.reset(new PyNet(solver_->net())); | 1 | // pycaffe provides a wrapper of the caffe::Net class as well as some
// caffe::Caffe functions so that one could easily call it from Python.
// Note that for Python, we will simply use float as the data type.
#include <Python.h> // NOLINT(build/include_alpha)
#include <boost/make_shared.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
// these need to be included after boost on OS X
#include <string> // NOLINT(build/include_order)
#include <vector> // NOLINT(build/include_order)
#include <fstream> // NOLINT
#include "_caffe.hpp"
#include "caffe/caffe.hpp"
// Temporary solution for numpy < 1.7 versions: old macro, no promises.
// You're strongly advised to upgrade to >= 1.7.
#ifndef NPY_ARRAY_C_CONTIGUOUS
#define NPY_ARRAY_C_CONTIGUOUS NPY_C_CONTIGUOUS
#define PyArray_SetBaseObject(arr, x) (PyArray_BASE(arr) = (x))
#endif
namespace caffe {
// for convenience, check that input files can be opened, and raise an
// exception that boost will send to Python if not (caffe could still crash
// later if the input files are disturbed before they are actually used, but
// this saves frustration in most cases)
static void CheckFile(const string& filename) {
std::ifstream f(filename.c_str());
if (!f.good()) {
f.close();
throw std::runtime_error("Could not open file " + filename);
}
f.close();
}
bp::object PyBlobWrap::get_data() {
npy_intp dims[] = {num(), channels(), height(), width()};
PyObject *obj = PyArray_SimpleNewFromData(4, dims, NPY_FLOAT32,
blob_->mutable_cpu_data());
PyArray_SetBaseObject(reinterpret_cast<PyArrayObject *>(obj), self_);
Py_INCREF(self_);
bp::handle<> h(obj);
return bp::object(h);
}
bp::object PyBlobWrap::get_diff() {
npy_intp dims[] = {num(), channels(), height(), width()};
PyObject *obj = PyArray_SimpleNewFromData(4, dims, NPY_FLOAT32,
blob_->mutable_cpu_diff());
PyArray_SetBaseObject(reinterpret_cast<PyArrayObject *>(obj), self_);
Py_INCREF(self_);
bp::handle<> h(obj);
return bp::object(h);
}
PyNet::PyNet(string param_file, string pretrained_param_file) {
Init(param_file);
CheckFile(pretrained_param_file);
net_->CopyTrainedLayersFrom(pretrained_param_file);
}
void PyNet::Init(string param_file) {
CheckFile(param_file);
net_.reset(new Net<float>(param_file));
}
void PyNet::check_contiguous_array(PyArrayObject* arr, string name,
int channels, int height, int width) {
if (!(PyArray_FLAGS(arr) & NPY_ARRAY_C_CONTIGUOUS)) {
throw std::runtime_error(name + " must be C contiguous");
}
if (PyArray_NDIM(arr) != 4) {
throw std::runtime_error(name + " must be 4-d");
}
if (PyArray_TYPE(arr) != NPY_FLOAT32) {
throw std::runtime_error(name + " must be float32");
}
if (PyArray_DIMS(arr)[1] != channels) {
throw std::runtime_error(name + " has wrong number of channels");
}
if (PyArray_DIMS(arr)[2] != height) {
throw std::runtime_error(name + " has wrong height");
}
if (PyArray_DIMS(arr)[3] != width) {
throw std::runtime_error(name + " has wrong width");
}
}
void PyNet::set_input_arrays(bp::object data_obj, bp::object labels_obj) {
// check that this network has an input MemoryDataLayer
shared_ptr<MemoryDataLayer<float> > md_layer =
boost::dynamic_pointer_cast<MemoryDataLayer<float> >(net_->layers()[0]);
if (!md_layer) {
throw std::runtime_error("set_input_arrays may only be called if the"
" first layer is a MemoryDataLayer");
}
// check that we were passed appropriately-sized contiguous memory
PyArrayObject* data_arr =
reinterpret_cast<PyArrayObject*>(data_obj.ptr());
PyArrayObject* labels_arr =
reinterpret_cast<PyArrayObject*>(labels_obj.ptr());
check_contiguous_array(data_arr, "data array", md_layer->channels(),
md_layer->height(), md_layer->width());
check_contiguous_array(labels_arr, "labels array", 1, 1, 1);
if (PyArray_DIMS(data_arr)[0] != PyArray_DIMS(labels_arr)[0]) {
throw std::runtime_error("data and labels must have the same first"
" dimension");
}
if (PyArray_DIMS(data_arr)[0] % md_layer->batch_size() != 0) {
throw std::runtime_error("first dimensions of input arrays must be a"
" multiple of batch size");
}
// hold references
input_data_ = data_obj;
input_labels_ = labels_obj;
md_layer->Reset(static_cast<float*>(PyArray_DATA(data_arr)),
static_cast<float*>(PyArray_DATA(labels_arr)),
PyArray_DIMS(data_arr)[0]);
}
PySGDSolver::PySGDSolver(const string& param_file) {
// as in PyNet, (as a convenience, not a guarantee), create a Python
// exception if param_file can't be opened
CheckFile(param_file);
solver_.reset(new SGDSolver<float>(param_file));
// we need to explicitly store the net wrapper, rather than constructing
// it on the fly, so that it can hold references to Python objects
net_.reset(new PyNet(solver_->net()));
for (int i = 0; i < solver_->test_nets().size(); ++i) {
test_nets_.push_back(boost::make_shared<PyNet>(solver_->test_nets()[i]));
}
}
void PySGDSolver::SolveResume(const string& resume_file) {
CheckFile(resume_file);
return solver_->Solve(resume_file);
}
BOOST_PYTHON_MODULE(_caffe) {
// below, we prepend an underscore to methods that will be replaced
// in Python
bp::class_<PyNet, shared_ptr<PyNet> >(
"Net", bp::init<string, string>())
.def(bp::init<string>())
.def("copy_from", &PyNet::CopyTrainedLayersFrom)
.def("share_with", &PyNet::ShareTrainedLayersWith)
.def("_forward", &PyNet::Forward)
.def("_backward", &PyNet::Backward)
.def("reshape", &PyNet::Reshape)
.def("set_mode_cpu", &PyNet::set_mode_cpu)
.def("set_mode_gpu", &PyNet::set_mode_gpu)
.def("set_phase_train", &PyNet::set_phase_train)
.def("set_phase_test", &PyNet::set_phase_test)
.def("set_device", &PyNet::set_device)
.add_property("_blobs", &PyNet::blobs)
.add_property("layers", &PyNet::layers)
.add_property("_blob_names", &PyNet::blob_names)
.add_property("_layer_names", &PyNet::layer_names)
.add_property("inputs", &PyNet::inputs)
.add_property("outputs", &PyNet::outputs)
.add_property("mean", &PyNet::mean_)
.add_property("input_scale", &PyNet::input_scale_)
.add_property("raw_scale", &PyNet::raw_scale_)
.add_property("channel_swap", &PyNet::channel_swap_)
.def("_set_input_arrays", &PyNet::set_input_arrays)
.def("save", &PyNet::save);
bp::class_<PyBlob<float>, PyBlobWrap>(
"Blob", bp::no_init)
.add_property("num", &PyBlob<float>::num)
.add_property("channels", &PyBlob<float>::channels)
.add_property("height", &PyBlob<float>::height)
.add_property("width", &PyBlob<float>::width)
.add_property("count", &PyBlob<float>::count)
.def("reshape", &PyBlob<float>::Reshape)
.add_property("data", &PyBlobWrap::get_data)
.add_property("diff", &PyBlobWrap::get_diff);
bp::class_<PyLayer>(
"Layer", bp::no_init)
.add_property("blobs", &PyLayer::blobs);
bp::class_<PySGDSolver, boost::noncopyable>(
"SGDSolver", bp::init<string>())
.add_property("net", &PySGDSolver::net)
.add_property("test_nets", &PySGDSolver::test_nets)
.add_property("iter", &PySGDSolver::iter)
.def("solve", &PySGDSolver::Solve)
.def("solve", &PySGDSolver::SolveResume)
.def("step", &PySGDSolver::Step);
bp::class_<vector<shared_ptr<PyNet> > >("NetVec")
.def(bp::vector_indexing_suite<vector<shared_ptr<PyNet> >, true>());
bp::class_<vector<PyBlob<float> > >("BlobVec")
.def(bp::vector_indexing_suite<vector<PyBlob<float> >, true>());
bp::class_<vector<PyLayer> >("LayerVec")
.def(bp::vector_indexing_suite<vector<PyLayer>, true>());
bp::class_<vector<string> >("StringVec")
.def(bp::vector_indexing_suite<vector<string> >());
import_array();
}
} // namespace caffe
| 1 | 31,890 | I've never seen this `boost::make_shared`...why use it instead of `reset`? | BVLC-caffe | cpp |
@@ -273,6 +273,11 @@ public abstract class AbstractExecutorManagerAdapter extends EventHandler implem
}
}
+ @Override
+ public DispatchMethod getDispatchMethod(final ExecutableFlow flow) {
+ return getDispatchMethod();
+ }
+
protected String uploadExecutableFlow(
final ExecutableFlow exflow, final String userId, final String flowId,
String message) throws ExecutorManagerException { | 1 | /*
* Copyright 2020 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import azkaban.Constants;
import azkaban.Constants.ConfigurationKeys;
import azkaban.DispatchMethod;
import azkaban.event.EventHandler;
import azkaban.flow.FlowUtils;
import azkaban.metrics.CommonMetrics;
import azkaban.project.Project;
import azkaban.project.ProjectWhitelist;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.Pair;
import azkaban.utils.Props;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is used as an abstract implementation for ExecutorManagerAdapter. It has common code
* for all the dispatch method implementations.
*/
public abstract class AbstractExecutorManagerAdapter extends EventHandler implements
ExecutorManagerAdapter {
private static final Logger logger =
LoggerFactory.getLogger(AbstractExecutorManagerAdapter.class);
protected final Props azkProps;
protected final ExecutorLoader executorLoader;
protected final CommonMetrics commonMetrics;
protected final ExecutorApiGateway apiGateway;
private final AlerterHolder alerterHolder;
private final int maxConcurrentRunsOneFlow;
private final Map<Pair<String, String>, Integer> maxConcurrentRunsPerFlowMap;
private static final Duration RECENTLY_FINISHED_LIFETIME = Duration.ofMinutes(10);
protected AbstractExecutorManagerAdapter(final Props azkProps,
final ExecutorLoader executorLoader,
final CommonMetrics commonMetrics,
final ExecutorApiGateway apiGateway,
final AlerterHolder alerterHolder) {
this.azkProps = azkProps;
this.executorLoader = executorLoader;
this.commonMetrics = commonMetrics;
this.apiGateway = apiGateway;
this.alerterHolder = alerterHolder;
this.maxConcurrentRunsOneFlow = ExecutorUtils.getMaxConcurrentRunsOneFlow(azkProps);
this.maxConcurrentRunsPerFlowMap = ExecutorUtils.getMaxConcurentRunsPerFlowMap(azkProps);
}
/**
* Fetch ExecutableFlow from database {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#getExecutableFlow(int)
*/
@Override
public ExecutableFlow getExecutableFlow(final int execId)
throws ExecutorManagerException {
return this.executorLoader.fetchExecutableFlow(execId);
}
/**
* This method is used to get size of aged queued flows from database.
*
* @return
*/
@Override
public long getAgedQueuedFlowSize() {
long size = 0L;
final int minimumAgeInMinutes = this.azkProps.getInt(
ConfigurationKeys.MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES,
Constants.DEFAULT_MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES);
long startTime = System.currentTimeMillis();
// TODO(anish-mal) FetchQueuedExecutableFlows does a lot of processing that is redundant, since
// all we care about is the count. Write a new class that's more performant and can be used for
// metrics. this.executorLoader.fetchAgedQueuedFlows internally calls FetchQueuedExecutableFlows.
try {
size = this.executorLoader.fetchAgedQueuedFlows(Duration.ofMinutes(minimumAgeInMinutes))
.size();
logger.info("Time taken to fetch size of queued flows is {}",
(System.currentTimeMillis() - startTime) / 1000);
} catch (final ExecutorManagerException e) {
logger.error("Failed to get flows queued for a long time.", e);
}
return size;
}
/**
* This method is used to get recently finished flows from database.
*
* @return
*/
@Override
public List<ExecutableFlow> getRecentlyFinishedFlows() {
List<ExecutableFlow> flows = new ArrayList<>();
try {
flows = this.executorLoader.fetchRecentlyFinishedFlows(
RECENTLY_FINISHED_LIFETIME);
} catch (final ExecutorManagerException e) {
logger.error("Failed to fetch recently finished flows.", e);
}
return flows;
}
/**
* This method is used to get history of executions for a flow.
*
* @param skip
* @param size
* @return
* @throws ExecutorManagerException
*/
@Override
public List<ExecutableFlow> getExecutableFlows(final int skip, final int size)
throws ExecutorManagerException {
return this.executorLoader.fetchFlowHistory(skip, size);
}
@Override
public List<ExecutableFlow> getExecutableFlows(final String flowIdContains,
final int skip, final int size) throws ExecutorManagerException {
return this.executorLoader.fetchFlowHistory(null, '%' + flowIdContains + '%', null,
0, -1, -1, skip, size);
}
@Override
public List<ExecutableFlow> getExecutableFlows(final String projContain,
final String flowContain, final String userContain, final int status, final long begin,
final long end,
final int skip, final int size) throws ExecutorManagerException {
return this.executorLoader.fetchFlowHistory(projContain, flowContain, userContain,
status, begin, end, skip, size);
}
@Override
public int getExecutableFlows(final int projectId, final String flowId, final int from,
final int length, final List<ExecutableFlow> outputList)
throws ExecutorManagerException {
final List<ExecutableFlow> flows =
this.executorLoader.fetchFlowHistory(projectId, flowId, from, length);
outputList.addAll(flows);
return this.executorLoader.fetchNumExecutableFlows(projectId, flowId);
}
@Override
public List<ExecutableFlow> getExecutableFlows(final int projectId, final String flowId,
final int from, final int length, final Status status) throws ExecutorManagerException {
return this.executorLoader.fetchFlowHistory(projectId, flowId, from, length,
status);
}
/**
* Manage servlet call for jmx servlet in Azkaban execution server {@inheritDoc}
*
* @param hostPort
* @param action
* @param mBean
* @return
* @throws IOException
*/
@Override
public Map<String, Object> callExecutorJMX(final String hostPort, final String action,
final String mBean) throws IOException {
final List<Pair<String, String>> paramList =
new ArrayList<>();
paramList.add(new Pair<>(action, ""));
if (mBean != null) {
paramList.add(new Pair<>(ConnectorParams.JMX_MBEAN, mBean));
}
final String[] hostPortSplit = hostPort.split(":");
return this.apiGateway.callForJsonObjectMap(hostPortSplit[0],
Integer.valueOf(hostPortSplit[1]), "/jmx", null, paramList);
}
/**
* Manage servlet call for stats servlet in Azkaban execution server {@inheritDoc}
*
* @see azkaban.executor.ExecutorManagerAdapter#callExecutorStats(int, java.lang.String,
* azkaban.utils.Pair[])
*/
@Override
public Map<String, Object> callExecutorStats(final int executorId, final String action,
final Pair<String, String>... params) throws IOException, ExecutorManagerException {
final Executor executor = fetchExecutor(executorId);
final List<Pair<String, String>> paramList =
new ArrayList<>();
// if params = null
if (params != null) {
paramList.addAll(Arrays.asList(params));
}
paramList
.add(new Pair<>(ConnectorParams.ACTION_PARAM, action));
return this.apiGateway.callForJsonObjectMap(executor.getHost(), executor.getPort(),
"/stats", null, paramList);
}
@Override
public Map<String, String> doRampActions(final List<Map<String, Object>> rampActions)
throws ExecutorManagerException {
return this.executorLoader.doRampActions(rampActions);
}
/**
* This method is used to get start status for executions in queue. By default it is PREPARING.
* Implementation of this abstract class can have it's own start status in queue.
*
* @return
*/
@Override
public Status getStartStatus() {
return Status.READY;
}
/**
* When a flow is submitted, insert a new execution into the database queue. {@inheritDoc}
*/
@Override
public String submitExecutableFlow(final ExecutableFlow exflow, final String userId)
throws ExecutorManagerException {
if (exflow.isLocked()) {
// Skip execution for locked flows.
final String message = String.format("Flow %s for project %s is locked.", exflow.getId(),
exflow.getProjectName());
logger.info(message);
return message;
}
final String exFlowKey = exflow.getProjectName() + "." + exflow.getId() + ".submitFlow";
// Use project and flow name to prevent race condition when same flow is submitted by API and
// schedule at the same time
// causing two same flow submission entering this piece.
synchronized (exFlowKey.intern()) {
final String flowId = exflow.getFlowId();
logger.info("Submitting execution flow " + flowId + " by " + userId);
String message = uploadExecutableFlow(exflow, userId, flowId, "");
this.commonMetrics.markSubmitFlowSuccess();
message += "Execution queued successfully with exec id " + exflow.getExecutionId();
return message;
}
}
protected String uploadExecutableFlow(
final ExecutableFlow exflow, final String userId, final String flowId,
String message) throws ExecutorManagerException {
final int projectId = exflow.getProjectId();
exflow.setSubmitUser(userId);
exflow.setStatus(getStartStatus());
exflow.setSubmitTime(System.currentTimeMillis());
exflow.setDispatchMethod(getDispatchMethod());
// Get collection of running flows given a project and a specific flow name
final List<Integer> running = getRunningFlows(projectId, flowId);
ExecutionOptions options = exflow.getExecutionOptions();
if (options == null) {
options = new ExecutionOptions();
}
if (options.getDisabledJobs() != null) {
FlowUtils.applyDisabledJobs(options.getDisabledJobs(), exflow);
}
if (!running.isEmpty()) {
final int maxConcurrentRuns = ExecutorUtils.getMaxConcurrentRunsForFlow(
exflow.getProjectName(), flowId, this.maxConcurrentRunsOneFlow,
this.maxConcurrentRunsPerFlowMap);
if (running.size() > maxConcurrentRuns) {
this.commonMetrics.markSubmitFlowSkip();
throw new ExecutorManagerException("Flow " + flowId
+ " has more than " + maxConcurrentRuns + " concurrent runs. Skipping",
ExecutorManagerException.Reason.SkippedExecution);
} else if (options.getConcurrentOption().equals(
ExecutionOptions.CONCURRENT_OPTION_PIPELINE)) {
Collections.sort(running);
final Integer runningExecId = running.get(running.size() - 1);
options.setPipelineExecutionId(runningExecId);
message =
"Flow " + flowId + " is already running with exec id "
+ runningExecId + ". Pipelining level "
+ options.getPipelineLevel() + ". \n";
} else if (options.getConcurrentOption().equals(
ExecutionOptions.CONCURRENT_OPTION_SKIP)) {
this.commonMetrics.markSubmitFlowSkip();
throw new ExecutorManagerException("Flow " + flowId
+ " is already running. Skipping execution.",
ExecutorManagerException.Reason.SkippedExecution);
} else {
// The settings is to run anyways.
message =
"Flow " + flowId + " is already running with exec id "
+ StringUtils.join(running, ",")
+ ". Will execute concurrently. \n";
}
}
final boolean memoryCheck =
!ProjectWhitelist.isProjectWhitelisted(exflow.getProjectId(),
ProjectWhitelist.WhitelistType.MemoryCheck);
options.setMemoryCheck(memoryCheck);
// The exflow id is set by the loader. So it's unavailable until after
// this call.
this.executorLoader.uploadExecutableFlow(exflow);
return message;
}
@Override
public List<ExecutableJobInfo> getExecutableJobs(final Project project,
final String jobId, final int skip, final int size) throws ExecutorManagerException {
return this.executorLoader.fetchJobHistory(project.getId(), jobId, skip, size);
}
@Override
public int getNumberOfJobExecutions(final Project project, final String jobId)
throws ExecutorManagerException {
return this.executorLoader.fetchNumExecutableNodes(project.getId(), jobId);
}
/**
* Gets a list of all the unfinished (both dispatched and non-dispatched) executions for a given
* project and flow {@inheritDoc}.
*
* @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows(int, java.lang.String)
*/
@Override
public List<Integer> getRunningFlows(final int projectId, final String flowId) {
final List<Integer> executionIds = new ArrayList<>();
try {
executionIds.addAll(ExecutorUtils.getRunningFlowsHelper(projectId, flowId,
this.executorLoader.fetchUnfinishedFlows().values()));
} catch (final ExecutorManagerException e) {
logger.error("Failed to get running flows for project " + projectId + ", flow "
+ flowId, e);
}
return executionIds;
}
/**
* Get all running (unfinished) flows from database. {@inheritDoc}
*/
@Override
public List<ExecutableFlow> getRunningFlows() {
final ArrayList<ExecutableFlow> flows = new ArrayList<>();
try {
getFlowsHelper(flows, this.executorLoader.fetchUnfinishedFlows().values());
} catch (final ExecutorManagerException e) {
logger.error("Failed to get running flows.", e);
}
return flows;
}
protected LogData getFlowLogData(final ExecutableFlow exFlow, final int offset, final int length,
final Pair<ExecutionReference, ExecutableFlow> pair) throws ExecutorManagerException {
if (pair != null) {
final Pair<String, String> typeParam = new Pair<>("type", "flow");
final Pair<String, String> offsetParam =
new Pair<>("offset", String.valueOf(offset));
final Pair<String, String> lengthParam =
new Pair<>("length", String.valueOf(length));
@SuppressWarnings("unchecked") final Map<String, Object> result =
this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.LOG_ACTION,
typeParam, offsetParam, lengthParam);
return LogData.createLogDataFromObject(result);
} else {
return this.executorLoader.fetchLogs(exFlow.getExecutionId(), "", 0, offset,
length);
}
}
protected LogData getJobLogData(final ExecutableFlow exFlow, final String jobId, final int offset,
final int length,
final int attempt, final Pair<ExecutionReference, ExecutableFlow> pair)
throws ExecutorManagerException {
if (pair != null) {
final Pair<String, String> typeParam = new Pair<>("type", "job");
final Pair<String, String> jobIdParam =
new Pair<>("jobId", jobId);
final Pair<String, String> offsetParam =
new Pair<>("offset", String.valueOf(offset));
final Pair<String, String> lengthParam =
new Pair<>("length", String.valueOf(length));
final Pair<String, String> attemptParam =
new Pair<>("attempt", String.valueOf(attempt));
@SuppressWarnings("unchecked") final Map<String, Object> result =
this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.LOG_ACTION,
typeParam, jobIdParam, offsetParam, lengthParam, attemptParam);
return LogData.createLogDataFromObject(result);
} else {
return this.executorLoader.fetchLogs(exFlow.getExecutionId(), jobId, attempt,
offset, length);
}
}
protected List<Object> getExecutionJobStats(
final ExecutableFlow exFlow, final String jobId, final int attempt,
final Pair<ExecutionReference, ExecutableFlow> pair) throws ExecutorManagerException {
if (pair == null) {
return this.executorLoader.fetchAttachments(exFlow.getExecutionId(), jobId,
attempt);
}
final Pair<String, String> jobIdParam = new Pair<>("jobId", jobId);
final Pair<String, String> attemptParam =
new Pair<>("attempt", String.valueOf(attempt));
@SuppressWarnings("unchecked") final Map<String, Object> result =
this.apiGateway.callWithReference(pair.getFirst(), ConnectorParams.ATTACHMENTS_ACTION,
jobIdParam, attemptParam);
@SuppressWarnings("unchecked") final List<Object> jobStats = (List<Object>) result
.get("attachments");
return jobStats;
}
@Override
public LogData getExecutableFlowLog(final ExecutableFlow exFlow, final int offset,
final int length) throws ExecutorManagerException {
final Pair<ExecutionReference, ExecutableFlow> pair = this.executorLoader
.fetchActiveFlowByExecId(exFlow.getExecutionId());
return getFlowLogData(exFlow, offset, length, pair);
}
@Override
public LogData getExecutionJobLog(final ExecutableFlow exFlow, final String jobId,
final int offset, final int length, final int attempt) throws ExecutorManagerException {
final Pair<ExecutionReference, ExecutableFlow> pair = this.executorLoader
.fetchActiveFlowByExecId(exFlow.getExecutionId());
return getJobLogData(exFlow, jobId, offset, length, attempt, pair);
}
@Override
public List<Object> getExecutionJobStats(final ExecutableFlow exFlow, final String jobId,
final int attempt) throws ExecutorManagerException {
final Pair<ExecutionReference, ExecutableFlow> pair =
this.executorLoader.fetchActiveFlowByExecId(exFlow.getExecutionId());
return getExecutionJobStats(exFlow, jobId, attempt, pair);
}
/**
* If a flow is already dispatched to an executor, cancel by calling Executor. Else if it's still
* queued in DB, remove it from DB queue and finalize. {@inheritDoc}
*/
@Override
public void cancelFlow(final ExecutableFlow exFlow, final String userId)
throws ExecutorManagerException {
synchronized (exFlow) {
final Map<Integer, Pair<ExecutionReference, ExecutableFlow>> unfinishedFlows = this.executorLoader
.fetchUnfinishedFlows();
if (unfinishedFlows.containsKey(exFlow.getExecutionId())) {
final Pair<ExecutionReference, ExecutableFlow> pair = unfinishedFlows
.get(exFlow.getExecutionId());
if (pair.getFirst().getExecutor().isPresent()) {
// Flow is already dispatched to an executor, so call that executor to cancel the flow.
this.apiGateway
.callWithReferenceByUser(pair.getFirst(), ConnectorParams.CANCEL_ACTION, userId);
} else {
// Flow is still queued, need to finalize it and update the status in DB.
ExecutionControllerUtils.finalizeFlow(this.executorLoader, this.alerterHolder, exFlow,
"Cancelled before dispatching to executor", null);
}
} else {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
}
}
protected Map<String, Object> modifyExecutingJobs(final ExecutableFlow exFlow,
final String command,
final String userId, final Pair<ExecutionReference, ExecutableFlow> pair,
final String[] jobIds)
throws ExecutorManagerException {
if (pair == null) {
throw new ExecutorManagerException("Execution "
+ exFlow.getExecutionId() + " of flow " + exFlow.getFlowId()
+ " isn't running.");
}
final Map<String, Object> response;
if (jobIds != null && jobIds.length > 0) {
for (final String jobId : jobIds) {
if (!jobId.isEmpty()) {
final ExecutableNode node = exFlow.getExecutableNode(jobId);
if (node == null) {
throw new ExecutorManagerException("Job " + jobId
+ " doesn't exist in execution " + exFlow.getExecutionId()
+ ".");
}
}
}
final String ids = StringUtils.join(jobIds, ',');
response =
this.apiGateway.callWithReferenceByUser(pair.getFirst(),
ConnectorParams.MODIFY_EXECUTION_ACTION, userId,
new Pair<>(
ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command),
new Pair<>(ConnectorParams.MODIFY_JOBS_LIST, ids));
} else {
response =
this.apiGateway.callWithReferenceByUser(pair.getFirst(),
ConnectorParams.MODIFY_EXECUTION_ACTION, userId,
new Pair<>(
ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command));
}
return response;
}
/**
* If the Resource Manager and Job History server urls are configured, find all the Hadoop/Spark
* application ids present in the Azkaban job's log and then construct the url to job logs in the
* Hadoop/Spark server for each application id found. Application ids are returned in the order
* they appear in the Azkaban job log.
*
* @param exFlow The executable flow.
* @param jobId The job id.
* @param attempt The job execution attempt.
* @return The map of (application id, job log url)
*/
@Override
public Map<String, String> getExternalJobLogUrls(final ExecutableFlow exFlow, final String jobId,
final int attempt) {
final Map<String, String> jobLogUrlsByAppId = new LinkedHashMap<>();
final Set<String> applicationIds = getApplicationIds(exFlow, jobId, attempt);
for (final String applicationId : applicationIds) {
final String jobLogUrl = ExecutionControllerUtils
.createJobLinkUrl(exFlow, jobId, applicationId, this.azkProps);
if (jobLogUrl != null) {
jobLogUrlsByAppId.put(applicationId, jobLogUrl);
}
}
return jobLogUrlsByAppId;
}
@Override
public List<Pair<ExecutableFlow, Optional<Executor>>> getActiveFlowsWithExecutor() {
final List<Pair<ExecutableFlow, Optional<Executor>>> flows = new ArrayList<>();
try {
getActiveFlowsWithExecutorHelper(flows, this.executorLoader.fetchUnfinishedFlows().values());
} catch (final ExecutorManagerException e) {
logger.error("Failed to get active flows with executor.", e);
}
return flows;
}
/**
* Checks whether the given flow has an active (running, non-dispatched) execution from database.
* {@inheritDoc}
*/
@Override
public boolean isFlowRunning(final int projectId, final String flowId) {
boolean isRunning = false;
try {
isRunning = isFlowRunningHelper(projectId, flowId,
this.executorLoader.fetchUnfinishedFlows().values());
} catch (final ExecutorManagerException e) {
logger.error(
"Failed to check if the flow is running for project " + projectId + ", flow " + flowId,
e);
}
return isRunning;
}
/**
* Find all the Hadoop/Spark application ids present in the Azkaban job log. When iterating over
* the set returned by this method the application ids are in the same order they appear in the
* log.
*
* @param exFlow The executable flow.
* @param jobId The job id.
* @param attempt The job execution attempt.
* @return The application ids found.
*/
Set<String> getApplicationIds(final ExecutableFlow exFlow, final String jobId,
final int attempt) {
final Set<String> applicationIds = new LinkedHashSet<>();
int offset = 0;
try {
LogData data = getExecutionJobLog(exFlow, jobId, offset, 50000, attempt);
while (data != null && data.getLength() > 0) {
logger.info("Get application ID for execution " + exFlow.getExecutionId() + ", job"
+ " " + jobId + ", attempt " + attempt + ", data offset " + offset);
String logData = data.getData();
final int indexOfLastSpace = logData.lastIndexOf(' ');
final int indexOfLastTab = logData.lastIndexOf('\t');
final int indexOfLastEoL = logData.lastIndexOf('\n');
final int indexOfLastDelim = Math
.max(indexOfLastEoL, Math.max(indexOfLastSpace, indexOfLastTab));
if (indexOfLastDelim > -1) {
// index + 1 to avoid looping forever if indexOfLastDelim is zero
logData = logData.substring(0, indexOfLastDelim + 1);
}
applicationIds.addAll(ExecutionControllerUtils.findApplicationIdsFromLog(logData));
offset = data.getOffset() + logData.length();
data = getExecutionJobLog(exFlow, jobId, offset, 50000, attempt);
}
} catch (final ExecutorManagerException e) {
logger.error("Failed to get application ID for execution " + exFlow.getExecutionId() +
", job " + jobId + ", attempt " + attempt + ", data offset " + offset, e);
}
return applicationIds;
}
/* Helper method to get all execution ids from collection in sorted order. */
protected void getExecutionIdsHelper(final List<Integer> allIds,
final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
collection.stream().forEach(ref -> allIds.add(ref.getSecond().getExecutionId()));
Collections.sort(allIds);
}
/* Search a running flow in a collection */
protected boolean isFlowRunningHelper(final int projectId, final String flowId,
final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) {
if (ref.getSecond().getProjectId() == projectId
&& ref.getSecond().getFlowId().equals(flowId)) {
return true;
}
}
return false;
}
/**
* Helper method to get all flows from collection.
*/
protected void getFlowsHelper(final ArrayList<ExecutableFlow> flows,
final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
collection.stream().forEach(ref -> flows.add(ref.getSecond()));
}
/* Helper method for getActiveFlowsWithExecutor */
protected void getActiveFlowsWithExecutorHelper(
final List<Pair<ExecutableFlow, Optional<Executor>>> flows,
final Collection<Pair<ExecutionReference, ExecutableFlow>> collection) {
for (final Pair<ExecutionReference, ExecutableFlow> ref : collection) {
flows.add(new Pair<>(ref.getSecond(), ref
.getFirst().getExecutor()));
}
}
}
| 1 | 21,610 | Wasn't this added as a part of different PR already? | azkaban-azkaban | java |
@@ -8,7 +8,7 @@ class UrlListData
public const FIELD_SLUG = 'slug';
/**
- * @var \Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\FriendlyUrl[]
+ * @var \Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\FriendlyUrl[][]
*/
public $toDelete;
| 1 | <?php
namespace Shopsys\FrameworkBundle\Component\Router\FriendlyUrl;
class UrlListData
{
public const FIELD_DOMAIN = 'domain';
public const FIELD_SLUG = 'slug';
/**
* @var \Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\FriendlyUrl[]
*/
public $toDelete;
/**
* @var \Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\FriendlyUrl[]
*/
public $mainFriendlyUrlsByDomainId;
/**
* @var array[]
*
* Format:
* [
* [
* 'domain' => 1,
* 'slug' => 'slug-for-the-first-domain',
* ],
* ...
* ]
*
* @see \Shopsys\FrameworkBundle\Component\Router\FriendlyUrl\FriendlyUrlFacade::saveUrlListFormData()
*/
public $newUrls;
public function __construct()
{
$this->toDelete = [];
$this->mainFriendlyUrlsByDomainId = [];
$this->newUrls = [];
}
}
| 1 | 16,306 | hmmmmm, should not be in constructor in this case `$this->toDelete = [[]];` ??? maybe not | shopsys-shopsys | php |
@@ -114,8 +114,11 @@ def toggle_routing_control_state(routing_control_arn, cluster_endpoints):
update_state = 'Off' if state == 'On' else 'On'
print(f"Setting control state to '{update_state}'.")
response = update_routing_control_state(routing_control_arn, cluster_endpoints, update_state)
- if response['ResponseMetadata']['HTTPStatusCode'] == 200:
+ status = response.get('ResponseMetadata', {}).get('HTTPStatusCode', 'Unknown')
+ if status == 200:
print('Success!')
+ else:
+ print(f'Something went wrong. Status: {status}.')
print('-'*88)
# snippet-end:[python.example_code.route53-recovery-cluster.Scenario_SetControlState]
| 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with Amazon Route 53 Application
Recovery Controller to manage routing controls.
"""
import argparse
import json
# snippet-start:[python.example_code.route53-recovery-cluster.helper.get_recovery_client]
import boto3
def create_recovery_client(cluster_endpoint):
"""
Creates a Boto3 Route 53 Application Recovery Controller for the specified
cluster endpoint URL and AWS Region.
:param cluster_endpoint: The cluster endpoint URL and Region.
:return: The Boto3 client.
"""
return boto3.client(
'route53-recovery-cluster',
endpoint_url=cluster_endpoint['Endpoint'],
region_name=cluster_endpoint['Region'])
# snippet-end:[python.example_code.route53-recovery-cluster.helper.get_recovery_client]
# snippet-start:[python.example_code.route53-recovery-cluster.GetRoutingControlState]
def get_routing_control_state(routing_control_arn, cluster_endpoints):
"""
Gets the state of a routing control for a cluster. Endpoints are tried in
sequence until the first successful response is received.
:param routing_control_arn: The ARN of the routing control to look up.
:param cluster_endpoints: The list of cluster endpoints to query.
:return: The routing control state response.
"""
for cluster_endpoint in cluster_endpoints:
try:
recovery_client = create_recovery_client(cluster_endpoint)
response = recovery_client.get_routing_control_state(
RoutingControlArn=routing_control_arn)
return response
except Exception as error:
print(error)
# snippet-end:[python.example_code.route53-recovery-cluster.GetRoutingControlState]
# snippet-start:[python.example_code.route53-recovery-cluster.UpdateRoutingControlState]
def update_routing_control_state(
routing_control_arn, cluster_endpoints, routing_control_state):
"""
Updates the state of a routing control for a cluster. Endpoints are tried in
sequence until the first successful response is received.
:param routing_control_arn: The ARN of the routing control to set.
:param cluster_endpoints: The list of cluster endpoints to set.
:param routing_control_state: The state to set for the routing control.
:return: The routing control update response for each endpoint.
"""
for cluster_endpoint in cluster_endpoints:
try:
recovery_client = create_recovery_client(cluster_endpoint)
response = recovery_client.update_routing_control_state(
RoutingControlArn=routing_control_arn,
RoutingControlState=routing_control_state)
return response
except Exception as error:
print(error)
# snippet-end:[python.example_code.route53-recovery-cluster.UpdateRoutingControlState]
# snippet-start:[python.example_code.route53-recovery-cluster.UpdateRoutingControlStates]
def update_routing_control_states(
update_routing_control_state_entries, cluster_endpoints):
"""
Updates the state of a list of routing controls for cluster. Endpoints are tried in
sequence until the first successful response is received.
:param update_routing_control_state_entries: The list of routing controls to
update for each endpoint and the state
to set them to.
:param cluster_endpoints: The list of cluster endpoints to set.
:return: The routing control update response for each endpoint.
"""
for cluster_endpoint in cluster_endpoints:
try:
recovery_client = create_recovery_client(cluster_endpoint)
response = recovery_client.update_routing_control_states(
UpdateRoutingControlStateEntries=update_routing_control_state_entries)
return response
except Exception as error:
print(error)
# snippet-end:[python.example_code.route53-recovery-cluster.UpdateRoutingControlStates]
# snippet-start:[python.example_code.route53-recovery-cluster.Scenario_SetControlState]
def toggle_routing_control_state(routing_control_arn, cluster_endpoints):
"""
Shows how to get and set the state of a routing control for a cluster.
"""
response = get_routing_control_state(routing_control_arn, cluster_endpoints)
state = response['RoutingControlState']
print('-'*88)
print(
f"Starting state of control {routing_control_arn}: {state}")
print('-'*88)
update_state = 'Off' if state == 'On' else 'On'
print(f"Setting control state to '{update_state}'.")
response = update_routing_control_state(routing_control_arn, cluster_endpoints, update_state)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
print('Success!')
print('-'*88)
# snippet-end:[python.example_code.route53-recovery-cluster.Scenario_SetControlState]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('routing_control_arn', help="The ARN of the routing control.")
parser.add_argument(
'cluster_endpoints', help="The list of endpoints for the cluster, in JSON format.")
args = parser.parse_args()
toggle_routing_control_state(
args.routing_control_arn, json.loads(args.cluster_endpoints))
| 1 | 21,304 | We can just update it to be as follows: `if response: print("Success") else: print("Error")` | awsdocs-aws-doc-sdk-examples | rb |
@@ -119,6 +119,7 @@ type Helper struct {
matcherDefs map[string]caddy.ModuleMap
parentBlock caddyfile.ServerBlock
groupCounter counter
+ state map[string]interface{}
}
// Option gets the option keyed by name. | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"encoding/json"
"sort"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
// directiveOrder specifies the order
// to apply directives in HTTP routes.
var directiveOrder = []string{
"redir",
"rewrite",
"root",
"strip_prefix",
"strip_suffix",
"uri_replace",
"try_files",
// middleware handlers that typically wrap responses
"basicauth",
"header",
"request_header",
"encode",
"templates",
"handle",
"route",
// handlers that typically respond to requests
"respond",
"reverse_proxy",
"php_fastcgi",
"file_server",
}
// directiveIsOrdered returns true if dir is
// a known, ordered (sorted) directive.
func directiveIsOrdered(dir string) bool {
for _, d := range directiveOrder {
if d == dir {
return true
}
}
return false
}
// RegisterDirective registers a unique directive dir with an
// associated unmarshaling (setup) function. When directive dir
// is encountered in a Caddyfile, setupFunc will be called to
// unmarshal its tokens.
func RegisterDirective(dir string, setupFunc UnmarshalFunc) {
if _, ok := registeredDirectives[dir]; ok {
panic("directive " + dir + " already registered")
}
registeredDirectives[dir] = setupFunc
}
// RegisterHandlerDirective is like RegisterDirective, but for
// directives which specifically output only an HTTP handler.
// Directives registered with this function will always have
// an optional matcher token as the first argument.
func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
RegisterDirective(dir, func(h Helper) ([]ConfigValue, error) {
if !h.Next() {
return nil, h.ArgErr()
}
matcherSet, ok, err := h.MatcherToken()
if err != nil {
return nil, err
}
if ok {
// strip matcher token; we don't need to
// use the return value here because a
// new dispenser should have been made
// solely for this directive's tokens,
// with no other uses of same slice
h.Dispenser.Delete()
}
h.Dispenser.Reset() // pretend this lookahead never happened
val, err := setupFunc(h)
if err != nil {
return nil, err
}
return h.NewRoute(matcherSet, val), nil
})
}
// Helper is a type which helps setup a value from
// Caddyfile tokens.
type Helper struct {
*caddyfile.Dispenser
options map[string]interface{}
warnings *[]caddyconfig.Warning
matcherDefs map[string]caddy.ModuleMap
parentBlock caddyfile.ServerBlock
groupCounter counter
}
// Option gets the option keyed by name.
func (h Helper) Option(name string) interface{} {
return h.options[name]
}
// Caddyfiles returns the list of config files from
// which tokens in the current server block were loaded.
func (h Helper) Caddyfiles() []string {
// first obtain set of names of files involved
// in this server block, without duplicates
files := make(map[string]struct{})
for _, segment := range h.parentBlock.Segments {
for _, token := range segment {
files[token.File] = struct{}{}
}
}
// then convert the set into a slice
filesSlice := make([]string, 0, len(files))
for file := range files {
filesSlice = append(filesSlice, file)
}
return filesSlice
}
// JSON converts val into JSON. Any errors are added to warnings.
func (h Helper) JSON(val interface{}) json.RawMessage {
return caddyconfig.JSON(val, h.warnings)
}
// MatcherToken assumes the next argument token is (possibly) a matcher,
// and if so, returns the matcher set along with a true value. If the next
// token is not a matcher, nil and false is returned. Note that a true
// value may be returned with a nil matcher set if it is a catch-all.
func (h Helper) MatcherToken() (caddy.ModuleMap, bool, error) {
if !h.NextArg() {
return nil, false, nil
}
return matcherSetFromMatcherToken(h.Dispenser.Token(), h.matcherDefs, h.warnings)
}
// ExtractMatcherSet is like MatcherToken, except this is a higher-level
// method that returns the matcher set described by the matcher token,
// or nil if there is none, and deletes the matcher token from the
// dispenser and resets it as if this look-ahead never happened. Useful
// when wrapping a route (one or more handlers) in a user-defined matcher.
func (h Helper) ExtractMatcherSet() (caddy.ModuleMap, error) {
matcherSet, hasMatcher, err := h.MatcherToken()
if err != nil {
return nil, err
}
if hasMatcher {
h.Dispenser.Delete() // strip matcher token
}
h.Dispenser.Reset() // pretend this lookahead never happened
return matcherSet, nil
}
// NewRoute returns config values relevant to creating a new HTTP route.
func (h Helper) NewRoute(matcherSet caddy.ModuleMap,
handler caddyhttp.MiddlewareHandler) []ConfigValue {
mod, err := caddy.GetModule(caddy.GetModuleID(handler))
if err != nil {
*h.warnings = append(*h.warnings, caddyconfig.Warning{
File: h.File(),
Line: h.Line(),
Message: err.Error(),
})
return nil
}
var matcherSetsRaw []caddy.ModuleMap
if matcherSet != nil {
matcherSetsRaw = append(matcherSetsRaw, matcherSet)
}
return []ConfigValue{
{
Class: "route",
Value: caddyhttp.Route{
MatcherSetsRaw: matcherSetsRaw,
HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", mod.ID.Name(), h.warnings)},
},
},
}
}
// GroupRoutes adds the routes (caddyhttp.Route type) in vals to the
// same group, if there is more than one route in vals.
func (h Helper) GroupRoutes(vals []ConfigValue) {
// ensure there's at least two routes; group of one is pointless
var count int
for _, v := range vals {
if _, ok := v.Value.(caddyhttp.Route); ok {
count++
if count > 1 {
break
}
}
}
if count < 2 {
return
}
// now that we know the group will have some effect, do it
groupName := h.groupCounter.nextGroup()
for i := range vals {
if route, ok := vals[i].Value.(caddyhttp.Route); ok {
route.Group = groupName
vals[i].Value = route
}
}
}
// NewBindAddresses returns config values relevant to adding
// listener bind addresses to the config.
func (h Helper) NewBindAddresses(addrs []string) []ConfigValue {
return []ConfigValue{{Class: "bind", Value: addrs}}
}
// ConfigValue represents a value to be added to the final
// configuration, or a value to be consulted when building
// the final configuration.
type ConfigValue struct {
// The kind of value this is. As the config is
// being built, the adapter will look in the
// "pile" for values belonging to a certain
// class when it is setting up a certain part
// of the config. The associated value will be
// type-asserted and placed accordingly.
Class string
// The value to be used when building the config.
// Generally its type is associated with the
// name of the Class.
Value interface{}
directive string
}
func sortRoutes(routes []ConfigValue) {
dirPositions := make(map[string]int)
for i, dir := range directiveOrder {
dirPositions[dir] = i
}
// while we are sorting, we will need to decode a route's path matcher
// in order to sub-sort by path length; we can amortize this operation
// for efficiency by storing the decoded matchers in a slice
decodedMatchers := make([]caddyhttp.MatchPath, len(routes))
sort.SliceStable(routes, func(i, j int) bool {
iDir, jDir := routes[i].directive, routes[j].directive
if iDir == jDir {
// directives are the same; sub-sort by path matcher length
// if there's only one matcher set and one path (common case)
iRoute, ok := routes[i].Value.(caddyhttp.Route)
if !ok {
return false
}
jRoute, ok := routes[j].Value.(caddyhttp.Route)
if !ok {
return false
}
// use already-decoded matcher, or decode if it's the first time seeing it
iPM, jPM := decodedMatchers[i], decodedMatchers[j]
if iPM == nil && len(iRoute.MatcherSetsRaw) == 1 {
var pathMatcher caddyhttp.MatchPath
_ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &pathMatcher)
decodedMatchers[i] = pathMatcher
iPM = pathMatcher
}
if jPM == nil && len(jRoute.MatcherSetsRaw) == 1 {
var pathMatcher caddyhttp.MatchPath
_ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &pathMatcher)
decodedMatchers[j] = pathMatcher
jPM = pathMatcher
}
// sort by longer path (more specific) first; missing
// path matchers are treated as zero-length paths
var iPathLen, jPathLen int
if iPM != nil {
iPathLen = len(iPM[0])
}
if jPM != nil {
jPathLen = len(jPM[0])
}
return iPathLen > jPathLen
}
return dirPositions[iDir] < dirPositions[jDir]
})
}
// parseSegmentAsSubroute parses the segment such that its subdirectives
// are themselves treated as directives, from which a subroute is built
// and returned.
func parseSegmentAsSubroute(h Helper) (caddyhttp.MiddlewareHandler, error) {
var allResults []ConfigValue
for h.Next() {
// slice the linear list of tokens into top-level segments
var segments []caddyfile.Segment
for nesting := h.Nesting(); h.NextBlock(nesting); {
segments = append(segments, h.NextSegment())
}
// copy existing matcher definitions so we can augment
// new ones that are defined only in this scope
matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs))
for key, val := range h.matcherDefs {
matcherDefs[key] = val
}
// find and extract any embedded matcher definitions in this scope
for i, seg := range segments {
if strings.HasPrefix(seg.Directive(), matcherPrefix) {
err := parseMatcherDefinitions(caddyfile.NewDispenser(seg), matcherDefs)
if err != nil {
return nil, err
}
segments = append(segments[:i], segments[i+1:]...)
}
}
// with matchers ready to go, evaluate each directive's segment
for _, seg := range segments {
dir := seg.Directive()
dirFunc, ok := registeredDirectives[dir]
if !ok {
return nil, h.Errf("unrecognized directive: %s", dir)
}
subHelper := h
subHelper.Dispenser = caddyfile.NewDispenser(seg)
subHelper.matcherDefs = matcherDefs
results, err := dirFunc(subHelper)
if err != nil {
return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err)
}
for _, result := range results {
result.directive = dir
allResults = append(allResults, result)
}
}
}
return buildSubroute(allResults, h.groupCounter)
}
// serverBlock pairs a Caddyfile server block
// with a "pile" of config values, keyed by class
// name.
type serverBlock struct {
block caddyfile.ServerBlock
pile map[string][]ConfigValue // config values obtained from directives
}
type (
// UnmarshalFunc is a function which can unmarshal Caddyfile
// tokens into zero or more config values using a Helper type.
// These are passed in a call to RegisterDirective.
UnmarshalFunc func(h Helper) ([]ConfigValue, error)
// UnmarshalHandlerFunc is like UnmarshalFunc, except the
// output of the unmarshaling is an HTTP handler. This
// function does not need to deal with HTTP request matching
// which is abstracted away. Since writing HTTP handlers
// with Caddyfile support is very common, this is a more
// convenient way to add a handler to the chain since a lot
// of the details common to HTTP handlers are taken care of
// for you. These are passed to a call to
// RegisterHandlerDirective.
UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error)
)
var registeredDirectives = make(map[string]UnmarshalFunc)
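The hunk above adds an unexported state map to Helper. One way such shared parse state could be opened up to third-party directive plugins is an exported accessor rather than an exported field. The standalone sketch below uses a stand-in type; the names helper and State are inventions for this example, not Caddy's actual API.

package main

import "fmt"

// helper is a stand-in for a directive helper that carries shared parse
// state; it is not Caddy's Helper type, just an illustration of the idea.
type helper struct {
	state map[string]interface{}
}

// State exposes the shared map, so code outside the package could read and
// write values that other directives stashed during parsing.
func (h helper) State() map[string]interface{} {
	return h.state
}

func main() {
	h := helper{state: map[string]interface{}{}}
	h.State()["tls_automation"] = true
	fmt.Println(h.State()["tls_automation"]) // true
}

An accessor keeps the field itself unexported, so the struct layout can change later without breaking plugin code.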
| 1 | 14,240 | Maybe this should be exported so (external/third-party) plugins can also use it. | caddyserver-caddy | go |
@@ -38,17 +38,9 @@ func (service *servicePFCtl) Add(rule RuleForwarding) {
}
func (service *servicePFCtl) Start() error {
- err := service.ipForward.Enable()
- if err != nil {
- return err
- }
-
+ service.ipForward.Enable()
service.clearStaleRules()
- err = service.enableRules()
- if err != nil {
- return err
- }
- return nil
+ return service.enableRules()
}
func (service *servicePFCtl) Stop() { | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package nat
import (
"errors"
"fmt"
"net"
"os/exec"
"strings"
log "github.com/cihub/seelog"
"github.com/mysteriumnetwork/node/utils"
)
type servicePFCtl struct {
rules []RuleForwarding
ipForward serviceIPForward
}
func (service *servicePFCtl) Add(rule RuleForwarding) {
service.rules = append(service.rules, rule)
}
func (service *servicePFCtl) Start() error {
err := service.ipForward.Enable()
if err != nil {
return err
}
service.clearStaleRules()
err = service.enableRules()
if err != nil {
return err
}
return nil
}
func (service *servicePFCtl) Stop() {
service.disableRules()
service.ipForward.Disable()
}
func ifaceByAddress(ipAddress string) (string, error) {
ifaces, err := net.Interfaces()
if err != nil {
return "", err
}
for _, ifi := range ifaces {
addresses, err := ifi.Addrs()
if err != nil {
return "", err
}
for _, address := range addresses {
if address.(*net.IPNet).IP.String() == ipAddress {
return ifi.Name, nil
}
}
}
return "", errors.New("not able to determine outbound ethernet interface")
}
func (service *servicePFCtl) enableRules() error {
for _, rule := range service.rules {
iface, err := ifaceByAddress(rule.TargetIP)
if err != nil {
return err
}
natRule := fmt.Sprintf("nat on %v inet from %v to any -> %v", iface, rule.SourceAddress, rule.TargetIP)
arguments := fmt.Sprintf(`echo "%v" | /sbin/pfctl -vEf -`, natRule)
cmd := exec.Command(
"sh",
"-c",
arguments,
)
if output, err := cmd.CombinedOutput(); err != nil {
if !strings.Contains(string(output), natRule) {
log.Warn("Failed to create pfctl rule: ", cmd.Args, " Returned exit error: ", err.Error(), " Cmd output: ", string(output))
return err
}
}
log.Info(natLogPrefix, "NAT rule from '", rule.SourceAddress, "' to IP: ", rule.TargetIP, " added")
}
return nil
}
func (service *servicePFCtl) disableRules() {
cmd := utils.SplitCommand("/sbin/pfctl", "-F nat")
if output, err := cmd.CombinedOutput(); err != nil {
log.Warn("Failed cleanup pfctl rules: ", cmd.Args, " Returned exit error: ", err.Error(), " Cmd output: ", string(output))
}
log.Info(natLogPrefix, "NAT rules cleared")
}
func (service *servicePFCtl) clearStaleRules() {
service.disableRules()
}
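The patch for this file stops propagating the error from ipForward.Enable() in Start(). A hedged sketch of keeping that decision visible by logging a warning at the call site follows; the types and function below are simplified stand-ins, not the actual node wiring.

package main

import "log"

// forwarder is a stand-in for the service's IP-forwarding helper.
type forwarder interface {
	Enable() error
}

// startNAT shows one way to swallow the Enable() error deliberately while
// still recording why: log a warning here, in the high-level start logic,
// instead of silently dropping it.
func startNAT(f forwarder) error {
	if err := f.Enable(); err != nil {
		log.Printf("warning: failed to enable IP forwarding, continuing without it: %v", err)
	}
	// clear stale rules and install the new ones here
	return nil
}

type noopForwarder struct{}

func (noopForwarder) Enable() error { return nil }

func main() {
	_ = startNAT(noopForwarder{})
}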
| 1 | 12,007 | Warning logging whould be exactly here in high logic. So that everybody understands why we swallow errors | mysteriumnetwork-node | go |
@@ -46,6 +46,7 @@ namespace Nethermind.JsonRpc.WebSockets
string clientName,
ISocketHandler handler,
RpcEndpoint endpointType,
+ JsonRpcUrl? url,
IJsonRpcProcessor jsonRpcProcessor,
IJsonRpcService jsonRpcService,
IJsonRpcLocalStats jsonRpcLocalStats, | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Buffers;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Nethermind.Core;
using Nethermind.Core.Extensions;
using Nethermind.JsonRpc.Modules;
using Nethermind.Logging;
using Nethermind.Serialization.Json;
using Nethermind.Sockets;
namespace Nethermind.JsonRpc.WebSockets
{
public class JsonRpcSocketsClient : SocketClient, IJsonRpcDuplexClient
{
public event EventHandler Closed;
private readonly IJsonRpcProcessor _jsonRpcProcessor;
private readonly IJsonRpcService _jsonRpcService;
private readonly IJsonRpcLocalStats _jsonRpcLocalStats;
private readonly JsonRpcContext _jsonRpcContext;
public JsonRpcSocketsClient(
string clientName,
ISocketHandler handler,
RpcEndpoint endpointType,
IJsonRpcProcessor jsonRpcProcessor,
IJsonRpcService jsonRpcService,
IJsonRpcLocalStats jsonRpcLocalStats,
IJsonSerializer jsonSerializer)
: base(clientName, handler, jsonSerializer)
{
_jsonRpcProcessor = jsonRpcProcessor;
_jsonRpcService = jsonRpcService;
_jsonRpcLocalStats = jsonRpcLocalStats;
_jsonRpcContext = new JsonRpcContext(endpointType, this);
}
public override void Dispose()
{
base.Dispose();
Closed?.Invoke(this, EventArgs.Empty);
}
public override async Task ProcessAsync(ArraySegment<byte> data)
{
Stopwatch stopwatch = Stopwatch.StartNew();
IncrementBytesReceivedMetric(data.Count);
using TextReader request = new StreamReader(new MemoryStream(data.Array!, data.Offset, data.Count), Encoding.UTF8);
int allResponsesSize = 0;
await foreach (JsonRpcResult result in _jsonRpcProcessor.ProcessAsync(request, _jsonRpcContext))
{
using (result)
{
int singleResponseSize = await SendJsonRpcResult(result);
allResponsesSize += singleResponseSize;
if (result.IsCollection)
{
_jsonRpcLocalStats.ReportCalls(result.Reports);
long handlingTimeMicroseconds = stopwatch.ElapsedMicroseconds();
_jsonRpcLocalStats.ReportCall(new RpcReport("# collection serialization #", handlingTimeMicroseconds, true), handlingTimeMicroseconds, singleResponseSize);
stopwatch.Restart();
}
else
{
long handlingTimeMicroseconds = stopwatch.ElapsedMicroseconds();
_jsonRpcLocalStats.ReportCall(result.Report, handlingTimeMicroseconds, singleResponseSize);
stopwatch.Restart();
}
}
}
IncrementBytesSentMetric(allResponsesSize);
}
private void IncrementBytesReceivedMetric(int size)
{
if (_jsonRpcContext.RpcEndpoint == RpcEndpoint.WebSocket)
{
Interlocked.Add(ref Metrics.JsonRpcBytesReceivedWebSockets, size);
}
if (_jsonRpcContext.RpcEndpoint == RpcEndpoint.IPC)
{
Interlocked.Add(ref Metrics.JsonRpcBytesReceivedIpc, size);
}
}
private void IncrementBytesSentMetric(int size)
{
if (_jsonRpcContext.RpcEndpoint == RpcEndpoint.WebSocket)
{
Interlocked.Add(ref Metrics.JsonRpcBytesSentWebSockets, size);
}
if (_jsonRpcContext.RpcEndpoint == RpcEndpoint.IPC)
{
Interlocked.Add(ref Metrics.JsonRpcBytesSentIpc, size);
}
}
public virtual async Task<int> SendJsonRpcResult(JsonRpcResult result)
{
void SerializeTimeoutException(MemoryStream stream)
{
JsonRpcErrorResponse error = _jsonRpcService.GetErrorResponse(ErrorCodes.Timeout, "Request was canceled due to enabled timeout.");
_jsonSerializer.Serialize(stream, error);
}
await using MemoryStream resultData = new();
try
{
if (result.IsCollection)
{
_jsonSerializer.Serialize(resultData, result.Responses);
}
else
{
_jsonSerializer.Serialize(resultData, result.Response);
}
}
catch (Exception e) when (e.InnerException is OperationCanceledException)
{
SerializeTimeoutException(resultData);
}
catch (OperationCanceledException)
{
SerializeTimeoutException(resultData);
}
if (resultData.TryGetBuffer(out ArraySegment<byte> data))
{
await _handler.SendRawAsync(data);
return data.Count;
}
return (int)resultData.Length;
}
}
}
| 1 | 26,340 | if its optional, move it to last item an use JsonRpcUrl? url = null | NethermindEth-nethermind | .cs |
@@ -58,6 +58,14 @@ class User < ActiveRecord::Base
user
end
+ def client_model
+ Proposal.client_model_for(self)
+ end
+
+ def client_model_slug
+ client_model.to_s.underscore.tr("/", "_")
+ end
+
def add_role(role_name)
role = Role.find_or_create_by!(name: role_name)
user_roles.find_or_create_by!(role: role) | 1 | class User < ActiveRecord::Base
has_paper_trail class_name: 'C2Version'
validates :client_slug, inclusion: {
in: ->(_) { Proposal.client_slugs },
message: "'%{value}' is not in Proposal.client_slugs #{Proposal.client_slugs.inspect}",
allow_blank: true
}
validates :email_address, presence: true, uniqueness: true
validates_email_format_of :email_address
has_many :steps, dependent: :destroy
has_many :comments, dependent: :destroy
has_many :observations, dependent: :destroy
has_many :user_roles, dependent: :destroy
has_many :roles, through: :user_roles
has_many :proposals, foreign_key: "requester_id", dependent: :destroy
has_many :outgoing_delegations, class_name: 'ApprovalDelegate', foreign_key: 'assigner_id'
has_many :outgoing_delegates, through: :outgoing_delegations, source: :assignee
has_many :incoming_delegations, class_name: 'ApprovalDelegate', foreign_key: 'assignee_id'
has_many :incoming_delegates, through: :incoming_delegations, source: :assigner
has_many :completed_steps, class_name: "Step", foreign_key: "completer"
def self.active
where(active: true)
end
def self.sql_for_role_slug(role_name, slug)
with_role(role_name).select(:id).where(client_slug: slug).to_sql
end
def self.with_role(role_name)
User.joins(:roles).where(roles: { name: role_name })
end
def self.for_email(email)
User.find_or_create_by(email_address: email.strip.downcase)
end
def self.for_email_with_slug(email, client_slug)
user = for_email(email)
unless user.client_slug
user.client_slug = client_slug
end
user
end
def self.from_oauth_hash(auth_hash)
user_data = auth_hash.extra.raw_info.to_hash
user = for_email(user_data["email"])
user.update_names_if_present(user_data)
user
end
def add_role(role_name)
role = Role.find_or_create_by!(name: role_name)
user_roles.find_or_create_by!(role: role)
end
def full_name
if first_name.present? && last_name.present?
"#{first_name} #{last_name}"
else
email_address
end
end
def display_name
if full_name == email_address
email_address
else
"#{full_name} <#{email_address}>"
end
end
def last_requested_proposal
proposals.order("created_at DESC").first
end
def add_delegate(other)
outgoing_delegations.create!(assignee: other)
end
def delegates_to?(other)
outgoing_delegations.exists?(assignee: other)
end
def client_admin?
roles.exists?(name: "client_admin")
end
def admin?
roles.exists?(name: "admin")
end
def not_admin?
!admin?
end
def deactivated?
!active?
end
def update_names_if_present(user_data)
%w(first_name last_name).each do |field|
attr = field.to_sym
if user_data[field].present? && send(attr).blank?
update_attributes(attr => user_data[field])
end
end
end
def role_on(proposal)
RolePicker.new(self, proposal)
end
def requires_profile_attention?
first_name.blank? || last_name.blank?
end
end
| 1 | 16,174 | how is this different from the `client_slug` method already available on a `user` ? | 18F-C2 | rb |
@@ -61,7 +61,7 @@ class UnboundZmqEventBus implements EventBus {
return thread;
});
- LOG.info(String.format("Connecting to %s and %s", publishConnection, subscribeConnection));
+ LOG.finest(String.format("Connecting to %s and %s", publishConnection, subscribeConnection));
sub = context.createSocket(SocketType.SUB);
sub.connect(publishConnection); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.events.zeromq;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.collect.EvictingQueue;
import org.openqa.selenium.events.Event;
import org.openqa.selenium.events.EventBus;
import org.openqa.selenium.events.Type;
import org.openqa.selenium.json.Json;
import org.zeromq.SocketType;
import org.zeromq.ZContext;
import org.zeromq.ZMQ;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Queue;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.logging.Logger;
class UnboundZmqEventBus implements EventBus {
private static final Logger LOG = Logger.getLogger(EventBus.class.getName());
private static final Json JSON = new Json();
private final ExecutorService executor;
private final Map<Type, List<Consumer<Event>>> listeners = new ConcurrentHashMap<>();
private final Queue<UUID> recentMessages = EvictingQueue.create(128);
private ZMQ.Socket pub;
private ZMQ.Socket sub;
UnboundZmqEventBus(ZContext context, String publishConnection, String subscribeConnection) {
executor = Executors.newCachedThreadPool(r -> {
Thread thread = new Thread(r);
thread.setName("Event Bus");
thread.setDaemon(true);
return thread;
});
LOG.info(String.format("Connecting to %s and %s", publishConnection, subscribeConnection));
sub = context.createSocket(SocketType.SUB);
sub.connect(publishConnection);
sub.subscribe(new byte[0]);
pub = context.createSocket(SocketType.PUB);
pub.connect(subscribeConnection);
ZMQ.Poller poller = context.createPoller(1);
poller.register(sub, ZMQ.Poller.POLLIN);
LOG.info("Sockets created");
AtomicBoolean pollingStarted = new AtomicBoolean(false);
executor.submit(() -> {
LOG.info("Bus started");
while (!Thread.currentThread().isInterrupted()) {
try {
poller.poll(150);
pollingStarted.lazySet(true);
if (poller.pollin(0)) {
ZMQ.Socket socket = poller.getSocket(0);
Type type = new Type(new String(socket.recv(ZMQ.DONTWAIT), UTF_8));
UUID id = UUID.fromString(new String(socket.recv(ZMQ.DONTWAIT), UTF_8));
String data = new String(socket.recv(ZMQ.DONTWAIT), UTF_8);
Object converted = JSON.toType(data, Object.class);
Event event = new Event(id, type, converted);
if (recentMessages.contains(id)) {
continue;
}
recentMessages.add(id);
List<Consumer<Event>> typeListeners = listeners.get(type);
if (typeListeners == null) {
continue;
}
typeListeners.parallelStream().forEach(listener -> listener.accept(event));
}
} catch (Throwable e) {
if (e.getCause() != null && e.getCause() instanceof AssertionError) {
// Do nothing.
} else {
throw e;
}
}
}
});
// Give ourselves up to a second to connect, using The World's Worst heuristic. If we don't
// manage to connect, it's not the end of the world, as the socket we're connecting to may not
// be up yet.
while (!pollingStarted.get()) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
}
}
@Override
public void addListener(Type type, Consumer<Event> onType) {
Objects.requireNonNull(type, "Event type must be set.");
Objects.requireNonNull(onType, "Event listener must be set.");
List<Consumer<Event>> typeListeners = listeners.computeIfAbsent(type, t -> new LinkedList<>());
typeListeners.add(onType);
}
@Override
public void fire(Event event) {
Objects.requireNonNull(event, "Event to send must be set.");
pub.sendMore(event.getType().getName().getBytes(UTF_8));
pub.sendMore(event.getId().toString().getBytes(UTF_8));
pub.send(event.getRawData().getBytes(UTF_8));
}
@Override
public void close() {
executor.shutdown();
if (sub != null) {
sub.close();
}
if (pub != null) {
pub.close();
}
}
}
| 1 | 16,464 | I'd keep this at `info` level... | SeleniumHQ-selenium | py |
@@ -49,11 +49,14 @@ module Travis
fi
),
redirect_io: %(
- exec > >(
- %{curl}
- %{exports}
- ruby ~/filter.rb %{args}
- ) 2>&1
+ if [[ -z "$TRAVIS_FILTERED" ]]; then
+ export TRAVIS_FILTERED=1
+ exec 9>&1 1>(
+ %{curl}
+ %{exports}
+ ruby ~/filter.rb %{args}
+ ) 2>&1
+ fi
)
}
| 1 | require 'travis/build/appliances/base'
require 'travis/build/git'
require 'travis/rollout'
module Travis
module Build
module Appliances
class SetupFilter < Base
class Rollout < Struct.new(:data)
def matches?
Travis::Rollout.matches?(:redirect_io, uid: repo_id, owner: owner_login)
end
def repo_id
data.repository[:github_id]
end
def repo_slug
data.repository[:slug].to_s
end
def owner_login
repo_slug.split('/').first
end
end
ENABLED = true
HOST = 'build.travis-ci.com'
MSGS = {
filter: 'Using filter strategy %p for repo %s on job id=%s number=%s'
}
SH = {
curl: %(
curl -sf -o ~/filter.rb %{url}
if [ $? -ne 0 ]; then
echo "Download from %{url} failed. Trying %{fallback_url} ..."
curl -sf -o ~/filter.rb %{fallback_url}
fi
),
pty: %(
if [[ -z "$TRAVIS_FILTERED" ]]; then
export TRAVIS_FILTERED=1
%{curl}
%{exports}
exec ruby ~/filter.rb "/usr/bin/env TERM=xterm /bin/bash --login $HOME/build.sh" %{args}
fi
),
redirect_io: %(
exec > >(
%{curl}
%{exports}
ruby ~/filter.rb %{args}
) 2>&1
)
}
def apply?
enabled? and secrets.any?
end
def apply
info :filter, strategy, data.repository[:slug].to_s, data.job[:id], data.job[:number]
puts code if ENV['ROLLOUT_DEBUG']
sh.raw code
end
private
def code
data = { exports: exports, args: args, url: url, fallback_url: url(HOST) }
curl = SH[:curl] % data
SH[strategy] % data.merge(curl: curl)
end
def enabled?
config[:filter_secrets].nil? ? ENABLED : config[:filter_secrets]
end
def strategy
@strategy ||= Rollout.new(data).matches? ? :redirect_io : :pty
end
def url(host = nil)
host ||= app_host || HOST
url = "https://#{host}/filter/#{strategy}.rb"
Shellwords.escape(url).untaint
end
def args
secrets.size.times.map { |ix| "-e \"SECRET_#{ix}\"" }.join(" ")
end
def exports
values = secrets.map(&:untaint)
values = values.map { |value| Shellwords.escape(value) }
values = values.map.with_index { |value, ix| "export SECRET_#{ix}=#{value}" }
values.join(' ')
end
def secrets
@secrets ||= env.groups.flat_map(&:vars).select(&:secure?).map(&:value)
end
def env
@env ||= Build::Env.new(data)
end
def info(msg, *args)
Travis::Build.logger.info(MSGS[msg] % args)
end
end
end
end
end
| 1 | 15,462 | I believe this env var won't be set anywhere. Do we need this condition? | travis-ci-travis-build | rb |
@@ -14,7 +14,7 @@ function getWindowLocationSearch(win) {
return search || '';
}
-function getParameterByName(name, url) {
+window.getParameterByName = function (name, url) {
'use strict';
name = name.replace(/[\[]/, '\\[').replace(/[\]]/, '\\]'); | 1 | function getWindowLocationSearch(win) {
'use strict';
var search = (win || window).location.search;
if (!search) {
var index = window.location.href.indexOf('?');
if (-1 != index) {
search = window.location.href.substring(index);
}
}
return search || '';
}
function getParameterByName(name, url) {
'use strict';
name = name.replace(/[\[]/, '\\[').replace(/[\]]/, '\\]');
var regexS = '[\\?&]' + name + '=([^&#]*)';
var regex = new RegExp(regexS, 'i');
var results = regex.exec(url || getWindowLocationSearch());
if (null == results) {
return '';
}
return decodeURIComponent(results[1].replace(/\+/g, ' '));
}
function pageClassOn(eventName, className, fn) {
'use strict';
document.addEventListener(eventName, function (event) {
var target = event.target;
if (target.classList.contains(className)) {
fn.call(target, event);
}
});
}
function pageIdOn(eventName, id, fn) {
'use strict';
document.addEventListener(eventName, function (event) {
var target = event.target;
if (target.id === id) {
fn.call(target, event);
}
});
}
var Dashboard = {
getCurrentUser: function () {
return window.ApiClient.getCurrentUser(false);
},
//TODO: investigate url prefix support for serverAddress function
serverAddress: function () {
if (AppInfo.isNativeApp) {
var apiClient = window.ApiClient;
if (apiClient) {
return apiClient.serverAddress();
}
return null;
}
var urlLower = window.location.href.toLowerCase();
var index = urlLower.lastIndexOf('/web');
if (-1 != index) {
return urlLower.substring(0, index);
}
var loc = window.location;
var address = loc.protocol + '//' + loc.hostname;
if (loc.port) {
address += ':' + loc.port;
}
return address;
},
getCurrentUserId: function () {
var apiClient = window.ApiClient;
if (apiClient) {
return apiClient.getCurrentUserId();
}
return null;
},
onServerChanged: function (userId, accessToken, apiClient) {
apiClient = apiClient || window.ApiClient;
window.ApiClient = apiClient;
},
logout: function () {
ConnectionManager.logout().then(function () {
var loginPage;
if (AppInfo.isNativeApp) {
loginPage = 'selectserver.html';
window.ApiClient = null;
} else {
loginPage = 'login.html';
}
Dashboard.navigate(loginPage);
});
},
getConfigurationPageUrl: function (name) {
return 'configurationpage?name=' + encodeURIComponent(name);
},
getConfigurationResourceUrl: function (name) {
if (AppInfo.isNativeApp) {
return ApiClient.getUrl('web/ConfigurationPage', {
name: name
});
}
return Dashboard.getConfigurationPageUrl(name);
},
navigate: function (url, preserveQueryString) {
if (!url) {
throw new Error('url cannot be null or empty');
}
var queryString = getWindowLocationSearch();
if (preserveQueryString && queryString) {
url += queryString;
}
return new Promise(function (resolve, reject) {
require(['appRouter'], function (appRouter) {
return appRouter.show(url).then(resolve, reject);
});
});
},
navigate_direct: function (path) {
return new Promise(function (resolve, reject) {
require(['appRouter'], function (appRouter) {
return appRouter.showDirect(path).then(resolve, reject);
});
});
},
processPluginConfigurationUpdateResult: function () {
require(['loading', 'toast'], function (loading, toast) {
loading.hide();
toast(Globalize.translate('MessageSettingsSaved'));
});
},
processServerConfigurationUpdateResult: function (result) {
require(['loading', 'toast'], function (loading, toast) {
loading.hide();
toast(Globalize.translate('MessageSettingsSaved'));
});
},
processErrorResponse: function (response) {
require(['loading'], function (loading) {
loading.hide();
});
var status = '' + response.status;
if (response.statusText) {
status = response.statusText;
}
Dashboard.alert({
title: status,
message: response.headers ? response.headers.get('X-Application-Error-Code') : null
});
},
alert: function (options) {
if ('string' == typeof options) {
return void require(['toast'], function (toast) {
toast({
text: options
});
});
}
require(['alert'], function (alert) {
alert.default({
title: options.title || Globalize.translate('HeaderAlert'),
text: options.message
}).then(options.callback || function () {});
});
},
capabilities: function (appHost) {
var capabilities = {
PlayableMediaTypes: ['Audio', 'Video'],
SupportedCommands: ['MoveUp', 'MoveDown', 'MoveLeft', 'MoveRight', 'PageUp', 'PageDown', 'PreviousLetter', 'NextLetter', 'ToggleOsd', 'ToggleContextMenu', 'Select', 'Back', 'SendKey', 'SendString', 'GoHome', 'GoToSettings', 'VolumeUp', 'VolumeDown', 'Mute', 'Unmute', 'ToggleMute', 'SetVolume', 'SetAudioStreamIndex', 'SetSubtitleStreamIndex', 'DisplayContent', 'GoToSearch', 'DisplayMessage', 'SetRepeatMode', 'SetShuffleQueue', 'ChannelUp', 'ChannelDown', 'PlayMediaSource', 'PlayTrailers'],
SupportsPersistentIdentifier: 'cordova' === self.appMode || 'android' === self.appMode,
SupportsMediaControl: true
};
appHost.getPushTokenInfo();
return capabilities = Object.assign(capabilities, appHost.getPushTokenInfo());
},
selectServer: function () {
if (window.NativeShell && typeof window.NativeShell.selectServer === 'function') {
window.NativeShell.selectServer();
} else {
Dashboard.navigate('selectserver.html');
}
},
hideLoadingMsg: function() {
'use strict';
require(['loading'], function(loading) {
loading.hide();
});
},
showLoadingMsg: function() {
'use strict';
require(['loading'], function(loading) {
loading.show();
});
},
confirm: function(message, title, callback) {
'use strict';
require(['confirm'], function(confirm) {
confirm(message, title).then(function() {
callback(!0);
}).catch(function() {
callback(!1);
});
});
}
};
var AppInfo = {};
!function () {
'use strict';
function defineConnectionManager(connectionManager) {
window.ConnectionManager = connectionManager;
define('connectionManager', [], function () {
return connectionManager;
});
}
function bindConnectionManagerEvents(connectionManager, events, userSettings) {
window.Events = events;
connectionManager.currentApiClient = function () {
if (!localApiClient) {
var server = connectionManager.getLastUsedServer();
if (server) {
localApiClient = connectionManager.getApiClient(server.Id);
}
}
return localApiClient;
};
connectionManager.onLocalUserSignedIn = function (user) {
localApiClient = connectionManager.getApiClient(user.ServerId);
window.ApiClient = localApiClient;
return userSettings.setUserInfo(user.Id, localApiClient);
};
events.on(connectionManager, 'localusersignedout', function () {
userSettings.setUserInfo(null, null);
});
}
function createConnectionManager() {
return require(['connectionManagerFactory', 'apphost', 'credentialprovider', 'events', 'userSettings'], function (ConnectionManager, apphost, credentialProvider, events, userSettings) {
var credentialProviderInstance = new credentialProvider();
var promises = [apphost.getSyncProfile(), apphost.init()];
return Promise.all(promises).then(function (responses) {
var deviceProfile = responses[0];
var capabilities = Dashboard.capabilities(apphost);
capabilities.DeviceProfile = deviceProfile;
var connectionManager = new ConnectionManager(credentialProviderInstance, apphost.appName(), apphost.appVersion(), apphost.deviceName(), apphost.deviceId(), capabilities);
defineConnectionManager(connectionManager);
bindConnectionManagerEvents(connectionManager, events, userSettings);
if (!AppInfo.isNativeApp) {
console.debug('loading ApiClient singleton');
return require(['apiclient'], function (apiClientFactory) {
console.debug('creating ApiClient singleton');
var apiClient = new apiClientFactory(Dashboard.serverAddress(), apphost.appName(), apphost.appVersion(), apphost.deviceName(), apphost.deviceId());
apiClient.enableAutomaticNetworking = false;
apiClient.manualAddressOnly = true;
connectionManager.addApiClient(apiClient);
window.ApiClient = apiClient;
localApiClient = apiClient;
console.debug('loaded ApiClient singleton');
});
}
return Promise.resolve();
});
});
}
function returnFirstDependency(obj) {
return obj;
}
function returnDefault(obj) {
if (obj.default === null) {
throw new Error('Object has no default!');
}
return obj.default;
}
function getBowerPath() {
return 'libraries';
}
function getComponentsPath() {
return 'components';
}
function getElementsPath() {
return 'elements';
}
function getScriptsPath() {
return 'scripts';
}
function getPlaybackManager(playbackManager) {
window.addEventListener('beforeunload', function () {
try {
playbackManager.onAppClose();
} catch (err) {
console.error('error in onAppClose: ' + err);
}
});
return playbackManager;
}
function getLayoutManager(layoutManager, appHost) {
if (appHost.getDefaultLayout) {
layoutManager.defaultLayout = appHost.getDefaultLayout();
}
layoutManager.init();
return layoutManager;
}
function createSharedAppFooter({default: appFooter}) {
return new appFooter({});
}
function onRequireJsError(requireType, requireModules) {
console.error('RequireJS error: ' + (requireType || 'unknown') + '. Failed modules: ' + (requireModules || []).join(','));
}
function defineResizeObserver() {
if (self.ResizeObserver) {
define('ResizeObserver', [], function () {
return self.ResizeObserver;
});
} else {
define('ResizeObserver', ['resize-observer-polyfill'], returnFirstDependency);
}
}
function initRequireWithBrowser() {
var componentsPath = getComponentsPath();
var scriptsPath = getScriptsPath();
define('filesystem', [scriptsPath + '/filesystem'], returnFirstDependency);
define('lazyLoader', [componentsPath + '/lazyLoader/lazyLoaderIntersectionObserver'], returnFirstDependency);
define('shell', [scriptsPath + '/shell'], returnFirstDependency);
define('alert', [componentsPath + '/alert'], returnFirstDependency);
defineResizeObserver();
define('dialog', [componentsPath + '/dialog/dialog'], returnFirstDependency);
define('confirm', [componentsPath + '/confirm/confirm'], returnFirstDependency);
define('prompt', [componentsPath + '/prompt/prompt'], returnFirstDependency);
define('loading', [componentsPath + '/loading/loading'], returnFirstDependency);
define('multi-download', [scriptsPath + '/multiDownload'], returnFirstDependency);
define('fileDownloader', [scriptsPath + '/fileDownloader'], returnFirstDependency);
define('castSenderApiLoader', [componentsPath + '/castSenderApi'], returnFirstDependency);
}
function init() {
define('livetvcss', ['css!assets/css/livetv.css'], returnFirstDependency);
define('detailtablecss', ['css!assets/css/detailtable.css'], returnFirstDependency);
var promises = [];
if (!window.fetch) {
promises.push(require(['fetch']));
}
Promise.all(promises).then(function () {
createConnectionManager().then(function () {
console.debug('initAfterDependencies promises resolved');
require(['globalize', 'browser'], function (globalize, browser) {
window.Globalize = globalize;
loadCoreDictionary(globalize).then(function () {
onGlobalizeInit(browser, globalize);
});
});
require(['keyboardnavigation'], function(keyboardnavigation) {
keyboardnavigation.enable();
});
require(['mouseManager']);
require(['focusPreventScroll']);
require(['autoFocuser'], function(autoFocuser) {
autoFocuser.enable();
});
require(['globalize', 'connectionManager', 'events'], function (globalize, connectionManager, events) {
events.on(connectionManager, 'localusersignedin', globalize.updateCurrentCulture);
});
});
});
}
function loadCoreDictionary(globalize) {
var languages = ['ar', 'be-by', 'bg-bg', 'ca', 'cs', 'da', 'de', 'el', 'en-gb', 'en-us', 'es', 'es-ar', 'es-mx', 'fa', 'fi', 'fr', 'fr-ca', 'gsw', 'he', 'hi-in', 'hr', 'hu', 'id', 'it', 'kk', 'ko', 'lt-lt', 'ms', 'nb', 'nl', 'pl', 'pt-br', 'pt-pt', 'ro', 'ru', 'sk', 'sl-si', 'sv', 'tr', 'uk', 'vi', 'zh-cn', 'zh-hk', 'zh-tw'];
var translations = languages.map(function (language) {
return {
lang: language,
path: 'strings/' + language + '.json'
};
});
globalize.defaultModule('core');
return globalize.loadStrings({
name: 'core',
translations: translations
});
}
function onGlobalizeInit(browser, globalize) {
if ('android' === self.appMode) {
if (-1 !== self.location.href.toString().toLowerCase().indexOf('start=backgroundsync')) {
return onAppReady(browser);
}
}
document.title = globalize.translateHtml(document.title, 'core');
if (browser.tv && !browser.android) {
console.debug('using system fonts with explicit sizes');
require(['systemFontsSizedCss']);
} else {
console.debug('using default fonts');
require(['systemFontsCss']);
}
require(['apphost', 'css!assets/css/librarybrowser'], function (appHost) {
loadPlugins(appHost, browser).then(function () {
onAppReady(browser);
});
});
}
function loadPlugins(appHost, browser, shell) {
console.debug('loading installed plugins');
var list = [
'plugins/playAccessValidation/plugin',
'plugins/experimentalWarnings/plugin',
'plugins/htmlAudioPlayer/plugin',
'plugins/htmlVideoPlayer/plugin',
'plugins/photoPlayer/plugin',
'plugins/bookPlayer/plugin',
'plugins/youtubePlayer/plugin',
'plugins/backdropScreensaver/plugin',
'plugins/logoScreensaver/plugin'
];
if (appHost.supports('remotecontrol')) {
list.push('plugins/sessionPlayer/plugin');
if (browser.chrome || browser.opera) {
list.push('plugins/chromecastPlayer/plugin');
}
}
if (window.NativeShell) {
list = list.concat(window.NativeShell.getPlugins());
}
return new Promise(function (resolve, reject) {
Promise.all(list.map(loadPlugin)).then(function () {
require(['packageManager'], function (packageManager) {
packageManager.init().then(resolve, reject);
});
}, reject);
});
}
function loadPlugin(url) {
return new Promise(function (resolve, reject) {
require(['pluginManager'], function (pluginManager) {
pluginManager.loadPlugin(url).then(resolve, reject);
});
});
}
function onAppReady(browser) {
console.debug('begin onAppReady');
        // ensure that appHost is loaded at this point
require(['apphost', 'appRouter'], function (appHost, appRouter) {
window.Emby = {};
console.debug('onAppReady: loading dependencies');
if (browser.iOS) {
require(['css!assets/css/ios.css']);
}
window.Emby.Page = appRouter;
require(['emby-button', 'scripts/themeLoader', 'libraryMenu', 'scripts/routes'], function () {
Emby.Page.start({
click: false,
hashbang: true
});
require(['components/themeMediaPlayer', 'scripts/autoBackdrops']);
if (!browser.tv && !browser.xboxOne && !browser.ps4) {
require(['components/nowPlayingBar/nowPlayingBar']);
}
if (appHost.supports('remotecontrol')) {
require(['playerSelectionMenu', 'components/playback/remotecontrolautoplay']);
}
require(['libraries/screensavermanager']);
if (!appHost.supports('physicalvolumecontrol') || browser.touch) {
require(['components/playback/volumeosd']);
}
/* eslint-disable-next-line compat/compat */
if (navigator.mediaSession || window.NativeShell) {
require(['mediaSession']);
}
require(['serverNotifications']);
require(['date-fns', 'date-fns/locale']);
if (!browser.tv && !browser.xboxOne) {
require(['components/playback/playbackorientation']);
registerServiceWorker();
if (window.Notification) {
require(['components/notifications/notifications']);
}
}
require(['playerSelectionMenu']);
var apiClient = window.ConnectionManager && window.ConnectionManager.currentApiClient();
if (apiClient) {
fetch(apiClient.getUrl('Branding/Css'))
.then(function(response) {
if (!response.ok) {
throw new Error(response.status + ' ' + response.statusText);
}
return response.text();
})
.then(function(css) {
// Inject the branding css as a dom element in body so it will take
// precedence over other stylesheets
var style = document.createElement('style');
style.appendChild(document.createTextNode(css));
document.body.appendChild(style);
})
.catch(function(err) {
console.warn('Error applying custom css', err);
});
}
});
});
}
function registerServiceWorker() {
/* eslint-disable compat/compat */
if (navigator.serviceWorker && self.appMode !== 'cordova' && self.appMode !== 'android') {
try {
navigator.serviceWorker.register('serviceworker.js');
} catch (err) {
console.error('error registering serviceWorker: ' + err);
}
} else {
console.warn('serviceWorker unsupported');
}
/* eslint-enable compat/compat */
}
function onWebComponentsReady() {
initRequireWithBrowser();
if (self.appMode === 'cordova' || self.appMode === 'android' || self.appMode === 'standalone') {
AppInfo.isNativeApp = true;
}
init();
}
var localApiClient;
(function () {
var urlArgs = 'v=' + (window.dashboardVersion || new Date().getDate());
var bowerPath = getBowerPath();
var componentsPath = getComponentsPath();
var elementsPath = getElementsPath();
var scriptsPath = getScriptsPath();
var paths = {
browserdeviceprofile: 'scripts/browserDeviceProfile',
browser: 'scripts/browser',
libraryBrowser: 'scripts/libraryBrowser',
inputManager: 'scripts/inputManager',
datetime: 'scripts/datetime',
globalize: 'scripts/globalize',
dfnshelper: 'scripts/dfnshelper',
libraryMenu: 'scripts/libraryMenu',
playlisteditor: componentsPath + '/playlisteditor/playlisteditor',
medialibrarycreator: componentsPath + '/mediaLibraryCreator/mediaLibraryCreator',
medialibraryeditor: componentsPath + '/mediaLibraryEditor/mediaLibraryEditor',
imageoptionseditor: componentsPath + '/imageOptionsEditor/imageOptionsEditor',
apphost: componentsPath + '/apphost',
visibleinviewport: bowerPath + '/visibleinviewport',
qualityoptions: componentsPath + '/qualityOptions',
focusManager: componentsPath + '/focusManager',
itemHelper: componentsPath + '/itemHelper',
itemShortcuts: componentsPath + '/shortcuts',
playQueueManager: componentsPath + '/playback/playqueuemanager',
nowPlayingHelper: componentsPath + '/playback/nowplayinghelper',
pluginManager: componentsPath + '/pluginManager',
packageManager: componentsPath + '/packageManager',
screensaverManager: componentsPath + '/screensavermanager',
chromecastHelper: 'plugins/chromecastPlayer/chromecastHelpers'
};
requirejs.onError = onRequireJsError;
requirejs.config({
waitSeconds: 0,
map: {
'*': {
css: 'components/require/requirecss',
text: 'components/require/requiretext'
}
},
bundles: {
bundle: [
'fetch',
'flvjs',
'jstree',
'epubjs',
'jQuery',
'hlsjs',
'howler',
'native-promise-only',
'resize-observer-polyfill',
'shaka',
'swiper',
'queryString',
'sortable',
'webcomponents',
'material-icons',
'jellyfin-noto',
'date-fns',
'page',
'polyfill',
'fast-text-encoding',
'intersection-observer',
'classlist-polyfill',
'screenfull',
'headroom',
'apiclient',
'events',
'credentialprovider',
'connectionManagerFactory',
'appStorage'
]
},
urlArgs: urlArgs,
paths: paths,
onError: onRequireJsError
});
require(['fetch']);
require(['polyfill']);
require(['fast-text-encoding']);
require(['intersection-observer']);
require(['classlist-polyfill']);
// Expose jQuery globally
require(['jQuery'], function(jQuery) {
window.$ = jQuery;
window.jQuery = jQuery;
});
require(['css!assets/css/site']);
require(['jellyfin-noto']);
// define styles
// TODO determine which of these files can be moved to the components themselves
define('systemFontsCss', ['css!assets/css/fonts'], returnFirstDependency);
define('systemFontsSizedCss', ['css!assets/css/fonts.sized'], returnFirstDependency);
define('scrollStyles', ['css!assets/css/scrollstyles'], returnFirstDependency);
define('dashboardcss', ['css!assets/css/dashboard'], returnFirstDependency);
define('programStyles', ['css!' + componentsPath + '/guide/programs'], returnFirstDependency);
define('listViewStyle', ['css!' + componentsPath + '/listview/listview'], returnFirstDependency);
define('formDialogStyle', ['css!' + componentsPath + '/formdialog'], returnFirstDependency);
define('clearButtonStyle', ['css!assets/css/clearbutton'], returnFirstDependency);
define('cardStyle', ['css!' + componentsPath + '/cardbuilder/card'], returnFirstDependency);
define('flexStyles', ['css!assets/css/flexstyles'], returnFirstDependency);
// there are several objects that need to be instantiated
// TODO find a better way to do this
define('appFooter', [componentsPath + '/appFooter/appFooter'], returnFirstDependency);
define('appFooter-shared', ['appFooter'], createSharedAppFooter);
// TODO remove these libraries
// all of these have been modified so we need to fix that first
define('scroller', [bowerPath + '/scroller'], returnFirstDependency);
define('navdrawer', [bowerPath + '/navdrawer/navdrawer'], returnFirstDependency);
define('emby-button', [elementsPath + '/emby-button/emby-button'], returnFirstDependency);
define('paper-icon-button-light', [elementsPath + '/emby-button/paper-icon-button-light'], returnFirstDependency);
define('emby-checkbox', [elementsPath + '/emby-checkbox/emby-checkbox'], returnFirstDependency);
define('emby-collapse', [elementsPath + '/emby-collapse/emby-collapse'], returnFirstDependency);
define('emby-input', [elementsPath + '/emby-input/emby-input'], returnFirstDependency);
define('emby-progressring', [elementsPath + '/emby-progressring/emby-progressring'], returnFirstDependency);
define('emby-radio', [elementsPath + '/emby-radio/emby-radio'], returnFirstDependency);
define('emby-select', [elementsPath + '/emby-select/emby-select'], returnFirstDependency);
define('emby-slider', [elementsPath + '/emby-slider/emby-slider'], returnFirstDependency);
define('emby-textarea', [elementsPath + '/emby-textarea/emby-textarea'], returnFirstDependency);
define('emby-toggle', [elementsPath + '/emby-toggle/emby-toggle'], returnFirstDependency);
define('emby-scroller', [elementsPath + '/emby-scroller/emby-scroller'], returnFirstDependency);
define('emby-tabs', [elementsPath + '/emby-tabs/emby-tabs'], returnFirstDependency);
define('emby-scrollbuttons', [elementsPath + '/emby-scrollbuttons/emby-scrollbuttons'], returnFirstDependency);
define('emby-itemrefreshindicator', [elementsPath + '/emby-itemrefreshindicator/emby-itemrefreshindicator'], returnFirstDependency);
define('emby-itemscontainer', [elementsPath + '/emby-itemscontainer/emby-itemscontainer'], returnFirstDependency);
define('emby-playstatebutton', [elementsPath + '/emby-playstatebutton/emby-playstatebutton'], returnFirstDependency);
define('emby-ratingbutton', [elementsPath + '/emby-ratingbutton/emby-ratingbutton'], returnFirstDependency);
define('emby-progressbar', [elementsPath + '/emby-progressbar/emby-progressbar'], returnFirstDependency);
define('emby-programcell', [elementsPath + '/emby-programcell/emby-programcell'], returnFirstDependency);
define('webSettings', [scriptsPath + '/settings/webSettings'], returnFirstDependency);
define('appSettings', [scriptsPath + '/settings/appSettings'], returnFirstDependency);
define('userSettings', [scriptsPath + '/settings/userSettings'], returnFirstDependency);
define('mediaSession', [componentsPath + '/playback/mediasession'], returnFirstDependency);
define('actionsheet', [componentsPath + '/actionSheet/actionSheet'], returnFirstDependency);
define('tunerPicker', [componentsPath + '/tunerPicker'], returnFirstDependency);
define('mainTabsManager', [componentsPath + '/maintabsmanager'], returnFirstDependency);
define('imageLoader', [componentsPath + '/images/imageLoader'], returnFirstDependency);
define('directorybrowser', [componentsPath + '/directorybrowser/directorybrowser'], returnFirstDependency);
define('metadataEditor', [componentsPath + '/metadataEditor/metadataEditor'], returnFirstDependency);
define('personEditor', [componentsPath + '/metadataEditor/personEditor'], returnFirstDependency);
define('playerSelectionMenu', [componentsPath + '/playback/playerSelectionMenu'], returnFirstDependency);
define('playerSettingsMenu', [componentsPath + '/playback/playersettingsmenu'], returnFirstDependency);
define('playMethodHelper', [componentsPath + '/playback/playmethodhelper'], returnFirstDependency);
define('brightnessOsd', [componentsPath + '/playback/brightnessosd'], returnFirstDependency);
define('alphaNumericShortcuts', [scriptsPath + '/alphanumericshortcuts'], returnFirstDependency);
define('multiSelect', [componentsPath + '/multiSelect/multiSelect'], returnFirstDependency);
define('alphaPicker', [componentsPath + '/alphaPicker/alphaPicker'], returnFirstDependency);
define('tabbedView', [componentsPath + '/tabbedview/tabbedview'], returnFirstDependency);
define('itemsTab', [componentsPath + '/tabbedview/itemstab'], returnFirstDependency);
define('collectionEditor', [componentsPath + '/collectionEditor/collectionEditor'], returnFirstDependency);
define('playlistEditor', [componentsPath + '/playlisteditor/playlisteditor'], returnFirstDependency);
define('recordingCreator', [componentsPath + '/recordingcreator/recordingcreator'], returnFirstDependency);
define('recordingEditor', [componentsPath + '/recordingcreator/recordingeditor'], returnFirstDependency);
define('seriesRecordingEditor', [componentsPath + '/recordingcreator/seriesrecordingeditor'], returnFirstDependency);
define('recordingFields', [componentsPath + '/recordingcreator/recordingfields'], returnFirstDependency);
define('recordingButton', [componentsPath + '/recordingcreator/recordingbutton'], returnFirstDependency);
define('recordingHelper', [componentsPath + '/recordingcreator/recordinghelper'], returnFirstDependency);
define('subtitleEditor', [componentsPath + '/subtitleeditor/subtitleeditor'], returnFirstDependency);
define('subtitleSync', [componentsPath + '/subtitlesync/subtitlesync'], returnFirstDependency);
define('itemIdentifier', [componentsPath + '/itemidentifier/itemidentifier'], returnFirstDependency);
define('itemMediaInfo', [componentsPath + '/itemMediaInfo/itemMediaInfo'], returnFirstDependency);
define('mediaInfo', [componentsPath + '/mediainfo/mediainfo'], returnFirstDependency);
define('itemContextMenu', [componentsPath + '/itemContextMenu'], returnFirstDependency);
define('imageEditor', [componentsPath + '/imageeditor/imageeditor'], returnFirstDependency);
define('imageDownloader', [componentsPath + '/imageDownloader/imageDownloader'], returnFirstDependency);
define('dom', [scriptsPath + '/dom'], returnFirstDependency);
define('playerStats', [componentsPath + '/playerstats/playerstats'], returnFirstDependency);
define('searchFields', [componentsPath + '/search/searchfields'], returnFirstDependency);
define('searchResults', [componentsPath + '/search/searchresults'], returnFirstDependency);
define('upNextDialog', [componentsPath + '/upnextdialog/upnextdialog'], returnFirstDependency);
define('subtitleAppearanceHelper', [componentsPath + '/subtitlesettings/subtitleappearancehelper'], returnFirstDependency);
define('subtitleSettings', [componentsPath + '/subtitlesettings/subtitlesettings'], returnFirstDependency);
define('settingsHelper', [componentsPath + '/settingshelper'], returnFirstDependency);
define('displaySettings', [componentsPath + '/displaySettings/displaySettings'], returnFirstDependency);
define('playbackSettings', [componentsPath + '/playbackSettings/playbackSettings'], returnFirstDependency);
define('homescreenSettings', [componentsPath + '/homeScreenSettings/homeScreenSettings'], returnFirstDependency);
define('playbackManager', [componentsPath + '/playback/playbackmanager'], getPlaybackManager);
define('timeSyncManager', [componentsPath + '/syncPlay/timeSyncManager'], returnDefault);
define('groupSelectionMenu', [componentsPath + '/syncPlay/groupSelectionMenu'], returnFirstDependency);
define('syncPlayManager', [componentsPath + '/syncPlay/syncPlayManager'], returnDefault);
define('playbackPermissionManager', [componentsPath + '/syncPlay/playbackPermissionManager'], returnDefault);
define('layoutManager', [componentsPath + '/layoutManager', 'apphost'], getLayoutManager);
define('homeSections', [componentsPath + '/homesections/homesections'], returnFirstDependency);
define('playMenu', [componentsPath + '/playmenu'], returnFirstDependency);
define('refreshDialog', [componentsPath + '/refreshdialog/refreshdialog'], returnFirstDependency);
define('backdrop', [componentsPath + '/backdrop/backdrop'], returnFirstDependency);
define('fetchHelper', [componentsPath + '/fetchhelper'], returnFirstDependency);
define('cardBuilder', [componentsPath + '/cardbuilder/cardBuilder'], returnFirstDependency);
define('peoplecardbuilder', [componentsPath + '/cardbuilder/peoplecardbuilder'], returnFirstDependency);
define('chaptercardbuilder', [componentsPath + '/cardbuilder/chaptercardbuilder'], returnFirstDependency);
define('deleteHelper', [scriptsPath + '/deleteHelper'], returnFirstDependency);
define('tvguide', [componentsPath + '/guide/guide'], returnFirstDependency);
define('guide-settings-dialog', [componentsPath + '/guide/guide-settings'], returnFirstDependency);
define('viewManager', [componentsPath + '/viewManager/viewManager'], function (viewManager) {
window.ViewManager = viewManager;
viewManager.dispatchPageEvents(true);
return viewManager;
});
define('slideshow', [componentsPath + '/slideshow/slideshow'], returnFirstDependency);
define('focusPreventScroll', ['legacy/focusPreventScroll'], returnFirstDependency);
define('userdataButtons', [componentsPath + '/userdatabuttons/userdatabuttons'], returnFirstDependency);
define('listView', [componentsPath + '/listview/listview'], returnFirstDependency);
define('indicators', [componentsPath + '/indicators/indicators'], returnFirstDependency);
define('viewSettings', [componentsPath + '/viewSettings/viewSettings'], returnFirstDependency);
define('filterMenu', [componentsPath + '/filtermenu/filtermenu'], returnFirstDependency);
define('sortMenu', [componentsPath + '/sortmenu/sortmenu'], returnFirstDependency);
define('sanitizefilename', [componentsPath + '/sanitizeFilename'], returnFirstDependency);
define('toast', [componentsPath + '/toast/toast'], returnFirstDependency);
define('scrollHelper', [scriptsPath + '/scrollHelper'], returnFirstDependency);
define('touchHelper', [scriptsPath + '/touchHelper'], returnFirstDependency);
define('imageUploader', [componentsPath + '/imageUploader/imageUploader'], returnFirstDependency);
define('htmlMediaHelper', [componentsPath + '/htmlMediaHelper'], returnFirstDependency);
define('viewContainer', [componentsPath + '/viewContainer'], returnFirstDependency);
define('dialogHelper', [componentsPath + '/dialogHelper/dialogHelper'], returnFirstDependency);
define('serverNotifications', [scriptsPath + '/serverNotifications'], returnFirstDependency);
define('skinManager', [componentsPath + '/skinManager'], returnFirstDependency);
define('keyboardnavigation', [scriptsPath + '/keyboardNavigation'], returnFirstDependency);
define('mouseManager', [scriptsPath + '/mouseManager'], returnFirstDependency);
define('scrollManager', [componentsPath + '/scrollManager'], returnFirstDependency);
define('autoFocuser', [componentsPath + '/autoFocuser'], returnFirstDependency);
define('connectionManager', [], function () {
return ConnectionManager;
});
define('apiClientResolver', [], function () {
return function () {
return window.ApiClient;
};
});
define('appRouter', [componentsPath + '/appRouter', 'itemHelper'], function (appRouter, itemHelper) {
function showItem(item, serverId, options) {
if ('string' == typeof item) {
require(['connectionManager'], function (connectionManager) {
var apiClient = connectionManager.currentApiClient();
apiClient.getItem(apiClient.getCurrentUserId(), item).then(function (item) {
appRouter.showItem(item, options);
});
});
} else {
if (2 == arguments.length) {
options = arguments[1];
}
appRouter.show('/' + appRouter.getRouteUrl(item, options), {
item: item
});
}
}
appRouter.showLocalLogin = function (serverId, manualLogin) {
Dashboard.navigate('login.html?serverid=' + serverId);
};
appRouter.showVideoOsd = function () {
return Dashboard.navigate('videoosd.html');
};
appRouter.showSelectServer = function () {
Dashboard.navigate(AppInfo.isNativeApp ? 'selectserver.html' : 'login.html');
};
appRouter.showWelcome = function () {
Dashboard.navigate(AppInfo.isNativeApp ? 'selectserver.html' : 'login.html');
};
appRouter.showSettings = function () {
Dashboard.navigate('mypreferencesmenu.html');
};
appRouter.showGuide = function () {
Dashboard.navigate('livetv.html?tab=1');
};
appRouter.goHome = function () {
Dashboard.navigate('home.html');
};
appRouter.showSearch = function () {
Dashboard.navigate('search.html');
};
appRouter.showLiveTV = function () {
Dashboard.navigate('livetv.html');
};
appRouter.showRecordedTV = function () {
Dashboard.navigate('livetv.html?tab=3');
};
appRouter.showFavorites = function () {
Dashboard.navigate('home.html?tab=1');
};
appRouter.showSettings = function () {
Dashboard.navigate('mypreferencesmenu.html');
};
appRouter.setTitle = function (title) {
LibraryMenu.setTitle(title);
};
appRouter.getRouteUrl = function (item, options) {
if (!item) {
throw new Error('item cannot be null');
}
if (item.url) {
return item.url;
}
var context = options ? options.context : null;
var id = item.Id || item.ItemId;
if (!options) {
options = {};
}
var url;
var itemType = item.Type || (options ? options.itemType : null);
var serverId = item.ServerId || options.serverId;
if ('settings' === item) {
return 'mypreferencesmenu.html';
}
if ('wizard' === item) {
return 'wizardstart.html';
}
if ('manageserver' === item) {
return 'dashboard.html';
}
if ('recordedtv' === item) {
return 'livetv.html?tab=3&serverId=' + options.serverId;
}
if ('nextup' === item) {
return 'list.html?type=nextup&serverId=' + options.serverId;
}
if ('list' === item) {
var url = 'list.html?serverId=' + options.serverId + '&type=' + options.itemTypes;
if (options.isFavorite) {
url += '&IsFavorite=true';
}
return url;
}
if ('livetv' === item) {
if ('programs' === options.section) {
return 'livetv.html?tab=0&serverId=' + options.serverId;
}
if ('guide' === options.section) {
return 'livetv.html?tab=1&serverId=' + options.serverId;
}
if ('movies' === options.section) {
return 'list.html?type=Programs&IsMovie=true&serverId=' + options.serverId;
}
if ('shows' === options.section) {
return 'list.html?type=Programs&IsSeries=true&IsMovie=false&IsNews=false&serverId=' + options.serverId;
}
if ('sports' === options.section) {
return 'list.html?type=Programs&IsSports=true&serverId=' + options.serverId;
}
if ('kids' === options.section) {
return 'list.html?type=Programs&IsKids=true&serverId=' + options.serverId;
}
if ('news' === options.section) {
return 'list.html?type=Programs&IsNews=true&serverId=' + options.serverId;
}
if ('onnow' === options.section) {
return 'list.html?type=Programs&IsAiring=true&serverId=' + options.serverId;
}
if ('dvrschedule' === options.section) {
return 'livetv.html?tab=4&serverId=' + options.serverId;
}
if ('seriesrecording' === options.section) {
return 'livetv.html?tab=5&serverId=' + options.serverId;
}
return 'livetv.html?serverId=' + options.serverId;
}
if ('SeriesTimer' == itemType) {
return 'details?seriesTimerId=' + id + '&serverId=' + serverId;
}
if ('livetv' == item.CollectionType) {
return 'livetv.html';
}
if ('Genre' === item.Type) {
url = 'list.html?genreId=' + item.Id + '&serverId=' + serverId;
if ('livetv' === context) {
url += '&type=Programs';
}
if (options.parentId) {
url += '&parentId=' + options.parentId;
}
return url;
}
if ('MusicGenre' === item.Type) {
url = 'list.html?musicGenreId=' + item.Id + '&serverId=' + serverId;
if (options.parentId) {
url += '&parentId=' + options.parentId;
}
return url;
}
if ('Studio' === item.Type) {
url = 'list.html?studioId=' + item.Id + '&serverId=' + serverId;
if (options.parentId) {
url += '&parentId=' + options.parentId;
}
return url;
}
if ('folders' !== context && !itemHelper.isLocalItem(item)) {
if ('movies' == item.CollectionType) {
url = 'movies.html?topParentId=' + item.Id;
if (options && 'latest' === options.section) {
url += '&tab=1';
}
return url;
}
if ('tvshows' == item.CollectionType) {
url = 'tv.html?topParentId=' + item.Id;
if (options && 'latest' === options.section) {
url += '&tab=2';
}
return url;
}
if ('music' == item.CollectionType) {
return 'music.html?topParentId=' + item.Id;
}
}
var itemTypes = ['Playlist', 'TvChannel', 'Program', 'BoxSet', 'MusicAlbum', 'MusicGenre', 'Person', 'Recording', 'MusicArtist'];
if (itemTypes.indexOf(itemType) >= 0) {
return 'details?id=' + id + '&serverId=' + serverId;
}
var contextSuffix = context ? '&context=' + context : '';
if ('Series' == itemType || 'Season' == itemType || 'Episode' == itemType) {
return 'details?id=' + id + contextSuffix + '&serverId=' + serverId;
}
if (item.IsFolder) {
if (id) {
return 'list.html?parentId=' + id + '&serverId=' + serverId;
}
return '#';
}
return 'details?id=' + id + '&serverId=' + serverId;
};
appRouter.showItem = showItem;
return appRouter;
});
})();
return onWebComponentsReady();
}();
pageClassOn('viewshow', 'standalonePage', function () {
document.querySelector('.skinHeader').classList.add('noHeaderRight');
});
pageClassOn('viewhide', 'standalonePage', function () {
document.querySelector('.skinHeader').classList.remove('noHeaderRight');
});
| 1 | 16,734 | why add function explicitly? | jellyfin-jellyfin-web | js |
@@ -92,7 +92,8 @@ func newLevel() *level {
// New will create a default sublist
func NewSublist() *Sublist {
- return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
+ // return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
+ return &Sublist{root: newLevel()}
}
// Insert adds a subscription into the sublist | 1 | // Copyright 2016-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package sublist is a routing mechanism to handle subject distribution
// and provides a facility to match subjects from published messages to
// interested subscribers. Subscribers can have wildcard subjects to match
// multiple published subjects.
package server
import (
"bytes"
"errors"
"strings"
"sync"
"sync/atomic"
)
// Common byte variables for wildcards and token separator.
const (
pwc = '*'
fwc = '>'
tsep = "."
btsep = '.'
)
// Sublist related errors
var (
ErrInvalidSubject = errors.New("sublist: Invalid Subject")
ErrNotFound = errors.New("sublist: No Matches Found")
)
const (
// cacheMax is used to bound limit the frontend cache
slCacheMax = 1024
// plistMin is our lower bounds to create a fast plist for Match.
plistMin = 256
)
// A result structure better optimized for queue subs.
type SublistResult struct {
psubs []*subscription
qsubs [][]*subscription // don't make this a map, too expensive to iterate
}
// A Sublist stores and efficiently retrieves subscriptions.
type Sublist struct {
sync.RWMutex
genid uint64
matches uint64
cacheHits uint64
inserts uint64
removes uint64
cache map[string]*SublistResult
root *level
count uint32
}
// A node contains subscriptions and a pointer to the next level.
type node struct {
next *level
psubs map[*subscription]*subscription
qsubs map[string](map[*subscription]*subscription)
plist []*subscription
}
// A level represents a group of nodes and special pointers to
// wildcard nodes.
type level struct {
nodes map[string]*node
pwc, fwc *node
}
// Create a new default node.
func newNode() *node {
return &node{psubs: make(map[*subscription]*subscription)}
}
// Create a new default level.
func newLevel() *level {
return &level{nodes: make(map[string]*node)}
}
// New will create a default sublist
func NewSublist() *Sublist {
return &Sublist{root: newLevel(), cache: make(map[string]*SublistResult)}
}
// Insert adds a subscription into the sublist
func (s *Sublist) Insert(sub *subscription) error {
// copy the subject since we hold this and this might be part of a large byte slice.
subject := string(sub.subject)
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
s.Lock()
sfwc := false
l := s.root
var n *node
for _, t := range tokens {
lt := len(t)
if lt == 0 || sfwc {
s.Unlock()
return ErrInvalidSubject
}
if lt > 1 {
n = l.nodes[t]
} else {
switch t[0] {
case pwc:
n = l.pwc
case fwc:
n = l.fwc
sfwc = true
default:
n = l.nodes[t]
}
}
if n == nil {
n = newNode()
if lt > 1 {
l.nodes[t] = n
} else {
switch t[0] {
case pwc:
l.pwc = n
case fwc:
l.fwc = n
default:
l.nodes[t] = n
}
}
}
if n.next == nil {
n.next = newLevel()
}
l = n.next
}
if sub.queue == nil {
n.psubs[sub] = sub
if n.plist != nil {
n.plist = append(n.plist, sub)
} else if len(n.psubs) > plistMin {
n.plist = make([]*subscription, 0, len(n.psubs))
// Populate
for _, psub := range n.psubs {
n.plist = append(n.plist, psub)
}
}
} else {
if n.qsubs == nil {
n.qsubs = make(map[string]map[*subscription]*subscription)
}
qname := string(sub.queue)
// This is a queue subscription
subs, ok := n.qsubs[qname]
if !ok {
subs = make(map[*subscription]*subscription)
n.qsubs[qname] = subs
}
subs[sub] = sub
}
s.count++
s.inserts++
s.addToCache(subject, sub)
atomic.AddUint64(&s.genid, 1)
s.Unlock()
return nil
}
// Deep copy
func copyResult(r *SublistResult) *SublistResult {
nr := &SublistResult{}
nr.psubs = append([]*subscription(nil), r.psubs...)
for _, qr := range r.qsubs {
nqr := append([]*subscription(nil), qr...)
nr.qsubs = append(nr.qsubs, nqr)
}
return nr
}
// addToCache will add the new entry to existing cache
// entries if needed. Assumes write lock is held.
func (s *Sublist) addToCache(subject string, sub *subscription) {
for k, r := range s.cache {
if matchLiteral(k, subject) {
// Copy since others may have a reference.
nr := copyResult(r)
if sub.queue == nil {
nr.psubs = append(nr.psubs, sub)
} else {
if i := findQSliceForSub(sub, nr.qsubs); i >= 0 {
nr.qsubs[i] = append(nr.qsubs[i], sub)
} else {
nr.qsubs = append(nr.qsubs, []*subscription{sub})
}
}
s.cache[k] = nr
}
}
}
// removeFromCache will remove the sub from any active cache entries.
// Assumes write lock is held.
func (s *Sublist) removeFromCache(subject string, sub *subscription) {
for k := range s.cache {
if !matchLiteral(k, subject) {
continue
}
		// Since someone else may be referencing, can't modify the list
// safely, just let it re-populate.
delete(s.cache, k)
}
}
// Match will match all entries to the literal subject.
// It will return a set of results for both normal and queue subscribers.
func (s *Sublist) Match(subject string) *SublistResult {
s.RLock()
atomic.AddUint64(&s.matches, 1)
rc, ok := s.cache[subject]
s.RUnlock()
if ok {
atomic.AddUint64(&s.cacheHits, 1)
return rc
}
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
// FIXME(dlc) - Make shared pool between sublist and client readLoop?
result := &SublistResult{}
s.Lock()
matchLevel(s.root, tokens, result)
// Add to our cache
s.cache[subject] = result
	// Bound the number of entries to slCacheMax
if len(s.cache) > slCacheMax {
for k := range s.cache {
delete(s.cache, k)
break
}
}
s.Unlock()
return result
}
// This will add in a node's results to the total results.
func addNodeToResults(n *node, results *SublistResult) {
// Normal subscriptions
if n.plist != nil {
results.psubs = append(results.psubs, n.plist...)
} else {
for _, psub := range n.psubs {
results.psubs = append(results.psubs, psub)
}
}
// Queue subscriptions
for qname, qr := range n.qsubs {
if len(qr) == 0 {
continue
}
tsub := &subscription{subject: nil, queue: []byte(qname)}
// Need to find matching list in results
if i := findQSliceForSub(tsub, results.qsubs); i >= 0 {
for _, sub := range qr {
results.qsubs[i] = append(results.qsubs[i], sub)
}
} else {
var nqsub []*subscription
for _, sub := range qr {
nqsub = append(nqsub, sub)
}
results.qsubs = append(results.qsubs, nqsub)
}
}
}
// We do not use a map here since we want iteration to be fast when
// processing publishes in L1 on the client. So we need to walk sequentially
// for now. Keep an eye on this in case we start getting a large number of
// different queue subscribers for the same subject.
func findQSliceForSub(sub *subscription, qsl [][]*subscription) int {
if sub.queue == nil {
return -1
}
for i, qr := range qsl {
if len(qr) > 0 && bytes.Equal(sub.queue, qr[0].queue) {
return i
}
}
return -1
}
// matchLevel is used to recursively descend into the trie.
func matchLevel(l *level, toks []string, results *SublistResult) {
var pwc, n *node
for i, t := range toks {
if l == nil {
return
}
if l.fwc != nil {
addNodeToResults(l.fwc, results)
}
if pwc = l.pwc; pwc != nil {
matchLevel(pwc.next, toks[i+1:], results)
}
n = l.nodes[t]
if n != nil {
l = n.next
} else {
l = nil
}
}
if n != nil {
addNodeToResults(n, results)
}
if pwc != nil {
addNodeToResults(pwc, results)
}
}
// lnt is used to track descent into levels for a removal for pruning.
type lnt struct {
l *level
n *node
t string
}
// Raw low level remove, can do batches with lock held outside.
func (s *Sublist) remove(sub *subscription, shouldLock bool) error {
subject := string(sub.subject)
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
if shouldLock {
s.Lock()
defer s.Unlock()
}
sfwc := false
l := s.root
var n *node
// Track levels for pruning
var lnts [32]lnt
levels := lnts[:0]
for _, t := range tokens {
lt := len(t)
if lt == 0 || sfwc {
return ErrInvalidSubject
}
if l == nil {
return ErrNotFound
}
if lt > 1 {
n = l.nodes[t]
} else {
switch t[0] {
case pwc:
n = l.pwc
case fwc:
n = l.fwc
sfwc = true
default:
n = l.nodes[t]
}
}
if n != nil {
levels = append(levels, lnt{l, n, t})
l = n.next
} else {
l = nil
}
}
if !s.removeFromNode(n, sub) {
return ErrNotFound
}
s.count--
s.removes++
for i := len(levels) - 1; i >= 0; i-- {
l, n, t := levels[i].l, levels[i].n, levels[i].t
if n.isEmpty() {
l.pruneNode(n, t)
}
}
s.removeFromCache(subject, sub)
atomic.AddUint64(&s.genid, 1)
return nil
}
// Remove will remove a subscription.
func (s *Sublist) Remove(sub *subscription) error {
return s.remove(sub, true)
}
// RemoveBatch will remove a list of subscriptions.
func (s *Sublist) RemoveBatch(subs []*subscription) error {
s.Lock()
defer s.Unlock()
for _, sub := range subs {
if err := s.remove(sub, false); err != nil {
return err
}
}
return nil
}
// pruneNode is used to prune an empty node from the tree.
func (l *level) pruneNode(n *node, t string) {
if n == nil {
return
}
if n == l.fwc {
l.fwc = nil
} else if n == l.pwc {
l.pwc = nil
} else {
delete(l.nodes, t)
}
}
// isEmpty will test if the node has any entries. Used
// in pruning.
func (n *node) isEmpty() bool {
if len(n.psubs) == 0 && len(n.qsubs) == 0 {
if n.next == nil || n.next.numNodes() == 0 {
return true
}
}
return false
}
// Return the number of nodes for the given level.
func (l *level) numNodes() int {
num := len(l.nodes)
if l.pwc != nil {
num++
}
if l.fwc != nil {
num++
}
return num
}
// Remove the sub for the given node.
func (s *Sublist) removeFromNode(n *node, sub *subscription) (found bool) {
if n == nil {
return false
}
if sub.queue == nil {
_, found = n.psubs[sub]
delete(n.psubs, sub)
if found && n.plist != nil {
			// This will brute force remove the plist to preserve
			// correct behavior. It will get repopulated on a call
			// to Match as needed.
n.plist = nil
}
return found
}
// We have a queue group subscription here
qname := string(sub.queue)
qsub := n.qsubs[qname]
_, found = qsub[sub]
delete(qsub, sub)
if len(qsub) == 0 {
delete(n.qsubs, qname)
}
return found
}
// Count returns the number of subscriptions.
func (s *Sublist) Count() uint32 {
s.RLock()
defer s.RUnlock()
return s.count
}
// CacheCount returns the number of result sets in the cache.
func (s *Sublist) CacheCount() int {
s.RLock()
defer s.RUnlock()
return len(s.cache)
}
// Public stats for the sublist
type SublistStats struct {
NumSubs uint32 `json:"num_subscriptions"`
NumCache uint32 `json:"num_cache"`
NumInserts uint64 `json:"num_inserts"`
NumRemoves uint64 `json:"num_removes"`
NumMatches uint64 `json:"num_matches"`
CacheHitRate float64 `json:"cache_hit_rate"`
MaxFanout uint32 `json:"max_fanout"`
AvgFanout float64 `json:"avg_fanout"`
}
// Stats will return a stats structure for the current state.
func (s *Sublist) Stats() *SublistStats {
s.Lock()
defer s.Unlock()
st := &SublistStats{}
st.NumSubs = s.count
st.NumCache = uint32(len(s.cache))
st.NumInserts = s.inserts
st.NumRemoves = s.removes
st.NumMatches = atomic.LoadUint64(&s.matches)
if st.NumMatches > 0 {
st.CacheHitRate = float64(atomic.LoadUint64(&s.cacheHits)) / float64(st.NumMatches)
}
// whip through cache for fanout stats
tot, max := 0, 0
for _, r := range s.cache {
l := len(r.psubs) + len(r.qsubs)
tot += l
if l > max {
max = l
}
}
st.MaxFanout = uint32(max)
if tot > 0 {
st.AvgFanout = float64(tot) / float64(len(s.cache))
}
return st
}
// numLevels will return the maximum number of levels
// contained in the Sublist tree.
func (s *Sublist) numLevels() int {
return visitLevel(s.root, 0)
}
// visitLevel is used to descend the Sublist tree structure
// recursively.
func visitLevel(l *level, depth int) int {
if l == nil || l.numNodes() == 0 {
return depth
}
depth++
maxDepth := depth
for _, n := range l.nodes {
if n == nil {
continue
}
newDepth := visitLevel(n.next, depth)
if newDepth > maxDepth {
maxDepth = newDepth
}
}
if l.pwc != nil {
pwcDepth := visitLevel(l.pwc.next, depth)
if pwcDepth > maxDepth {
maxDepth = pwcDepth
}
}
if l.fwc != nil {
fwcDepth := visitLevel(l.fwc.next, depth)
if fwcDepth > maxDepth {
maxDepth = fwcDepth
}
}
return maxDepth
}
// IsValidSubject returns true if a subject is valid, false otherwise
func IsValidSubject(subject string) bool {
if subject == "" {
return false
}
sfwc := false
tokens := strings.Split(subject, tsep)
for _, t := range tokens {
if len(t) == 0 || sfwc {
return false
}
if len(t) > 1 {
continue
}
switch t[0] {
case fwc:
sfwc = true
}
}
return true
}
// IsValidLiteralSubject returns true if a subject is valid and literal (no wildcards), false otherwise
func IsValidLiteralSubject(subject string) bool {
tokens := strings.Split(subject, tsep)
for _, t := range tokens {
if len(t) == 0 {
return false
}
if len(t) > 1 {
continue
}
switch t[0] {
case pwc, fwc:
return false
}
}
return true
}
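// Illustrative sketch (hedged example, subjects are made up): expected results
// of the two validity helpers above for a few representative subjects.
func exampleSubjectValidity() {
	_ = IsValidSubject("foo.bar.>")      // true: '>' is the final token
	_ = IsValidSubject("foo..bar")       // false: contains an empty token
	_ = IsValidLiteralSubject("foo.bar") // true: no wildcard tokens
	_ = IsValidLiteralSubject("foo.*")   // false: '*' is a wildcard token
}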
// matchLiteral is used to test literal subjects, those that do not have any
// wildcards, with a target subject. This is used in the cache layer.
func matchLiteral(literal, subject string) bool {
li := 0
ll := len(literal)
ls := len(subject)
for i := 0; i < ls; i++ {
if li >= ll {
return false
}
		// This function has been optimized for speed.
		// For instance, do not set b := subject[i] here since
		// we may bump `i` in this loop to avoid a `continue` or
		// to skip the common test in a particular case.
		// Run Benchmark_SublistMatchLiteral before making any change.
switch subject[i] {
case pwc:
// NOTE: This is not testing validity of a subject, instead ensures
// that wildcards are treated as such if they follow some basic rules,
// namely that they are a token on their own.
if i == 0 || subject[i-1] == btsep {
if i == ls-1 {
// There is no more token in the subject after this wildcard.
// Skip token in literal and expect to not find a separator.
for {
// End of literal, this is a match.
if li >= ll {
return true
}
// Presence of separator, this can't be a match.
if literal[li] == btsep {
return false
}
li++
}
} else if subject[i+1] == btsep {
// There is another token in the subject after this wildcard.
// Skip token in literal and expect to get a separator.
for {
// We found the end of the literal before finding a separator,
// this can't be a match.
if li >= ll {
return false
}
if literal[li] == btsep {
break
}
li++
}
// Bump `i` since we know there is a `.` following, we are
// safe. The common test below is going to check `.` with `.`
// which is good. A `continue` here is too costly.
i++
}
}
case fwc:
// For `>` to be a wildcard, it means being the only or last character
// in the string preceded by a `.`
if (i == 0 || subject[i-1] == btsep) && i == ls-1 {
return true
}
}
if subject[i] != literal[li] {
return false
}
li++
}
	// Make sure we have processed all of the literal's chars.
return li >= ll
}
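// Illustrative sketch (hedged example, values are made up): matchLiteral takes
// a cached literal subject and a subscription subject that may contain
// wildcards, as used by addToCache/removeFromCache above.
func exampleMatchLiteral() {
	_ = matchLiteral("foo.bar", "foo.bar")   // true: exact match
	_ = matchLiteral("foo.bar", "foo.*")     // true: '*' covers the last token
	_ = matchLiteral("foo.bar.baz", "foo.>") // true: '>' covers the remainder
	_ = matchLiteral("foo.bar", "foo.baz")   // false: literal tokens differ
}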
func addLocalSub(sub *subscription, subs *[]*subscription) {
if sub != nil && sub.client != nil && sub.client.typ == CLIENT {
*subs = append(*subs, sub)
}
}
func (s *Sublist) addNodeToSubs(n *node, subs *[]*subscription) {
// Normal subscriptions
if n.plist != nil {
for _, sub := range n.plist {
addLocalSub(sub, subs)
}
} else {
for _, sub := range n.psubs {
addLocalSub(sub, subs)
}
}
// Queue subscriptions
for _, qr := range n.qsubs {
for _, sub := range qr {
addLocalSub(sub, subs)
}
}
}
func (s *Sublist) collectLocalSubs(l *level, subs *[]*subscription) {
if len(l.nodes) > 0 {
for _, n := range l.nodes {
s.addNodeToSubs(n, subs)
s.collectLocalSubs(n.next, subs)
}
}
if l.pwc != nil {
s.addNodeToSubs(l.pwc, subs)
s.collectLocalSubs(l.pwc.next, subs)
}
if l.fwc != nil {
s.addNodeToSubs(l.fwc, subs)
s.collectLocalSubs(l.fwc.next, subs)
}
}
// Return all local client subscriptions. Use the supplied slice.
func (s *Sublist) localSubs(subs *[]*subscription) {
s.RLock()
s.collectLocalSubs(s.root, subs)
s.RUnlock()
}
| 1 | 7,715 | Is that intentional? If you don't create the cache here, it will never be created therefore used. | nats-io-nats-server | go |
@@ -136,4 +136,15 @@ func TestStream(t *testing.T) {
require.Greater(t, streamer.fetchCount, 1, "expected more than one call to Fetch within timeout")
require.Greater(t, streamer.notifyCount, 1, "expected more than one call to Notify within timeout")
})
+
+ t.Run("nextFetchDate works correctly to grab times before the timeout.", func(t *testing.T) {
+ for r := 0; r < 1000; r++ {
+ now := time.Now()
+ a := nextFetchDate(now, 0)
+ require.True(t, a.Before(now.Add(4*time.Second)), "require that the given date for 0 retries is less than 4s in the future")
+ b := nextFetchDate(now, 10)
+ require.True(t, b.Before(now.Add(32*time.Second)), "require that the given date for 10 retries is never more than the max interval")
+ }
+
+ })
} | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package stream
import (
"context"
"errors"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// counterStreamer counts the number of times Fetch and Notify are invoked.
type counterStreamer struct {
fetchCount int
notifyCount int
done chan struct{}
next func() time.Time
}
func (s *counterStreamer) Fetch() (time.Time, error) {
s.fetchCount += 1
return s.next(), nil
}
func (s *counterStreamer) Notify() {
s.notifyCount += 1
}
func (s *counterStreamer) Close() {}
func (s *counterStreamer) Done() <-chan struct{} {
if s.done == nil {
s.done = make(chan struct{})
}
return s.done
}
// errStreamer returns an error when Fetch is invoked.
type errStreamer struct {
err error
done chan struct{}
}
func (s *errStreamer) Fetch() (time.Time, error) {
return time.Now(), s.err
}
func (s *errStreamer) Notify() {}
func (s *errStreamer) Close() {}
func (s *errStreamer) Done() <-chan struct{} {
if s.done == nil {
s.done = make(chan struct{})
}
return s.done
}
func TestStream(t *testing.T) {
t.Run("short-circuits immediately if context is canceled", func(t *testing.T) {
// GIVEN
ctx, cancel := context.WithCancel(context.Background())
cancel() // call cancel immediately.
streamer := &counterStreamer{
next: func() time.Time {
return time.Now()
},
}
// WHEN
err := Stream(ctx, streamer)
// THEN
require.EqualError(t, err, ctx.Err().Error(), "the error returned should be context canceled")
require.Equal(t, 0, streamer.fetchCount, "expected number of Fetch calls to match")
require.Equal(t, 0, streamer.notifyCount, "expected number of Notify calls to match")
})
t.Run("returns error from Fetch", func(t *testing.T) {
// GIVEN
wantedErr := errors.New("unexpected fetch error")
streamer := &errStreamer{err: wantedErr}
// WHEN
actualErr := Stream(context.Background(), streamer)
// THEN
require.EqualError(t, actualErr, wantedErr.Error())
})
t.Run("calls Fetch and Notify multiple times until context is canceled", func(t *testing.T) {
t.Parallel()
// GIVEN
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
defer cancel()
streamer := &counterStreamer{
next: func() time.Time {
return time.Now().Add(100 * time.Millisecond)
},
}
// WHEN
err := Stream(ctx, streamer)
// THEN
require.EqualError(t, err, ctx.Err().Error(), "the error returned should be context canceled")
require.Greater(t, streamer.fetchCount, 1, "expected more than one call to Fetch within timeout")
require.Greater(t, streamer.notifyCount, 1, "expected more than one call to Notify within timeout")
})
t.Run("calls Fetch and Notify multiple times until there is no more work left", func(t *testing.T) {
t.Parallel()
done := make(chan struct{})
streamer := &counterStreamer{
next: func() time.Time {
return time.Now().Add(100 * time.Millisecond)
},
done: done,
}
go func() {
			// Stop the streamer after 300ms of work.
<-time.After(300 * time.Millisecond)
close(done)
}()
// WHEN
err := Stream(context.Background(), streamer)
// THEN
require.NoError(t, err)
require.Greater(t, streamer.fetchCount, 1, "expected more than one call to Fetch within timeout")
require.Greater(t, streamer.notifyCount, 1, "expected more than one call to Notify within timeout")
})
}
| 1 | 16,415 | I think we can now remove this test case since we can ensure that multiple calls to `Fetch` will double the interval on each call | aws-copilot-cli | go |
@@ -64,7 +64,7 @@ type (
)
const (
- reservedTaskListPrefix = "/__cadence_sys/"
+ reservedTaskListPrefix = "/__temporal_sys/"
)
func newDecisionAttrValidator( | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"fmt"
"strings"
"github.com/pborman/uuid"
commonproto "go.temporal.io/temporal-proto/common"
"go.temporal.io/temporal-proto/serviceerror"
"github.com/temporalio/temporal/common"
"github.com/temporalio/temporal/common/backoff"
"github.com/temporalio/temporal/common/cache"
"github.com/temporalio/temporal/common/elasticsearch/validator"
"github.com/temporalio/temporal/common/log"
"github.com/temporalio/temporal/common/log/tag"
"github.com/temporalio/temporal/common/metrics"
"github.com/temporalio/temporal/common/persistence"
)
type (
decisionAttrValidator struct {
domainCache cache.DomainCache
maxIDLengthLimit int
searchAttributesValidator *validator.SearchAttributesValidator
}
workflowSizeChecker struct {
blobSizeLimitWarn int
blobSizeLimitError int
historySizeLimitWarn int
historySizeLimitError int
historyCountLimitWarn int
historyCountLimitError int
completedID int64
mutableState mutableState
executionStats *persistence.ExecutionStats
metricsClient metrics.Client
logger log.Logger
}
)
const (
reservedTaskListPrefix = "/__cadence_sys/"
)
func newDecisionAttrValidator(
domainCache cache.DomainCache,
config *Config,
logger log.Logger,
) *decisionAttrValidator {
return &decisionAttrValidator{
domainCache: domainCache,
maxIDLengthLimit: config.MaxIDLengthLimit(),
searchAttributesValidator: validator.NewSearchAttributesValidator(
logger,
config.ValidSearchAttributes,
config.SearchAttributesNumberOfKeysLimit,
config.SearchAttributesSizeOfValueLimit,
config.SearchAttributesTotalSizeLimit,
),
}
}
func newWorkflowSizeChecker(
blobSizeLimitWarn int,
blobSizeLimitError int,
historySizeLimitWarn int,
historySizeLimitError int,
historyCountLimitWarn int,
historyCountLimitError int,
completedID int64,
mutableState mutableState,
executionStats *persistence.ExecutionStats,
metricsClient metrics.Client,
logger log.Logger,
) *workflowSizeChecker {
return &workflowSizeChecker{
blobSizeLimitWarn: blobSizeLimitWarn,
blobSizeLimitError: blobSizeLimitError,
historySizeLimitWarn: historySizeLimitWarn,
historySizeLimitError: historySizeLimitError,
historyCountLimitWarn: historyCountLimitWarn,
historyCountLimitError: historyCountLimitError,
completedID: completedID,
mutableState: mutableState,
executionStats: executionStats,
metricsClient: metricsClient,
logger: logger,
}
}
func (c *workflowSizeChecker) failWorkflowIfBlobSizeExceedsLimit(
blob []byte,
message string,
) (bool, error) {
executionInfo := c.mutableState.GetExecutionInfo()
err := common.CheckEventBlobSizeLimit(
len(blob),
c.blobSizeLimitWarn,
c.blobSizeLimitError,
executionInfo.DomainID,
executionInfo.WorkflowID,
executionInfo.RunID,
c.metricsClient.Scope(metrics.HistoryRespondDecisionTaskCompletedScope),
c.logger,
)
if err == nil {
return false, nil
}
attributes := &commonproto.FailWorkflowExecutionDecisionAttributes{
Reason: common.FailureReasonDecisionBlobSizeExceedsLimit,
Details: []byte(message),
}
if _, err := c.mutableState.AddFailWorkflowEvent(c.completedID, attributes); err != nil {
return false, err
}
return true, nil
}
func (c *workflowSizeChecker) failWorkflowSizeExceedsLimit() (bool, error) {
historyCount := int(c.mutableState.GetNextEventID()) - 1
historySize := int(c.executionStats.HistorySize)
if historySize > c.historySizeLimitError || historyCount > c.historyCountLimitError {
executionInfo := c.mutableState.GetExecutionInfo()
c.logger.Error("history size exceeds error limit.",
tag.WorkflowDomainID(executionInfo.DomainID),
tag.WorkflowID(executionInfo.WorkflowID),
tag.WorkflowRunID(executionInfo.RunID),
tag.WorkflowHistorySize(historySize),
tag.WorkflowEventCount(historyCount))
attributes := &commonproto.FailWorkflowExecutionDecisionAttributes{
Reason: common.FailureReasonSizeExceedsLimit,
Details: []byte("Workflow history size / count exceeds limit."),
}
if _, err := c.mutableState.AddFailWorkflowEvent(c.completedID, attributes); err != nil {
return false, err
}
return true, nil
}
if historySize > c.historySizeLimitWarn || historyCount > c.historyCountLimitWarn {
executionInfo := c.mutableState.GetExecutionInfo()
c.logger.Warn("history size exceeds warn limit.",
tag.WorkflowDomainID(executionInfo.DomainID),
tag.WorkflowID(executionInfo.WorkflowID),
tag.WorkflowRunID(executionInfo.RunID),
tag.WorkflowHistorySize(historySize),
tag.WorkflowEventCount(historyCount))
return false, nil
}
return false, nil
}
func (v *decisionAttrValidator) validateActivityScheduleAttributes(
domainID string,
targetDomainID string,
attributes *commonproto.ScheduleActivityTaskDecisionAttributes,
wfTimeout int32,
) error {
if err := v.validateCrossDomainCall(
domainID,
targetDomainID,
); err != nil {
return err
}
if attributes == nil {
return serviceerror.NewInvalidArgument("ScheduleActivityTaskDecisionAttributes is not set on decision.")
}
defaultTaskListName := ""
if _, err := v.validatedTaskList(attributes.TaskList, defaultTaskListName); err != nil {
return err
}
if attributes.GetActivityId() == "" {
return serviceerror.NewInvalidArgument("ActivityId is not set on decision.")
}
if attributes.ActivityType == nil || attributes.ActivityType.GetName() == "" {
return serviceerror.NewInvalidArgument("ActivityType is not set on decision.")
}
if err := common.ValidateRetryPolicy(attributes.RetryPolicy); err != nil {
return err
}
if len(attributes.GetActivityId()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("ActivityID exceeds length limit.")
}
if len(attributes.GetActivityType().GetName()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("ActivityType exceeds length limit.")
}
if len(attributes.GetDomain()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("Domain exceeds length limit.")
}
	// Only attempt to deduce and fill in unspecified timeouts when all timeouts are non-negative.
if attributes.GetScheduleToCloseTimeoutSeconds() < 0 || attributes.GetScheduleToStartTimeoutSeconds() < 0 ||
attributes.GetStartToCloseTimeoutSeconds() < 0 || attributes.GetHeartbeatTimeoutSeconds() < 0 {
return serviceerror.NewInvalidArgument("A valid timeout may not be negative.")
}
	// ensure activity timeouts are never larger than the workflow timeout
if attributes.GetScheduleToCloseTimeoutSeconds() > wfTimeout {
attributes.ScheduleToCloseTimeoutSeconds = wfTimeout
}
if attributes.GetScheduleToStartTimeoutSeconds() > wfTimeout {
attributes.ScheduleToStartTimeoutSeconds = wfTimeout
}
if attributes.GetStartToCloseTimeoutSeconds() > wfTimeout {
attributes.StartToCloseTimeoutSeconds = wfTimeout
}
if attributes.GetHeartbeatTimeoutSeconds() > wfTimeout {
attributes.HeartbeatTimeoutSeconds = wfTimeout
}
validScheduleToClose := attributes.GetScheduleToCloseTimeoutSeconds() > 0
validScheduleToStart := attributes.GetScheduleToStartTimeoutSeconds() > 0
validStartToClose := attributes.GetStartToCloseTimeoutSeconds() > 0
if validScheduleToClose {
if !validScheduleToStart {
attributes.ScheduleToStartTimeoutSeconds = attributes.GetScheduleToCloseTimeoutSeconds()
}
if !validStartToClose {
attributes.StartToCloseTimeoutSeconds = attributes.GetScheduleToCloseTimeoutSeconds()
}
} else if validScheduleToStart && validStartToClose {
attributes.ScheduleToCloseTimeoutSeconds = attributes.GetScheduleToStartTimeoutSeconds() + attributes.GetStartToCloseTimeoutSeconds()
if attributes.GetScheduleToCloseTimeoutSeconds() > wfTimeout {
attributes.ScheduleToCloseTimeoutSeconds = wfTimeout
}
} else {
// Deduction failed as there's not enough information to fill in missing timeouts.
return serviceerror.NewInvalidArgument("A valid ScheduleToCloseTimeout is not set on decision.")
}
	// ensure the activity's SCHEDULE_TO_START and SCHEDULE_TO_CLOSE timeouts are at least as long as the retry policy expiration
p := attributes.RetryPolicy
if p != nil {
expiration := p.GetExpirationIntervalInSeconds()
if expiration == 0 {
expiration = wfTimeout
}
if attributes.GetScheduleToStartTimeoutSeconds() < expiration {
attributes.ScheduleToStartTimeoutSeconds = expiration
}
if attributes.GetScheduleToCloseTimeoutSeconds() < expiration {
attributes.ScheduleToCloseTimeoutSeconds = expiration
}
}
return nil
}
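// Worked example for the timeout deduction above (illustrative numbers only):
// with a workflow timeout of 100s, a decision that only sets
// ScheduleToCloseTimeoutSeconds=30 gets ScheduleToStart and StartToClose filled
// in as 30; a decision that only sets ScheduleToStart=10 and StartToClose=20
// gets ScheduleToClose deduced as 30 (capped at the workflow timeout); a
// decision that sets none of the three is rejected as an invalid argument.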
func (v *decisionAttrValidator) validateTimerScheduleAttributes(
attributes *commonproto.StartTimerDecisionAttributes,
) error {
if attributes == nil {
return serviceerror.NewInvalidArgument("StartTimerDecisionAttributes is not set on decision.")
}
if attributes.GetTimerId() == "" {
return serviceerror.NewInvalidArgument("TimerId is not set on decision.")
}
if len(attributes.GetTimerId()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("TimerId exceeds length limit.")
}
if attributes.GetStartToFireTimeoutSeconds() <= 0 {
return serviceerror.NewInvalidArgument("A valid StartToFireTimeoutSeconds is not set on decision.")
}
return nil
}
func (v *decisionAttrValidator) validateActivityCancelAttributes(
attributes *commonproto.RequestCancelActivityTaskDecisionAttributes,
) error {
if attributes == nil {
return serviceerror.NewInvalidArgument("RequestCancelActivityTaskDecisionAttributes is not set on decision.")
}
if attributes.GetActivityId() == "" {
return serviceerror.NewInvalidArgument("ActivityId is not set on decision.")
}
if len(attributes.GetActivityId()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("ActivityId exceeds length limit.")
}
return nil
}
func (v *decisionAttrValidator) validateTimerCancelAttributes(
attributes *commonproto.CancelTimerDecisionAttributes,
) error {
if attributes == nil {
return serviceerror.NewInvalidArgument("CancelTimerDecisionAttributes is not set on decision.")
}
if attributes.GetTimerId() == "" {
return serviceerror.NewInvalidArgument("TimerId is not set on decision.")
}
if len(attributes.GetTimerId()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("TimerId exceeds length limit.")
}
return nil
}
func (v *decisionAttrValidator) validateRecordMarkerAttributes(
attributes *commonproto.RecordMarkerDecisionAttributes,
) error {
if attributes == nil {
return serviceerror.NewInvalidArgument("RecordMarkerDecisionAttributes is not set on decision.")
}
if attributes.GetMarkerName() == "" {
return serviceerror.NewInvalidArgument("MarkerName is not set on decision.")
}
if len(attributes.GetMarkerName()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("MarkerName exceeds length limit.")
}
return nil
}
func (v *decisionAttrValidator) validateCompleteWorkflowExecutionAttributes(
attributes *commonproto.CompleteWorkflowExecutionDecisionAttributes,
) error {
if attributes == nil {
return serviceerror.NewInvalidArgument("CompleteWorkflowExecutionDecisionAttributes is not set on decision.")
}
return nil
}
func (v *decisionAttrValidator) validateFailWorkflowExecutionAttributes(
attributes *commonproto.FailWorkflowExecutionDecisionAttributes,
) error {
if attributes == nil {
return serviceerror.NewInvalidArgument("FailWorkflowExecutionDecisionAttributes is not set on decision.")
}
if attributes.GetReason() == "" {
return serviceerror.NewInvalidArgument("Reason is not set on decision.")
}
return nil
}
func (v *decisionAttrValidator) validateCancelWorkflowExecutionAttributes(
attributes *commonproto.CancelWorkflowExecutionDecisionAttributes,
) error {
if attributes == nil {
return serviceerror.NewInvalidArgument("CancelWorkflowExecutionDecisionAttributes is not set on decision.")
}
return nil
}
func (v *decisionAttrValidator) validateCancelExternalWorkflowExecutionAttributes(
domainID string,
targetDomainID string,
attributes *commonproto.RequestCancelExternalWorkflowExecutionDecisionAttributes,
) error {
if err := v.validateCrossDomainCall(
domainID,
targetDomainID,
); err != nil {
return err
}
if attributes == nil {
return serviceerror.NewInvalidArgument("RequestCancelExternalWorkflowExecutionDecisionAttributes is not set on decision.")
}
if attributes.GetWorkflowId() == "" {
return serviceerror.NewInvalidArgument("WorkflowId is not set on decision.")
}
if len(attributes.GetDomain()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("Domain exceeds length limit.")
}
if len(attributes.GetWorkflowId()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("WorkflowId exceeds length limit.")
}
runID := attributes.GetRunId()
if runID != "" && uuid.Parse(runID) == nil {
return serviceerror.NewInvalidArgument("Invalid RunId set on decision.")
}
return nil
}
func (v *decisionAttrValidator) validateSignalExternalWorkflowExecutionAttributes(
domainID string,
targetDomainID string,
attributes *commonproto.SignalExternalWorkflowExecutionDecisionAttributes,
) error {
if err := v.validateCrossDomainCall(
domainID,
targetDomainID,
); err != nil {
return err
}
if attributes == nil {
return serviceerror.NewInvalidArgument("SignalExternalWorkflowExecutionDecisionAttributes is not set on decision.")
}
if attributes.Execution == nil {
return serviceerror.NewInvalidArgument("Execution is nil on decision.")
}
if attributes.Execution.GetWorkflowId() == "" {
return serviceerror.NewInvalidArgument("WorkflowId is not set on decision.")
}
if len(attributes.GetDomain()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("Domain exceeds length limit.")
}
if len(attributes.Execution.GetWorkflowId()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("WorkflowId exceeds length limit.")
}
targetRunID := attributes.Execution.GetRunId()
if targetRunID != "" && uuid.Parse(targetRunID) == nil {
return serviceerror.NewInvalidArgument("Invalid RunId set on decision.")
}
if attributes.GetSignalName() == "" {
return serviceerror.NewInvalidArgument("SignalName is not set on decision.")
}
if attributes.Input == nil {
return serviceerror.NewInvalidArgument("Input is not set on decision.")
}
return nil
}
func (v *decisionAttrValidator) validateUpsertWorkflowSearchAttributes(
domainName string,
attributes *commonproto.UpsertWorkflowSearchAttributesDecisionAttributes,
) error {
if attributes == nil {
return serviceerror.NewInvalidArgument("UpsertWorkflowSearchAttributesDecisionAttributes is not set on decision.")
}
if attributes.SearchAttributes == nil {
return serviceerror.NewInvalidArgument("SearchAttributes is not set on decision.")
}
if len(attributes.GetSearchAttributes().GetIndexedFields()) == 0 {
return serviceerror.NewInvalidArgument("IndexedFields is empty on decision.")
}
return v.searchAttributesValidator.ValidateSearchAttributes(attributes.GetSearchAttributes(), domainName)
}
func (v *decisionAttrValidator) validateContinueAsNewWorkflowExecutionAttributes(
attributes *commonproto.ContinueAsNewWorkflowExecutionDecisionAttributes,
executionInfo *persistence.WorkflowExecutionInfo,
) error {
if attributes == nil {
return serviceerror.NewInvalidArgument("ContinueAsNewWorkflowExecutionDecisionAttributes is not set on decision.")
}
// Inherit workflow type from previous execution if not provided on decision
if attributes.WorkflowType == nil || attributes.WorkflowType.GetName() == "" {
attributes.WorkflowType = &commonproto.WorkflowType{Name: executionInfo.WorkflowTypeName}
}
if len(attributes.WorkflowType.GetName()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("WorkflowType exceeds length limit.")
}
// Inherit Tasklist from previous execution if not provided on decision
taskList, err := v.validatedTaskList(attributes.TaskList, executionInfo.TaskList)
if err != nil {
return err
}
attributes.TaskList = taskList
// Inherit workflow timeout from previous execution if not provided on decision
if attributes.GetExecutionStartToCloseTimeoutSeconds() <= 0 {
attributes.ExecutionStartToCloseTimeoutSeconds = executionInfo.WorkflowTimeout
}
// Inherit decision task timeout from previous execution if not provided on decision
if attributes.GetTaskStartToCloseTimeoutSeconds() <= 0 {
attributes.TaskStartToCloseTimeoutSeconds = executionInfo.DecisionStartToCloseTimeout
}
// Check next run decision task delay
if attributes.GetBackoffStartIntervalInSeconds() < 0 {
return serviceerror.NewInvalidArgument("BackoffStartInterval is less than 0.")
}
domainEntry, err := v.domainCache.GetDomainByID(executionInfo.DomainID)
if err != nil {
return err
}
return v.searchAttributesValidator.ValidateSearchAttributes(attributes.GetSearchAttributes(), domainEntry.GetInfo().Name)
}
func (v *decisionAttrValidator) validateStartChildExecutionAttributes(
domainID string,
targetDomainID string,
attributes *commonproto.StartChildWorkflowExecutionDecisionAttributes,
parentInfo *persistence.WorkflowExecutionInfo,
) error {
if err := v.validateCrossDomainCall(
domainID,
targetDomainID,
); err != nil {
return err
}
if attributes == nil {
return serviceerror.NewInvalidArgument("StartChildWorkflowExecutionDecisionAttributes is not set on decision.")
}
if attributes.GetWorkflowId() == "" {
return serviceerror.NewInvalidArgument("Required field WorkflowID is not set on decision.")
}
if attributes.WorkflowType == nil || attributes.WorkflowType.GetName() == "" {
return serviceerror.NewInvalidArgument("Required field WorkflowType is not set on decision.")
}
if len(attributes.GetDomain()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("Domain exceeds length limit.")
}
if len(attributes.GetWorkflowId()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("WorkflowId exceeds length limit.")
}
if len(attributes.WorkflowType.GetName()) > v.maxIDLengthLimit {
return serviceerror.NewInvalidArgument("WorkflowType exceeds length limit.")
}
if err := common.ValidateRetryPolicy(attributes.RetryPolicy); err != nil {
return err
}
if err := backoff.ValidateSchedule(attributes.GetCronSchedule()); err != nil {
return err
}
// Inherit tasklist from parent workflow execution if not provided on decision
taskList, err := v.validatedTaskList(attributes.TaskList, parentInfo.TaskList)
if err != nil {
return err
}
attributes.TaskList = taskList
// Inherit workflow timeout from parent workflow execution if not provided on decision
if attributes.GetExecutionStartToCloseTimeoutSeconds() <= 0 {
attributes.ExecutionStartToCloseTimeoutSeconds = parentInfo.WorkflowTimeout
}
// Inherit decision task timeout from parent workflow execution if not provided on decision
if attributes.GetTaskStartToCloseTimeoutSeconds() <= 0 {
attributes.TaskStartToCloseTimeoutSeconds = parentInfo.DecisionStartToCloseTimeout
}
return nil
}
func (v *decisionAttrValidator) validatedTaskList(
taskList *commonproto.TaskList,
defaultVal string,
) (*commonproto.TaskList, error) {
if taskList == nil {
taskList = &commonproto.TaskList{}
}
if taskList.GetName() == "" {
if defaultVal == "" {
return taskList, serviceerror.NewInvalidArgument("missing task list name")
}
taskList.Name = defaultVal
return taskList, nil
}
name := taskList.GetName()
if len(name) > v.maxIDLengthLimit {
return taskList, serviceerror.NewInvalidArgument(fmt.Sprintf("task list name exceeds length limit of %v", v.maxIDLengthLimit))
}
if strings.HasPrefix(name, reservedTaskListPrefix) {
return taskList, serviceerror.NewInvalidArgument(fmt.Sprintf("task list name cannot start with reserved prefix %v", reservedTaskListPrefix))
}
return taskList, nil
}
func (v *decisionAttrValidator) validateCrossDomainCall(
domainID string,
targetDomainID string,
) error {
	// same domain ID, no check needed
if domainID == targetDomainID {
return nil
}
domainEntry, err := v.domainCache.GetDomainByID(domainID)
if err != nil {
return err
}
targetDomainEntry, err := v.domainCache.GetDomainByID(targetDomainID)
if err != nil {
return err
}
	// both are local domains
if !domainEntry.IsGlobalDomain() && !targetDomainEntry.IsGlobalDomain() {
return nil
}
domainClusters := domainEntry.GetReplicationConfig().Clusters
targetDomainClusters := targetDomainEntry.GetReplicationConfig().Clusters
	// one is a local domain and the other is a global domain, or both are global domains;
	// treat a global domain with a single replication cluster as a local domain
if len(domainClusters) == 1 && len(targetDomainClusters) == 1 {
if *domainClusters[0] == *targetDomainClusters[0] {
return nil
}
return v.createCrossDomainCallError(domainEntry, targetDomainEntry)
}
return v.createCrossDomainCallError(domainEntry, targetDomainEntry)
}
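// Summary of the rule above (explanatory note, behavior inferred from the code):
// a call is allowed when the two domain IDs match, when both domains are
// local, or when each side has exactly one replication cluster and those
// clusters are identical (a global domain with a single cluster is treated
// as local). Any other combination is rejected via createCrossDomainCallError.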
func (v *decisionAttrValidator) createCrossDomainCallError(
domainEntry *cache.DomainCacheEntry,
targetDomainEntry *cache.DomainCacheEntry,
) error {
return serviceerror.NewInvalidArgument(fmt.Sprintf("cannot make cross domain call between %v and %v", domainEntry.GetInfo().Name, targetDomainEntry.GetInfo().Name))
}
| 1 | 9,356 | Not part of this PR but this const seems to be defined in multiple places. Needs to be extracted somewhere. | temporalio-temporal | go |
@@ -2,6 +2,7 @@ package volume
import (
"fmt"
+
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/api/client"
"github.com/libopenstorage/openstorage/volume" | 1 | package volume
import (
"fmt"
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/api/client"
"github.com/libopenstorage/openstorage/volume"
)
// VolumeDriver returns a REST wrapper for the VolumeDriver interface.
func VolumeDriver(c *client.Client) volume.VolumeDriver {
return newVolumeClient(c)
}
// NewAuthDriverClient returns a new REST client of the supplied version for specified driver.
// host: REST endpoint [http://<ip>:<port> OR unix://<path-to-unix-socket>]. default: [unix:///var/lib/osd/<driverName>.sock]
// version: Volume API version
func NewAuthDriverClient(host, driverName, version, authstring, accesstoken, userAgent string) (*client.Client, error) {
if driverName == "" {
return nil, fmt.Errorf("Driver Name cannot be empty")
}
if host == "" {
host = client.GetUnixServerPath(driverName, volume.DriverAPIBase)
}
if version == "" {
// Set the default version
version = volume.APIVersion
}
return client.NewAuthClient(host, version, authstring, accesstoken, userAgent)
}
// NewDriverClient returns a new REST client of the supplied version for specified driver.
// host: REST endpoint [http://<ip>:<port> OR unix://<path-to-unix-socket>]. default: [unix:///var/lib/osd/<driverName>.sock]
// version: Volume API version
// userAgent: Drivername for http connections
func NewDriverClient(host, driverName, version, userAgent string) (*client.Client, error) {
if host == "" {
if driverName == "" {
return nil, fmt.Errorf("Driver Name cannot be empty")
}
host = client.GetUnixServerPath(driverName, volume.DriverAPIBase)
}
if version == "" {
// Set the default version
version = volume.APIVersion
}
return client.NewClient(host, version, userAgent)
}
// GetSupportedDriverVersions returns a list of supported versions
// for the provided driver. It uses the given server endpoint or the
// standard unix domain socket
func GetSupportedDriverVersions(driverName, host string) ([]string, error) {
// Get a client handler
if host == "" {
host = client.GetUnixServerPath(driverName, volume.DriverAPIBase)
}
client, err := client.NewClient(host, "", "")
if err != nil {
return []string{}, err
}
versions, err := client.Versions(api.OsdVolumePath)
if err != nil {
return []string{}, err
}
return versions, nil
}
| 1 | 6,421 | Remove this file from the PR | libopenstorage-openstorage | go |
@@ -55,6 +55,11 @@ namespace Nethermind.Core.Specs
{
return Byzantium.Instance;
}
+
+ if (blockNumber < IstanbulBlockNumber)
+ {
+ return Istanbul.Instance;
+ }
return ConstantinopleFix.Instance;
} | 1 | /*
* Copyright (c) 2018 Demerzel Solutions Limited
* This file is part of the Nethermind library.
*
* The Nethermind library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The Nethermind library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
*/
using Nethermind.Core.Specs.Forks;
namespace Nethermind.Core.Specs
{
public class MainNetSpecProvider : ISpecProvider
{
public IReleaseSpec GenesisSpec => Frontier.Instance;
public IReleaseSpec GetSpec(long blockNumber)
{
if (blockNumber < HomesteadBlockNumber)
{
return Frontier.Instance;
}
if (blockNumber < DaoBlockNumber)
{
return Homestead.Instance;
}
if (blockNumber < TangerineWhistleBlockNumber)
{
return Dao.Instance;
}
if (blockNumber < SpuriousDragonBlockNumber)
{
return TangerineWhistle.Instance;
}
if (blockNumber < ByzantiumBlockNumber)
{
return SpuriousDragon.Instance;
}
if (blockNumber < ConstantinopleFixBlockNumber)
{
return Byzantium.Instance;
}
return ConstantinopleFix.Instance;
}
public static long HomesteadBlockNumber { get; } = 1150000;
public long? DaoBlockNumber { get; } = 1920000;
public static long TangerineWhistleBlockNumber { get; } = 2463000;
public static long SpuriousDragonBlockNumber { get; } = 2675000;
public static long ByzantiumBlockNumber { get; } = 4370000;
public static long ConstantinopleFixBlockNumber { get; } = 7280000;
public int ChainId => 1;
private MainNetSpecProvider()
{
}
public static MainNetSpecProvider Instance = new MainNetSpecProvider();
}
} | 1 | 22,444 | need to add it for ropsten, rinkeby, goerli as well | NethermindEth-nethermind | .cs |
@@ -109,7 +109,7 @@ func (v *VolumeProperty) Execute() ([]byte, error) {
return nil, err
}
// execute command here
- return exec.Command(bin.ZFS, v.Command).CombinedOutput()
+ return exec.Command(bin.BASH, "-c", v.Command).CombinedOutput()
}
// Build returns the VolumeProperty object generated by builder | 1 | /*
Copyright 2019 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vproperty
import (
"fmt"
"os/exec"
"reflect"
"runtime"
"strings"
"github.com/openebs/maya/pkg/zfs/cmd/v1alpha1/bin"
"github.com/pkg/errors"
)
//VolumeProperty defines structure for volume 'Property' operation
type VolumeProperty struct {
//list of property
Proplist []string
//set property
OpSet bool
//dataset name
Dataset string
//snapshot name
Snapshot string
//command for this structure
Command string
// checks is list of predicate function used for validating object
checks []PredicateFunc
// error
err error
}
// NewVolumeProperty returns new instance of object VolumeProperty
func NewVolumeProperty() *VolumeProperty {
return &VolumeProperty{}
}
// WithCheck add given check to checks list
func (v *VolumeProperty) WithCheck(check ...PredicateFunc) *VolumeProperty {
v.checks = append(v.checks, check...)
return v
}
// WithProperty method fills the Proplist field of VolumeProperty object.
func (v *VolumeProperty) WithProperty(key, value string) *VolumeProperty {
v.Proplist = append(v.Proplist, fmt.Sprintf("%s=%s", key, value))
return v
}
// WithOpSet method fills the OpSet field of VolumeProperty object.
func (v *VolumeProperty) WithOpSet(OpSet bool) *VolumeProperty {
v.OpSet = OpSet
return v
}
// WithDataset method fills the Dataset field of VolumeProperty object.
func (v *VolumeProperty) WithDataset(Dataset string) *VolumeProperty {
v.Dataset = Dataset
return v
}
// WithSnapshot method fills the Snapshot field of VolumeProperty object.
func (v *VolumeProperty) WithSnapshot(Snapshot string) *VolumeProperty {
v.Snapshot = Snapshot
return v
}
// WithCommand method fills the Command field of VolumeProperty object.
func (v *VolumeProperty) WithCommand(Command string) *VolumeProperty {
v.Command = Command
return v
}
// Validate is to validate generated VolumeProperty object by builder
func (v *VolumeProperty) Validate() *VolumeProperty {
for _, check := range v.checks {
if !check(v) {
v.err = errors.Wrapf(v.err, "validation failed {%v}", runtime.FuncForPC(reflect.ValueOf(check).Pointer()).Name())
}
}
return v
}
// Execute is to execute generated VolumeProperty object
func (v *VolumeProperty) Execute() ([]byte, error) {
v, err := v.Build()
if err != nil {
return nil, err
}
// execute command here
return exec.Command(bin.ZFS, v.Command).CombinedOutput()
}
// Build returns the VolumeProperty object generated by builder
func (v *VolumeProperty) Build() (*VolumeProperty, error) {
var c strings.Builder
v = v.Validate()
if IsProplistSet()(v) {
for _, p := range v.Proplist {
v.appendCommand(c, fmt.Sprintf(" -o %s", p))
}
}
if IsOpSet()(v) {
v.appendCommand(c, "set")
} else {
v.appendCommand(c, "get")
}
v.appendCommand(c, v.Dataset)
if IsSnapshotSet()(v) {
v.appendCommand(c, fmt.Sprintf("@%s", v.Snapshot))
}
v.Command = c.String()
return v, v.err
}
// appendCommand append string to given string builder
func (v *VolumeProperty) appendCommand(c strings.Builder, cmd string) {
_, err := c.WriteString(cmd)
if err != nil {
v.err = errors.Wrapf(v.err, "Failed to append cmd{%s} : %s", cmd, err.Error())
}
}
| 1 | 16,752 | G204: Subprocess launching should be audited (from `gosec`) | openebs-maya | go |
@@ -49,11 +49,11 @@ import org.apache.orc.storage.ql.exec.vector.MapColumnVector;
import org.apache.orc.storage.ql.exec.vector.TimestampColumnVector;
import org.apache.orc.storage.serde2.io.HiveDecimalWritable;
-class FlinkOrcReaders {
+public class FlinkOrcReaders {
private FlinkOrcReaders() {
}
- static OrcValueReader<StringData> strings() {
+ public static OrcValueReader<StringData> strings() {
return StringReader.INSTANCE;
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink.data;
import java.math.BigDecimal;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.util.List;
import java.util.Map;
import org.apache.flink.table.data.ArrayData;
import org.apache.flink.table.data.DecimalData;
import org.apache.flink.table.data.GenericArrayData;
import org.apache.flink.table.data.GenericMapData;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.MapData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.iceberg.orc.OrcValueReader;
import org.apache.iceberg.orc.OrcValueReaders;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Types;
import org.apache.orc.storage.ql.exec.vector.BytesColumnVector;
import org.apache.orc.storage.ql.exec.vector.ColumnVector;
import org.apache.orc.storage.ql.exec.vector.DecimalColumnVector;
import org.apache.orc.storage.ql.exec.vector.ListColumnVector;
import org.apache.orc.storage.ql.exec.vector.LongColumnVector;
import org.apache.orc.storage.ql.exec.vector.MapColumnVector;
import org.apache.orc.storage.ql.exec.vector.TimestampColumnVector;
import org.apache.orc.storage.serde2.io.HiveDecimalWritable;
class FlinkOrcReaders {
private FlinkOrcReaders() {
}
static OrcValueReader<StringData> strings() {
return StringReader.INSTANCE;
}
static OrcValueReader<Integer> dates() {
return DateReader.INSTANCE;
}
static OrcValueReader<DecimalData> decimals(int precision, int scale) {
if (precision <= 18) {
return new Decimal18Reader(precision, scale);
} else if (precision <= 38) {
return new Decimal38Reader(precision, scale);
} else {
throw new IllegalArgumentException("Invalid precision: " + precision);
}
}
static OrcValueReader<Integer> times() {
return TimeReader.INSTANCE;
}
static OrcValueReader<TimestampData> timestamps() {
return TimestampReader.INSTANCE;
}
static OrcValueReader<TimestampData> timestampTzs() {
return TimestampTzReader.INSTANCE;
}
static <T> OrcValueReader<ArrayData> array(OrcValueReader<T> elementReader) {
return new ArrayReader<>(elementReader);
}
public static <K, V> OrcValueReader<MapData> map(OrcValueReader<K> keyReader, OrcValueReader<V> valueReader) {
return new MapReader<>(keyReader, valueReader);
}
public static OrcValueReader<RowData> struct(List<OrcValueReader<?>> readers,
Types.StructType struct,
Map<Integer, ?> idToConstant) {
return new StructReader(readers, struct, idToConstant);
}
private static class StringReader implements OrcValueReader<StringData> {
private static final StringReader INSTANCE = new StringReader();
@Override
public StringData nonNullRead(ColumnVector vector, int row) {
BytesColumnVector bytesVector = (BytesColumnVector) vector;
return StringData.fromBytes(bytesVector.vector[row], bytesVector.start[row], bytesVector.length[row]);
}
}
private static class DateReader implements OrcValueReader<Integer> {
private static final DateReader INSTANCE = new DateReader();
@Override
public Integer nonNullRead(ColumnVector vector, int row) {
return (int) ((LongColumnVector) vector).vector[row];
}
}
private static class Decimal18Reader implements OrcValueReader<DecimalData> {
private final int precision;
private final int scale;
Decimal18Reader(int precision, int scale) {
this.precision = precision;
this.scale = scale;
}
@Override
public DecimalData nonNullRead(ColumnVector vector, int row) {
HiveDecimalWritable value = ((DecimalColumnVector) vector).vector[row];
      // The Hive ORC writer may adjust the scale of decimal data.
Preconditions.checkArgument(value.precision() <= precision,
"Cannot read value as decimal(%s,%s), too large: %s", precision, scale, value);
return DecimalData.fromUnscaledLong(value.serialize64(scale), precision, scale);
}
}
private static class Decimal38Reader implements OrcValueReader<DecimalData> {
private final int precision;
private final int scale;
Decimal38Reader(int precision, int scale) {
this.precision = precision;
this.scale = scale;
}
@Override
public DecimalData nonNullRead(ColumnVector vector, int row) {
BigDecimal value = ((DecimalColumnVector) vector).vector[row].getHiveDecimal().bigDecimalValue();
Preconditions.checkArgument(value.precision() <= precision,
"Cannot read value as decimal(%s,%s), too large: %s", precision, scale, value);
return DecimalData.fromBigDecimal(value, precision, scale);
}
}
private static class TimeReader implements OrcValueReader<Integer> {
private static final TimeReader INSTANCE = new TimeReader();
@Override
public Integer nonNullRead(ColumnVector vector, int row) {
long micros = ((LongColumnVector) vector).vector[row];
// Flink only support time mills, just erase micros.
return (int) (micros / 1000);
}
}
private static class TimestampReader implements OrcValueReader<TimestampData> {
private static final TimestampReader INSTANCE = new TimestampReader();
@Override
public TimestampData nonNullRead(ColumnVector vector, int row) {
TimestampColumnVector tcv = (TimestampColumnVector) vector;
LocalDateTime localDate = Instant.ofEpochSecond(Math.floorDiv(tcv.time[row], 1_000), tcv.nanos[row])
.atOffset(ZoneOffset.UTC)
.toLocalDateTime();
return TimestampData.fromLocalDateTime(localDate);
}
}
private static class TimestampTzReader implements OrcValueReader<TimestampData> {
private static final TimestampTzReader INSTANCE = new TimestampTzReader();
@Override
public TimestampData nonNullRead(ColumnVector vector, int row) {
TimestampColumnVector tcv = (TimestampColumnVector) vector;
Instant instant = Instant.ofEpochSecond(Math.floorDiv(tcv.time[row], 1_000), tcv.nanos[row])
.atOffset(ZoneOffset.UTC)
.toInstant();
return TimestampData.fromInstant(instant);
}
}
private static class ArrayReader<T> implements OrcValueReader<ArrayData> {
private final OrcValueReader<T> elementReader;
private ArrayReader(OrcValueReader<T> elementReader) {
this.elementReader = elementReader;
}
@Override
public ArrayData nonNullRead(ColumnVector vector, int row) {
ListColumnVector listVector = (ListColumnVector) vector;
int offset = (int) listVector.offsets[row];
int length = (int) listVector.lengths[row];
List<T> elements = Lists.newArrayListWithExpectedSize(length);
for (int c = 0; c < length; ++c) {
elements.add(elementReader.read(listVector.child, offset + c));
}
return new GenericArrayData(elements.toArray());
}
@Override
public void setBatchContext(long batchOffsetInFile) {
elementReader.setBatchContext(batchOffsetInFile);
}
}
private static class MapReader<K, V> implements OrcValueReader<MapData> {
private final OrcValueReader<K> keyReader;
private final OrcValueReader<V> valueReader;
private MapReader(OrcValueReader<K> keyReader, OrcValueReader<V> valueReader) {
this.keyReader = keyReader;
this.valueReader = valueReader;
}
@Override
public MapData nonNullRead(ColumnVector vector, int row) {
MapColumnVector mapVector = (MapColumnVector) vector;
int offset = (int) mapVector.offsets[row];
long length = mapVector.lengths[row];
Map<K, V> map = Maps.newHashMap();
for (int c = 0; c < length; c++) {
K key = keyReader.read(mapVector.keys, offset + c);
V value = valueReader.read(mapVector.values, offset + c);
map.put(key, value);
}
return new GenericMapData(map);
}
@Override
public void setBatchContext(long batchOffsetInFile) {
keyReader.setBatchContext(batchOffsetInFile);
valueReader.setBatchContext(batchOffsetInFile);
}
}
private static class StructReader extends OrcValueReaders.StructReader<RowData> {
private final int numFields;
StructReader(List<OrcValueReader<?>> readers, Types.StructType struct, Map<Integer, ?> idToConstant) {
super(readers, struct, idToConstant);
this.numFields = struct.fields().size();
}
@Override
protected RowData create() {
return new GenericRowData(numFields);
}
@Override
protected void set(RowData struct, int pos, Object value) {
((GenericRowData) struct).setField(pos, value);
}
}
}
| 1 | 37,081 | curious why only making these 3 public. what about other package scope methods like `times`, `timestamps`, `array`? | apache-iceberg | java |
@@ -0,0 +1,15 @@
+<?php
+
+return [
+ /*
+ |--------------------------------------------------------------------------
+ | Exclusion list
+ |--------------------------------------------------------------------------
+ |
+ | This is a list of exclusions for words or phrases where the original
+ | form of the word has the same spelling in a given language.
+ |
+ */
+
+ 'Email',
+]; | 1 | 1 | 7,901 | The Email field is not translated into Russian, and into any other. It's kind of a "standard" word. Therefore, we may face the fact that you have to create files with exceptions for all languages. @caouecs, what do you say? | Laravel-Lang-lang | php |
|
@@ -32,6 +32,9 @@ int main (int argc, char * const * argv)
("disable_unchecked_drop", "Disables drop of unchecked table at startup")
("fast_bootstrap", "Increase bootstrap speed for high end nodes with higher limits")
("batch_size",boost::program_options::value<std::size_t> (), "Increase sideband batch size, default 512")
+ ("block_processor_batch_size",boost::program_options::value<std::size_t> (), "Increase block processor transaction batch write size, default 0 (limited by config block_processor_batch_max_time), 256k for fast_bootstrap")
+ ("block_processor_full_size",boost::program_options::value<std::size_t> (), "Increase block processor allowed blocks queue size before dropping live network packets and holding bootstrap download, default 65536, 1 million for fast_bootstrap")
+ ("block_processor_verification_size",boost::program_options::value<std::size_t> (), "Increase batch signature verification size in block processor, default 0 (2048 * signature checker threads + 1), unlimited for fast_bootstrap")
("debug_block_count", "Display the number of block")
("debug_bootstrap_generate", "Generate bootstrap sequence of blocks")
("debug_dump_online_weight", "Dump online_weights table") | 1 | #include <nano/lib/utility.hpp>
#include <nano/nano_node/daemon.hpp>
#include <nano/node/cli.hpp>
#include <nano/node/node.hpp>
#include <nano/node/rpc.hpp>
#include <nano/node/testing.hpp>
#include <sstream>
#include <argon2.h>
#include <boost/lexical_cast.hpp>
#include <boost/program_options.hpp>
int main (int argc, char * const * argv)
{
nano::set_umask ();
boost::program_options::options_description description ("Command line options");
nano::add_node_options (description);
// clang-format off
description.add_options ()
("help", "Print out options")
("version", "Prints out version")
("daemon", "Start node daemon")
("disable_backup", "Disable wallet automatic backups")
("disable_lazy_bootstrap", "Disables lazy bootstrap")
("disable_legacy_bootstrap", "Disables legacy bootstrap")
("disable_wallet_bootstrap", "Disables wallet lazy bootstrap")
("disable_bootstrap_listener", "Disables bootstrap listener (incoming connections)")
("disable_unchecked_cleanup", "Disables periodic cleanup of old records from unchecked table")
("disable_unchecked_drop", "Disables drop of unchecked table at startup")
("fast_bootstrap", "Increase bootstrap speed for high end nodes with higher limits")
("batch_size",boost::program_options::value<std::size_t> (), "Increase sideband batch size, default 512")
("debug_block_count", "Display the number of block")
("debug_bootstrap_generate", "Generate bootstrap sequence of blocks")
("debug_dump_online_weight", "Dump online_weights table")
("debug_dump_representatives", "List representatives and weights")
("debug_account_count", "Display the number of accounts")
("debug_mass_activity", "Generates fake debug activity")
("debug_profile_generate", "Profile work generation")
("debug_opencl", "OpenCL work generation")
("debug_profile_verify", "Profile work verification")
("debug_profile_kdf", "Profile kdf function")
("debug_verify_profile", "Profile signature verification")
("debug_verify_profile_batch", "Profile batch signature verification")
("debug_profile_bootstrap", "Profile bootstrap style blocks processing (at least 10GB of free storage space required)")
("debug_profile_sign", "Profile signature generation")
("debug_profile_process", "Profile active blocks processing (only for nano_test_network)")
("debug_profile_votes", "Profile votes processing (only for nano_test_network)")
("debug_random_feed", "Generates output to RNG test suites")
("debug_rpc", "Read an RPC command from stdin and invoke it. Network operations will have no effect.")
("debug_validate_blocks", "Check all blocks for correct hash, signature, work value")
("debug_peers", "Display peer IPv6:port connections")
("platform", boost::program_options::value<std::string> (), "Defines the <platform> for OpenCL commands")
("device", boost::program_options::value<std::string> (), "Defines <device> for OpenCL command")
("threads", boost::program_options::value<std::string> (), "Defines <threads> count for OpenCL command")
("difficulty", boost::program_options::value<std::string> (), "Defines <difficulty> for OpenCL command, HEX");
// clang-format on
boost::program_options::variables_map vm;
try
{
boost::program_options::store (boost::program_options::parse_command_line (argc, argv, description), vm);
}
catch (boost::program_options::error const & err)
{
std::cerr << err.what () << std::endl;
return 1;
}
boost::program_options::notify (vm);
int result (0);
auto network (vm.find ("network"));
if (network != vm.end ())
{
auto err (nano::network_params::set_active_network (network->second.as<std::string> ()));
if (err)
{
std::cerr << err.get_message () << std::endl;
std::exit (1);
}
}
auto data_path_it = vm.find ("data_path");
if (data_path_it == vm.end ())
{
std::string error_string;
if (!nano::migrate_working_path (error_string))
{
std::cerr << error_string << std::endl;
return 1;
}
}
boost::filesystem::path data_path ((data_path_it != vm.end ()) ? data_path_it->second.as<std::string> () : nano::working_path ());
auto ec = nano::handle_node_options (vm);
if (ec == nano::error_cli::unknown_command)
{
if (vm.count ("daemon") > 0)
{
nano_daemon::daemon daemon;
nano::node_flags flags;
auto batch_size_it = vm.find ("batch_size");
if (batch_size_it != vm.end ())
{
flags.sideband_batch_size = batch_size_it->second.as<size_t> ();
}
flags.disable_backup = (vm.count ("disable_backup") > 0);
flags.disable_lazy_bootstrap = (vm.count ("disable_lazy_bootstrap") > 0);
flags.disable_legacy_bootstrap = (vm.count ("disable_legacy_bootstrap") > 0);
flags.disable_wallet_bootstrap = (vm.count ("disable_wallet_bootstrap") > 0);
flags.disable_bootstrap_listener = (vm.count ("disable_bootstrap_listener") > 0);
flags.disable_unchecked_cleanup = (vm.count ("disable_unchecked_cleanup") > 0);
flags.disable_unchecked_drop = (vm.count ("disable_unchecked_drop") > 0);
flags.fast_bootstrap = (vm.count ("fast_bootstrap") > 0);
daemon.run (data_path, flags);
}
else if (vm.count ("debug_block_count"))
{
nano::inactive_node node (data_path);
auto transaction (node.node->store.tx_begin ());
std::cout << boost::str (boost::format ("Block count: %1%\n") % node.node->store.block_count (transaction).sum ());
}
else if (vm.count ("debug_bootstrap_generate"))
{
auto key_it = vm.find ("key");
if (key_it != vm.end ())
{
nano::uint256_union key;
if (!key.decode_hex (key_it->second.as<std::string> ()))
{
nano::keypair genesis (key.to_string ());
nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr);
std::cout << "Genesis: " << genesis.prv.data.to_string () << "\n"
<< "Public: " << genesis.pub.to_string () << "\n"
<< "Account: " << genesis.pub.to_account () << "\n";
nano::keypair landing;
std::cout << "Landing: " << landing.prv.data.to_string () << "\n"
<< "Public: " << landing.pub.to_string () << "\n"
<< "Account: " << landing.pub.to_account () << "\n";
for (auto i (0); i != 32; ++i)
{
nano::keypair rep;
std::cout << "Rep" << i << ": " << rep.prv.data.to_string () << "\n"
<< "Public: " << rep.pub.to_string () << "\n"
<< "Account: " << rep.pub.to_account () << "\n";
}
nano::uint128_t balance (std::numeric_limits<nano::uint128_t>::max ());
nano::open_block genesis_block (genesis.pub, genesis.pub, genesis.pub, genesis.prv, genesis.pub, work.generate (genesis.pub));
std::cout << genesis_block.to_json ();
std::cout.flush ();
nano::block_hash previous (genesis_block.hash ());
for (auto i (0); i != 8; ++i)
{
nano::uint128_t yearly_distribution (nano::uint128_t (1) << (127 - (i == 7 ? 6 : i)));
auto weekly_distribution (yearly_distribution / 52);
for (auto j (0); j != 52; ++j)
{
assert (balance > weekly_distribution);
balance = balance < (weekly_distribution * 2) ? 0 : balance - weekly_distribution;
nano::send_block send (previous, landing.pub, balance, genesis.prv, genesis.pub, work.generate (previous));
previous = send.hash ();
std::cout << send.to_json ();
std::cout.flush ();
}
}
}
else
{
std::cerr << "Invalid key\n";
result = -1;
}
}
else
{
std::cerr << "Bootstrapping requires one <key> option\n";
result = -1;
}
}
else if (vm.count ("debug_dump_online_weight"))
{
nano::inactive_node node (data_path);
auto current (node.node->online_reps.online_stake ());
std::cout << boost::str (boost::format ("Online Weight %1%\n") % current);
auto transaction (node.node->store.tx_begin_read ());
for (auto i (node.node->store.online_weight_begin (transaction)), n (node.node->store.online_weight_end ()); i != n; ++i)
{
using time_point = std::chrono::system_clock::time_point;
time_point ts (std::chrono::duration_cast<time_point::duration> (std::chrono::nanoseconds (i->first)));
std::time_t timestamp = std::chrono::system_clock::to_time_t (ts);
std::string weight;
i->second.encode_dec (weight);
std::cout << boost::str (boost::format ("Timestamp %1% Weight %2%\n") % ctime (×tamp) % weight);
}
}
else if (vm.count ("debug_dump_representatives"))
{
nano::inactive_node node (data_path);
auto transaction (node.node->store.tx_begin ());
nano::uint128_t total;
for (auto i (node.node->store.representation_begin (transaction)), n (node.node->store.representation_end ()); i != n; ++i)
{
nano::account account (i->first);
auto amount (node.node->store.representation_get (transaction, account));
total += amount;
std::cout << boost::str (boost::format ("%1% %2% %3%\n") % account.to_account () % amount.convert_to<std::string> () % total.convert_to<std::string> ());
}
std::map<nano::account, nano::uint128_t> calculated;
for (auto i (node.node->store.latest_begin (transaction)), n (node.node->store.latest_end ()); i != n; ++i)
{
nano::account_info info (i->second);
nano::block_hash rep_block (node.node->ledger.representative_calculated (transaction, info.head));
auto block (node.node->store.block_get (transaction, rep_block));
calculated[block->representative ()] += info.balance.number ();
}
total = 0;
for (auto i (calculated.begin ()), n (calculated.end ()); i != n; ++i)
{
total += i->second;
std::cout << boost::str (boost::format ("%1% %2% %3%\n") % i->first.to_account () % i->second.convert_to<std::string> () % total.convert_to<std::string> ());
}
}
else if (vm.count ("debug_account_count"))
{
nano::inactive_node node (data_path);
auto transaction (node.node->store.tx_begin ());
std::cout << boost::str (boost::format ("Frontier count: %1%\n") % node.node->store.account_count (transaction));
}
else if (vm.count ("debug_mass_activity"))
{
nano::system system (24000, 1);
uint32_t count (1000000);
system.generate_mass_activity (count, *system.nodes[0]);
}
else if (vm.count ("debug_profile_kdf"))
{
nano::network_params network_params;
nano::uint256_union result;
nano::uint256_union salt (0);
std::string password ("");
while (true)
{
auto begin1 (std::chrono::high_resolution_clock::now ());
auto success (argon2_hash (1, network_params.kdf_work, 1, password.data (), password.size (), salt.bytes.data (), salt.bytes.size (), result.bytes.data (), result.bytes.size (), NULL, 0, Argon2_d, 0x10));
(void)success;
auto end1 (std::chrono::high_resolution_clock::now ());
std::cerr << boost::str (boost::format ("Derivation time: %1%us\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ());
}
}
else if (vm.count ("debug_profile_generate"))
{
nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr);
nano::change_block block (0, 0, nano::keypair ().prv, 0, 0);
std::cerr << "Starting generation profiling\n";
while (true)
{
block.hashables.previous.qwords[0] += 1;
auto begin1 (std::chrono::high_resolution_clock::now ());
block.block_work_set (work.generate (block.root ()));
auto end1 (std::chrono::high_resolution_clock::now ());
std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ());
}
}
else if (vm.count ("debug_opencl"))
{
nano::network_params network_params;
bool error (false);
nano::opencl_environment environment (error);
if (!error)
{
unsigned short platform (0);
auto platform_it = vm.find ("platform");
if (platform_it != vm.end ())
{
try
{
platform = boost::lexical_cast<unsigned short> (platform_it->second.as<std::string> ());
}
catch (boost::bad_lexical_cast &)
{
std::cerr << "Invalid platform id\n";
result = -1;
}
}
unsigned short device (0);
auto device_it = vm.find ("device");
if (device_it != vm.end ())
{
try
{
device = boost::lexical_cast<unsigned short> (device_it->second.as<std::string> ());
}
catch (boost::bad_lexical_cast &)
{
std::cerr << "Invalid device id\n";
result = -1;
}
}
unsigned threads (1024 * 1024);
auto threads_it = vm.find ("threads");
if (threads_it != vm.end ())
{
try
{
threads = boost::lexical_cast<unsigned> (threads_it->second.as<std::string> ());
}
catch (boost::bad_lexical_cast &)
{
std::cerr << "Invalid threads count\n";
result = -1;
}
}
uint64_t difficulty (network_params.publish_threshold);
auto difficulty_it = vm.find ("difficulty");
if (difficulty_it != vm.end ())
{
if (nano::from_string_hex (difficulty_it->second.as<std::string> (), difficulty))
{
std::cerr << "Invalid difficulty\n";
result = -1;
}
}
if (!result)
{
error |= platform >= environment.platforms.size ();
if (!error)
{
error |= device >= environment.platforms[platform].devices.size ();
if (!error)
{
nano::logging logging;
auto opencl (nano::opencl_work::create (true, { platform, device, threads }, logging));
nano::work_pool work_pool (std::numeric_limits<unsigned>::max (), opencl ? [&opencl](nano::uint256_union const & root_a, uint64_t difficulty_a) {
return opencl->generate_work (root_a, difficulty_a);
}
: std::function<boost::optional<uint64_t> (nano::uint256_union const &, uint64_t)> (nullptr));
nano::change_block block (0, 0, nano::keypair ().prv, 0, 0);
std::cerr << boost::str (boost::format ("Starting OpenCL generation profiling. Platform: %1%. Device: %2%. Threads: %3%. Difficulty: %4$#x\n") % platform % device % threads % difficulty);
for (uint64_t i (0); true; ++i)
{
block.hashables.previous.qwords[0] += 1;
auto begin1 (std::chrono::high_resolution_clock::now ());
block.block_work_set (work_pool.generate (block.root (), difficulty));
auto end1 (std::chrono::high_resolution_clock::now ());
std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ());
}
}
else
{
std::cout << "Not available device id\n"
<< std::endl;
result = -1;
}
}
else
{
std::cout << "Not available platform id\n"
<< std::endl;
result = -1;
}
}
}
else
{
std::cout << "Error initializing OpenCL" << std::endl;
result = -1;
}
}
else if (vm.count ("debug_profile_verify"))
{
nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr);
nano::change_block block (0, 0, nano::keypair ().prv, 0, 0);
std::cerr << "Starting verification profiling\n";
while (true)
{
block.hashables.previous.qwords[0] += 1;
auto begin1 (std::chrono::high_resolution_clock::now ());
for (uint64_t t (0); t < 1000000; ++t)
{
block.hashables.previous.qwords[0] += 1;
block.block_work_set (t);
nano::work_validate (block);
}
auto end1 (std::chrono::high_resolution_clock::now ());
std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ());
}
}
else if (vm.count ("debug_verify_profile"))
{
nano::keypair key;
nano::uint256_union message;
nano::uint512_union signature;
signature = nano::sign_message (key.prv, key.pub, message);
auto begin (std::chrono::high_resolution_clock::now ());
for (auto i (0u); i < 1000; ++i)
{
nano::validate_message (key.pub, message, signature);
}
auto end (std::chrono::high_resolution_clock::now ());
std::cerr << "Signature verifications " << std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count () << std::endl;
}
else if (vm.count ("debug_verify_profile_batch"))
{
nano::keypair key;
size_t batch_count (1000);
nano::uint256_union message;
nano::uint512_union signature (nano::sign_message (key.prv, key.pub, message));
std::vector<unsigned char const *> messages (batch_count, message.bytes.data ());
std::vector<size_t> lengths (batch_count, sizeof (message));
std::vector<unsigned char const *> pub_keys (batch_count, key.pub.bytes.data ());
std::vector<unsigned char const *> signatures (batch_count, signature.bytes.data ());
std::vector<int> verifications;
verifications.resize (batch_count);
auto begin (std::chrono::high_resolution_clock::now ());
nano::validate_message_batch (messages.data (), lengths.data (), pub_keys.data (), signatures.data (), batch_count, verifications.data ());
auto end (std::chrono::high_resolution_clock::now ());
std::cerr << "Batch signature verifications " << std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count () << std::endl;
}
else if (vm.count ("debug_profile_sign"))
{
std::cerr << "Starting blocks signing profiling\n";
while (true)
{
nano::keypair key;
nano::block_hash latest (0);
auto begin1 (std::chrono::high_resolution_clock::now ());
for (uint64_t balance (0); balance < 1000; ++balance)
{
nano::send_block send (latest, key.pub, balance, key.prv, key.pub, 0);
latest = send.hash ();
}
auto end1 (std::chrono::high_resolution_clock::now ());
std::cerr << boost::str (boost::format ("%|1$ 12d|\n") % std::chrono::duration_cast<std::chrono::microseconds> (end1 - begin1).count ());
}
}
else if (vm.count ("debug_profile_process"))
{
nano::network_params::set_active_network (nano::nano_networks::nano_test_network);
nano::network_params test_params;
nano::block_builder builder;
size_t num_accounts (100000);
size_t num_interations (5); // 100,000 * 5 * 2 = 1,000,000 blocks
size_t max_blocks (2 * num_accounts * num_interations + num_accounts * 2); // 1,000,000 + 2* 100,000 = 1,200,000 blocks
std::cerr << boost::str (boost::format ("Starting pregenerating %1% blocks\n") % max_blocks);
nano::system system (24000, 1);
nano::node_init init;
nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr);
nano::logging logging;
auto path (nano::unique_path ());
logging.init (path);
auto node (std::make_shared<nano::node> (init, system.io_ctx, 24001, path, system.alarm, logging, work));
nano::block_hash genesis_latest (node->latest (test_params.ledger.test_genesis_key.pub));
nano::uint128_t genesis_balance (std::numeric_limits<nano::uint128_t>::max ());
// Generating keys
std::vector<nano::keypair> keys (num_accounts);
std::vector<nano::block_hash> frontiers (num_accounts);
std::vector<nano::uint128_t> balances (num_accounts, 1000000000);
// Generating blocks
std::deque<std::shared_ptr<nano::block>> blocks;
for (auto i (0); i != num_accounts; ++i)
{
genesis_balance = genesis_balance - 1000000000;
auto send = builder.state ()
.account (test_params.ledger.test_genesis_key.pub)
.previous (genesis_latest)
.representative (test_params.ledger.test_genesis_key.pub)
.balance (genesis_balance)
.link (keys[i].pub)
.sign (keys[i].prv, keys[i].pub)
.work (work.generate (genesis_latest))
.build ();
genesis_latest = send->hash ();
blocks.push_back (std::move (send));
auto open = builder.state ()
.account (keys[i].pub)
.previous (0)
.representative (keys[i].pub)
.balance (balances[i])
.link (genesis_latest)
.sign (test_params.ledger.test_genesis_key.prv, test_params.ledger.test_genesis_key.pub)
.work (work.generate (keys[i].pub))
.build ();
frontiers[i] = open->hash ();
blocks.push_back (std::move (open));
}
for (auto i (0); i != num_interations; ++i)
{
for (auto j (0); j != num_accounts; ++j)
{
size_t other (num_accounts - j - 1);
// Sending to other account
--balances[j];
auto send = builder.state ()
.account (keys[j].pub)
.previous (frontiers[j])
.representative (keys[j].pub)
.balance (balances[j])
.link (keys[other].pub)
.sign (keys[j].prv, keys[j].pub)
.work (work.generate (frontiers[j]))
.build ();
frontiers[j] = send->hash ();
blocks.push_back (std::move (send));
// Receiving
++balances[other];
auto receive = builder.state ()
.account (keys[other].pub)
.previous (frontiers[other])
.representative (keys[other].pub)
.balance (balances[other])
.link (frontiers[j])
.sign (keys[other].prv, keys[other].pub)
.work (work.generate (frontiers[other]))
.build ();
frontiers[other] = receive->hash ();
blocks.push_back (std::move (receive));
}
}
// Processing blocks
std::cerr << boost::str (boost::format ("Starting processing %1% active blocks\n") % max_blocks);
auto begin (std::chrono::high_resolution_clock::now ());
while (!blocks.empty ())
{
auto block (blocks.front ());
node->process_active (block);
blocks.pop_front ();
}
uint64_t block_count (0);
while (block_count < max_blocks + 1)
{
std::this_thread::sleep_for (std::chrono::milliseconds (100));
auto transaction (node->store.tx_begin ());
block_count = node->store.block_count (transaction).sum ();
}
auto end (std::chrono::high_resolution_clock::now ());
auto time (std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count ());
node->stop ();
std::cerr << boost::str (boost::format ("%|1$ 12d| us \n%2% blocks per second\n") % time % (max_blocks * 1000000 / time));
}
else if (vm.count ("debug_profile_votes"))
{
nano::network_params::set_active_network (nano::nano_networks::nano_test_network);
nano::network_params test_params;
nano::block_builder builder;
size_t num_elections (40000);
size_t num_representatives (25);
size_t max_votes (num_elections * num_representatives); // 40,000 * 25 = 1,000,000 votes
std::cerr << boost::str (boost::format ("Starting pregenerating %1% votes\n") % max_votes);
nano::system system (24000, 1);
nano::node_init init;
nano::work_pool work (std::numeric_limits<unsigned>::max (), nullptr);
nano::logging logging;
auto path (nano::unique_path ());
logging.init (path);
auto node (std::make_shared<nano::node> (init, system.io_ctx, 24001, path, system.alarm, logging, work));
nano::block_hash genesis_latest (node->latest (test_params.ledger.test_genesis_key.pub));
nano::uint128_t genesis_balance (std::numeric_limits<nano::uint128_t>::max ());
// Generating keys
std::vector<nano::keypair> keys (num_representatives);
nano::uint128_t balance ((node->config.online_weight_minimum.number () / num_representatives) + 1);
for (auto i (0); i != num_representatives; ++i)
{
auto transaction (node->store.tx_begin_write ());
genesis_balance = genesis_balance - balance;
auto send = builder.state ()
.account (test_params.ledger.test_genesis_key.pub)
.previous (genesis_latest)
.representative (test_params.ledger.test_genesis_key.pub)
.balance (genesis_balance)
.link (keys[i].pub)
.sign (test_params.ledger.test_genesis_key.prv, test_params.ledger.test_genesis_key.pub)
.work (work.generate (genesis_latest))
.build ();
genesis_latest = send->hash ();
node->ledger.process (transaction, *send);
auto open = builder.state ()
.account (keys[i].pub)
.previous (0)
.representative (keys[i].pub)
.balance (balance)
.link (genesis_latest)
.sign (keys[i].prv, keys[i].pub)
.work (work.generate (keys[i].pub))
.build ();
node->ledger.process (transaction, *open);
}
// Generating blocks
std::deque<std::shared_ptr<nano::block>> blocks;
for (auto i (0); i != num_elections; ++i)
{
genesis_balance = genesis_balance - 1;
nano::keypair destination;
auto send = builder.state ()
.account (test_params.ledger.test_genesis_key.pub)
.previous (genesis_latest)
.representative (test_params.ledger.test_genesis_key.pub)
.balance (genesis_balance)
.link (destination.pub)
.sign (test_params.ledger.test_genesis_key.prv, test_params.ledger.test_genesis_key.pub)
.work (work.generate (genesis_latest))
.build ();
genesis_latest = send->hash ();
blocks.push_back (std::move (send));
}
// Generating votes
std::deque<std::shared_ptr<nano::vote>> votes;
for (auto j (0); j != num_representatives; ++j)
{
uint64_t sequence (1);
for (auto & i : blocks)
{
auto vote (std::make_shared<nano::vote> (keys[j].pub, keys[j].prv, sequence, std::vector<nano::block_hash> (1, i->hash ())));
votes.push_back (vote);
sequence++;
}
}
// Processing block & start elections
while (!blocks.empty ())
{
auto block (blocks.front ());
node->process_active (block);
blocks.pop_front ();
}
node->block_processor.flush ();
// Processing votes
std::cerr << boost::str (boost::format ("Starting processing %1% votes\n") % max_votes);
auto begin (std::chrono::high_resolution_clock::now ());
while (!votes.empty ())
{
auto vote (votes.front ());
node->vote_processor.vote (vote, node->network.endpoint ());
votes.pop_front ();
}
while (!node->active.empty ())
{
std::this_thread::sleep_for (std::chrono::milliseconds (100));
}
auto end (std::chrono::high_resolution_clock::now ());
auto time (std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count ());
node->stop ();
std::cerr << boost::str (boost::format ("%|1$ 12d| us \n%2% votes per second\n") % time % (max_votes * 1000000 / time));
}
else if (vm.count ("debug_random_feed"))
{
/*
* This command redirects an infinite stream of bytes from the random pool to standard out.
* The result can be fed into various tools for testing RNGs and entropy pools.
*
* Example, running the entire dieharder test suite:
*
* ./nano_node --debug_random_feed | dieharder -a -g 200
*/
nano::raw_key seed;
for (;;)
{
nano::random_pool::generate_block (seed.data.bytes.data (), seed.data.bytes.size ());
std::cout.write (reinterpret_cast<const char *> (seed.data.bytes.data ()), seed.data.bytes.size ());
}
}
else if (vm.count ("debug_rpc"))
{
std::string rpc_input_l;
std::ostringstream command_l;
while (std::cin >> rpc_input_l)
{
command_l << rpc_input_l;
}
auto response_handler_l ([](boost::property_tree::ptree const & tree_a) {
boost::property_tree::write_json (std::cout, tree_a);
// Terminate as soon as we have the result, even if background threads (like work generation) are running.
std::exit (0);
});
nano::inactive_node inactive_node_l (data_path);
nano::rpc_config rpc_config_l;
rpc_config_l.enable_control = true;
std::unique_ptr<nano::rpc> rpc_l = get_rpc (inactive_node_l.node->io_ctx, *inactive_node_l.node, rpc_config_l);
std::string req_id_l ("1");
nano::rpc_handler handler_l (*inactive_node_l.node, *rpc_l, command_l.str (), req_id_l, response_handler_l);
handler_l.process_request ();
}
else if (vm.count ("debug_validate_blocks"))
{
nano::inactive_node node (data_path);
auto transaction (node.node->store.tx_begin ());
std::cerr << boost::str (boost::format ("Performing blocks hash, signature, work validation...\n"));
size_t count (0);
for (auto i (node.node->store.latest_begin (transaction)), n (node.node->store.latest_end ()); i != n; ++i)
{
++count;
if ((count % 20000) == 0)
{
std::cout << boost::str (boost::format ("%1% accounts validated\n") % count);
}
nano::account_info info (i->second);
nano::account account (i->first);
auto hash (info.open_block);
nano::block_hash calculated_hash (0);
nano::block_sideband sideband;
uint64_t height (0);
uint64_t previous_timestamp (0);
while (!hash.is_zero ())
{
// Retrieving block data
auto block (node.node->store.block_get (transaction, hash, &sideband));
// Check for state & open blocks if account field is correct
if (block->type () == nano::block_type::open || block->type () == nano::block_type::state)
{
if (block->account () != account)
{
std::cerr << boost::str (boost::format ("Incorrect account field for block %1%\n") % hash.to_string ());
}
}
// Check if sideband account is correct
else if (sideband.account != account)
{
std::cerr << boost::str (boost::format ("Incorrect sideband account for block %1%\n") % hash.to_string ());
}
// Check if previous field is correct
if (calculated_hash != block->previous ())
{
std::cerr << boost::str (boost::format ("Incorrect previous field for block %1%\n") % hash.to_string ());
}
// Check if block data is correct (calculating hash)
calculated_hash = block->hash ();
if (calculated_hash != hash)
{
std::cerr << boost::str (boost::format ("Invalid data inside block %1% calculated hash: %2%\n") % hash.to_string () % calculated_hash.to_string ());
}
// Check if block signature is correct
if (validate_message (account, hash, block->block_signature ()))
{
bool invalid (true);
// Epoch blocks
if (!node.node->ledger.epoch_link.is_zero () && block->type () == nano::block_type::state)
{
auto & state_block (static_cast<nano::state_block &> (*block.get ()));
nano::amount prev_balance (0);
if (!state_block.hashables.previous.is_zero ())
{
prev_balance = node.node->ledger.balance (transaction, state_block.hashables.previous);
}
if (node.node->ledger.is_epoch_link (state_block.hashables.link) && state_block.hashables.balance == prev_balance)
{
invalid = validate_message (node.node->ledger.epoch_signer, hash, block->block_signature ());
}
}
if (invalid)
{
std::cerr << boost::str (boost::format ("Invalid signature for block %1%\n") % hash.to_string ());
}
}
// Check if block work value is correct
if (nano::work_validate (*block.get ()))
{
std::cerr << boost::str (boost::format ("Invalid work for block %1% value: %2%\n") % hash.to_string () % nano::to_string_hex (block->block_work ()));
}
// Check if sideband height is correct
++height;
if (sideband.height != height)
{
std::cerr << boost::str (boost::format ("Incorrect sideband height for block %1%. Sideband: %2%. Expected: %3%\n") % hash.to_string () % sideband.height % height);
}
// Check if sideband timestamp is after previous timestamp
if (sideband.timestamp < previous_timestamp)
{
std::cerr << boost::str (boost::format ("Incorrect sideband timestamp for block %1%\n") % hash.to_string ());
}
previous_timestamp = sideband.timestamp;
// Retrieving successor block hash
hash = node.node->store.block_successor (transaction, hash);
}
if (info.block_count != height)
{
std::cerr << boost::str (boost::format ("Incorrect block count for account %1%. Actual: %2%. Expected: %3%\n") % account.to_account () % height % info.block_count);
}
if (info.head != calculated_hash)
{
std::cerr << boost::str (boost::format ("Incorrect frontier for account %1%. Actual: %2%. Expected: %3%\n") % account.to_account () % calculated_hash.to_string () % info.head.to_string ());
}
}
std::cout << boost::str (boost::format ("%1% accounts validated\n") % count);
count = 0;
for (auto i (node.node->store.pending_begin (transaction)), n (node.node->store.pending_end ()); i != n; ++i)
{
++count;
if ((count % 50000) == 0)
{
std::cout << boost::str (boost::format ("%1% pending blocks validated\n") % count);
}
nano::pending_key key (i->first);
nano::pending_info info (i->second);
				// Check block existence
auto block (node.node->store.block_get (transaction, key.hash));
if (block == nullptr)
{
std::cerr << boost::str (boost::format ("Pending block not existing %1%\n") % key.hash.to_string ());
}
else
{
// Check if pending destination is correct
nano::account destination (0);
if (auto state = dynamic_cast<nano::state_block *> (block.get ()))
{
if (node.node->ledger.is_send (transaction, *state))
{
destination = state->hashables.link;
}
}
else if (auto send = dynamic_cast<nano::send_block *> (block.get ()))
{
destination = send->hashables.destination;
}
else
{
std::cerr << boost::str (boost::format ("Incorrect type for pending block %1%\n") % key.hash.to_string ());
}
if (key.account != destination)
{
std::cerr << boost::str (boost::format ("Incorrect destination for pending block %1%\n") % key.hash.to_string ());
}
// Check if pending source is correct
auto account (node.node->ledger.account (transaction, key.hash));
if (info.source != account)
{
std::cerr << boost::str (boost::format ("Incorrect source for pending block %1%\n") % key.hash.to_string ());
}
// Check if pending amount is correct
auto amount (node.node->ledger.amount (transaction, key.hash));
if (info.amount != amount)
{
std::cerr << boost::str (boost::format ("Incorrect amount for pending block %1%\n") % key.hash.to_string ());
}
}
}
std::cout << boost::str (boost::format ("%1% pending blocks validated\n") % count);
}
else if (vm.count ("debug_profile_bootstrap"))
{
nano::inactive_node node2 (nano::unique_path (), 24001);
node2.node->flags.fast_bootstrap = (vm.count ("fast_bootstrap") > 0);
nano::genesis genesis;
auto begin (std::chrono::high_resolution_clock::now ());
uint64_t block_count (0);
size_t count (0);
{
nano::inactive_node node (data_path, 24000);
auto transaction (node.node->store.tx_begin ());
block_count = node.node->store.block_count (transaction).sum ();
std::cout << boost::str (boost::format ("Performing bootstrap emulation, %1% blocks in ledger...") % block_count) << std::endl;
for (auto i (node.node->store.latest_begin (transaction)), n (node.node->store.latest_end ()); i != n; ++i)
{
nano::account account (i->first);
nano::account_info info (i->second);
auto hash (info.head);
while (!hash.is_zero ())
{
// Retrieving block data
auto block (node.node->store.block_get (transaction, hash));
if (block != nullptr)
{
++count;
if ((count % 100000) == 0)
{
std::cout << boost::str (boost::format ("%1% blocks retrieved") % count) << std::endl;
}
nano::unchecked_info unchecked_info (block, account, 0, nano::signature_verification::unknown);
node2.node->block_processor.add (unchecked_info);
// Retrieving previous block hash
hash = block->previous ();
}
}
}
}
count = 0;
uint64_t block_count_2 (0);
while (block_count_2 != block_count)
{
std::this_thread::sleep_for (std::chrono::seconds (1));
auto transaction_2 (node2.node->store.tx_begin ());
block_count_2 = node2.node->store.block_count (transaction_2).sum ();
if ((count % 60) == 0)
{
std::cout << boost::str (boost::format ("%1% (%2%) blocks processed") % block_count_2 % node2.node->store.unchecked_count (transaction_2)) << std::endl;
}
count++;
}
auto end (std::chrono::high_resolution_clock::now ());
auto time (std::chrono::duration_cast<std::chrono::microseconds> (end - begin).count ());
auto seconds (time / 1000000);
nano::remove_temporary_directories ();
std::cout << boost::str (boost::format ("%|1$ 12d| seconds \n%2% blocks per second") % seconds % (block_count / seconds)) << std::endl;
}
else if (vm.count ("debug_peers"))
{
nano::inactive_node node (data_path);
auto transaction (node.node->store.tx_begin ());
for (auto i (node.node->store.peers_begin (transaction)), n (node.node->store.peers_end ()); i != n; ++i)
{
std::cout << boost::str (boost::format ("%1%\n") % nano::endpoint (boost::asio::ip::address_v6 (i->first.address_bytes ()), i->first.port ()));
}
}
else if (vm.count ("version"))
{
if (NANO_VERSION_PATCH == 0)
{
std::cout << "Version " << NANO_MAJOR_MINOR_VERSION << std::endl;
}
else
{
std::cout << "Version " << NANO_MAJOR_MINOR_RC_VERSION << std::endl;
}
}
else
{
std::cout << description << std::endl;
result = -1;
}
}
return result;
}
| 1 | 15,144 | Should it say `default 0` here? looks a bit confusing. Also following BODMAS we should put: `2048 * (signature checker threads + 1)` | nanocurrency-nano-node | cpp |
@@ -16,6 +16,7 @@
from __future__ import print_function
import os
+import ruamel.yaml
import constants
import utils | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" File utility functions."""
from __future__ import print_function
import os
import constants
import utils
def generate_deployment_templates(template_type, values, datetimestamp):
"""Generate deployment templates.
Args:
template_type (str): Type of the template, either cli or server
values (dict): Values needed for deployment
datetimestamp (str): Timestamp
Returns:
str: Path of the deployment template
Raises:
KeyError: KeyError
"""
template_type = template_type.lower()
if template_type not in constants.INPUT_DEPLOYMENT_TEMPLATE_FILENAME:
raise KeyError # template type not found
input_template_filename = (
constants.INPUT_DEPLOYMENT_TEMPLATE_FILENAME[template_type])
# Deployment template in file
deploy_tpl_path = os.path.abspath(
os.path.join(
constants.ROOT_DIR_PATH,
'deployment-templates',
input_template_filename))
out_tpl_path = os.path.abspath(
os.path.join(
constants.ROOT_DIR_PATH,
'deployment-templates',
'deploy-forseti-{}-{}.yaml'.format(template_type, datetimestamp)))
if generate_file_from_template(deploy_tpl_path,
out_tpl_path,
values):
return out_tpl_path
# Deployment template not generated successfully
return None
def generate_forseti_conf(template_type, vals, datetimestamp):
"""Generate Forseti conf file.
Args:
template_type (str): Type of the template, either cli or server
vals (dict): Values needed for deployment
datetimestamp (str): Timestamp
Returns:
str: Path of the deployment template
Raises:
KeyError: KeyError
"""
template_type = template_type.lower()
if template_type not in constants.INPUT_CONFIGURATION_TEMPLATE_FILENAME:
raise KeyError # template type not found
input_template_name = (
constants.INPUT_CONFIGURATION_TEMPLATE_FILENAME[template_type])
forseti_conf_in = os.path.abspath(
os.path.join(
constants.ROOT_DIR_PATH,
'configs', template_type, input_template_name))
forseti_conf_gen = os.path.abspath(
os.path.join(
constants.ROOT_DIR_PATH, 'configs', template_type,
'forseti_conf_{}_{}.yaml'.format(template_type, datetimestamp)))
conf_values = utils.sanitize_conf_values(vals)
if generate_file_from_template(forseti_conf_in,
forseti_conf_gen,
conf_values):
return forseti_conf_gen
# forseti_conf not generated successfully
return None
def generate_file_from_template(template_path, output_path, template_values):
"""Write to file.
Args:
template_path (str): Input template path
output_path (str): Path of the output file
template_values (dict): Values to replace the
ones in the input template
Returns:
bool: Whether or not file has been generated
"""
try:
with open(template_path, 'r') as in_tmpl:
tmpl_contents = in_tmpl.read()
out_contents = tmpl_contents.format(**template_values)
with open(output_path, 'w') as out_file:
out_file.write(out_contents)
return True
except EnvironmentError:
pass
return False
def copy_file_to_destination(file_path, output_path,
is_directory, dry_run):
"""Copy the config to the created bucket.
Args:
file_path (str): Path to the file
output_path (str): Path of the copied file
is_directory (bool): Whether or not the input file_path is a directory
dry_run (bool): Whether or not the installer is in dry run mode
Returns:
bool: True if copy file succeeded, otherwise False.
"""
utils.print_banner('Copying {} to {}'.format(file_path, output_path))
if dry_run:
print('This is a dry run, so skipping this step.')
return False
if is_directory:
args = ['gsutil', 'cp', '-r', file_path, output_path]
else:
args = ['gsutil', 'cp', file_path, output_path]
return_code, out, err = utils.run_command(args)
if return_code:
print(err)
else:
print(out)
return True
return False
| 1 | 28,850 | Add a newline. We group by 1: standard library, 2: 3rd party library, 3: application library | forseti-security-forseti-security | py |
@@ -83,7 +83,9 @@ func (p *Provider) Type() string {
return ProviderType
}
-func (p *Provider) RunQuery(ctx context.Context, query string, queryRange metrics.QueryRange, evaluator metrics.Evaluator) (bool, string, error) {
+// Evaluate queries the range query endpoint and checks if values in all data points are within the expected range.
+// For the range query endpoint, see: https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
+func (p *Provider) Evaluate(ctx context.Context, query string, queryRange metrics.QueryRange, evaluator metrics.Evaluator) (bool, string, error) {
if err := queryRange.Validate(); err != nil {
return false, "", err
} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"context"
"fmt"
"math"
"time"
"github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
"go.uber.org/zap"
"github.com/pipe-cd/pipe/pkg/app/piped/analysisprovider/metrics"
)
const (
ProviderType = "Prometheus"
defaultTimeout = 30 * time.Second
)
// Provider is a client for prometheus.
type Provider struct {
api v1.API
//username string
//password string
timeout time.Duration
logger *zap.Logger
}
func NewProvider(address string, opts ...Option) (*Provider, error) {
if address == "" {
return nil, fmt.Errorf("address is required")
}
client, err := api.NewClient(api.Config{
Address: address,
})
if err != nil {
return nil, err
}
p := &Provider{
api: v1.NewAPI(client),
timeout: defaultTimeout,
logger: zap.NewNop(),
}
for _, opt := range opts {
opt(p)
}
return p, nil
}
type Option func(*Provider)
func WithTimeout(timeout time.Duration) Option {
return func(p *Provider) {
p.timeout = timeout
}
}
func WithLogger(logger *zap.Logger) Option {
return func(p *Provider) {
p.logger = logger.Named("prometheus-provider")
}
}
func (p *Provider) Type() string {
return ProviderType
}
func (p *Provider) RunQuery(ctx context.Context, query string, queryRange metrics.QueryRange, evaluator metrics.Evaluator) (bool, string, error) {
if err := queryRange.Validate(); err != nil {
return false, "", err
}
ctx, cancel := context.WithTimeout(ctx, p.timeout)
defer cancel()
p.logger.Info("run query", zap.String("query", query))
// TODO: Use HTTP Basic Authentication with the username and password when needed.
response, warnings, err := p.api.QueryRange(ctx, query, v1.Range{
Start: queryRange.From,
End: queryRange.To,
Step: queryRange.Step,
})
if err != nil {
return false, "", err
}
for _, w := range warnings {
p.logger.Warn("non critical error occurred", zap.String("warning", w))
}
return evaluate(evaluator, response)
}
func evaluate(evaluator metrics.Evaluator, response model.Value) (bool, string, error) {
successReason := fmt.Sprintf("all values are within the expected range (%s)", evaluator)
evaluateValue := func(value float64) (bool, error) {
if math.IsNaN(value) {
return false, fmt.Errorf("the value is not a number")
}
return evaluator.InRange(value), nil
}
// NOTE: Maybe it's enough to handle only matrix type as long as calling range queries endpoint.
switch res := response.(type) {
case *model.Scalar:
expected, err := evaluateValue(float64(res.Value))
if err != nil {
return false, "", err
}
if !expected {
reason := fmt.Sprintf("found a value (%g) that is out of the expected range (%s)", float64(res.Value), evaluator)
return false, reason, nil
}
return true, successReason, nil
case model.Vector:
if len(res) == 0 {
return false, "", fmt.Errorf("zero value in instant vector type returned")
}
// Check if all values are expected value.
for _, s := range res {
if s == nil {
continue
}
expected, err := evaluateValue(float64(s.Value))
if err != nil {
return false, "", err
}
if !expected {
reason := fmt.Sprintf("found a value (%g) that is out of the expected range (%s)", float64(s.Value), evaluator)
return false, reason, nil
}
}
return true, successReason, nil
case model.Matrix:
if len(res) == 0 {
return false, "", fmt.Errorf("no time series data points in range vector type")
}
// Check if all values are expected value.
for _, r := range res {
if len(r.Values) == 0 {
return false, "", fmt.Errorf("zero value in range vector type returned")
}
for _, value := range r.Values {
expected, err := evaluateValue(float64(value.Value))
if err != nil {
return false, "", err
}
if !expected {
reason := fmt.Sprintf("found a value (%g) that is out of the expected range (%s)", float64(value.Value), evaluator)
return false, reason, nil
}
}
}
return true, successReason, nil
default:
return false, "", fmt.Errorf("unexpected data type returned")
}
}
| 1 | 15,466 | So `queryRange.Step` will be ignored? | pipe-cd-pipe | go |
@@ -24,13 +24,13 @@
// making the defines very clear, these represent the host architecture - aka
// the arch on which this code is running
-#if defined(_X86_)
+#if defined(HOST_X86)
#define _HOST_X86_
-#elif defined(_AMD64_)
+#elif defined(HOST_AMD64)
#define _HOST_AMD64_
-#elif defined(_ARM_)
+#elif defined(HOST_ARM)
#define _HOST_ARM_
-#elif defined(_ARM64_)
+#elif defined(HOST_ARM64)
#define _HOST_ARM64_
#endif
| 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//*****************************************************************************
// debugshim.cpp
//
//
//*****************************************************************************
#include "debugshim.h"
#include "dbgutil.h"
#include <crtdbg.h>
#include <clrinternal.h> //has the CLR_ID_V4_DESKTOP guid in it
#include "palclr.h"
#ifndef IMAGE_FILE_MACHINE_ARMNT
#define IMAGE_FILE_MACHINE_ARMNT 0x01c4 // ARM Thumb-2 Little-Endian
#endif
#ifndef IMAGE_FILE_MACHINE_ARM64
#define IMAGE_FILE_MACHINE_ARM64 0xAA64 // ARM64 Little-Endian
#endif
// making the defines very clear, these represent the host architecture - aka
// the arch on which this code is running
#if defined(_X86_)
#define _HOST_X86_
#elif defined(_AMD64_)
#define _HOST_AMD64_
#elif defined(_ARM_)
#define _HOST_ARM_
#elif defined(_ARM64_)
#define _HOST_ARM64_
#endif
//*****************************************************************************
// CLRDebuggingImpl implementation (ICLRDebugging)
//*****************************************************************************
typedef HRESULT (STDAPICALLTYPE *OpenVirtualProcessImpl2FnPtr)(ULONG64 clrInstanceId,
IUnknown * pDataTarget,
LPCWSTR pDacModulePath,
CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion,
REFIID riid,
IUnknown ** ppInstance,
CLR_DEBUGGING_PROCESS_FLAGS * pdwFlags);
typedef HRESULT (STDAPICALLTYPE *OpenVirtualProcessImplFnPtr)(ULONG64 clrInstanceId,
IUnknown * pDataTarget,
HMODULE hDacDll,
CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion,
REFIID riid,
IUnknown ** ppInstance,
CLR_DEBUGGING_PROCESS_FLAGS * pdwFlags);
typedef HRESULT (STDAPICALLTYPE *OpenVirtualProcess2FnPtr)(ULONG64 clrInstanceId,
IUnknown * pDataTarget,
HMODULE hDacDll,
REFIID riid,
IUnknown ** ppInstance,
CLR_DEBUGGING_PROCESS_FLAGS * pdwFlags);
typedef HMODULE (STDAPICALLTYPE *LoadLibraryWFnPtr)(LPCWSTR lpLibFileName);
// Implementation of ICLRDebugging::OpenVirtualProcess
//
// Arguments:
// moduleBaseAddress - the address of the module which might be a CLR
// pDataTarget - the data target for inspecting the process
// pLibraryProvider - a callback for locating DBI and DAC
// pMaxDebuggerSupportedVersion - the max version of the CLR that this debugger will support debugging
// riidProcess - the IID of the interface that should be passed back in ppProcess
// ppProcess - output for the ICorDebugProcess# if this module is a CLR
// pVersion - the CLR version if this module is a CLR
// pFlags - output, see the CLR_DEBUGGING_PROCESS_FLAGS for more details. Right now this has only one possible
// value which indicates this runtime had an unhandled exception
STDMETHODIMP CLRDebuggingImpl::OpenVirtualProcess(
ULONG64 moduleBaseAddress,
IUnknown * pDataTarget,
ICLRDebuggingLibraryProvider * pLibraryProvider,
CLR_DEBUGGING_VERSION * pMaxDebuggerSupportedVersion,
REFIID riidProcess,
IUnknown ** ppProcess,
CLR_DEBUGGING_VERSION * pVersion,
CLR_DEBUGGING_PROCESS_FLAGS * pFlags)
{
//PRECONDITION(CheckPointer(pDataTarget));
HRESULT hr = S_OK;
ICorDebugDataTarget * pDt = NULL;
HMODULE hDbi = NULL;
HMODULE hDac = NULL;
LPWSTR pDacModulePath = NULL;
LPWSTR pDbiModulePath = NULL;
DWORD dbiTimestamp;
DWORD dbiSizeOfImage;
WCHAR dbiName[MAX_PATH_FNAME] = { 0 };
DWORD dacTimestamp;
DWORD dacSizeOfImage;
WCHAR dacName[MAX_PATH_FNAME] = { 0 };
CLR_DEBUGGING_VERSION version;
BOOL versionSupportedByCaller = FALSE;
// argument checking
if ((ppProcess != NULL || pFlags != NULL) && pLibraryProvider == NULL)
{
hr = E_POINTER; // the library provider must be specified if either
// ppProcess or pFlags is non-NULL
}
else if ((ppProcess != NULL || pFlags != NULL) && pMaxDebuggerSupportedVersion == NULL)
{
hr = E_POINTER; // the max supported version must be specified if either
// ppProcess or pFlags is non-NULL
}
else if (pVersion != NULL && pVersion->wStructVersion != 0)
{
hr = CORDBG_E_UNSUPPORTED_VERSION_STRUCT;
}
else if (FAILED(pDataTarget->QueryInterface(__uuidof(ICorDebugDataTarget), (void**)&pDt)))
{
hr = CORDBG_E_MISSING_DATA_TARGET_INTERFACE;
}
if (SUCCEEDED(hr))
{
// get CLR version
// The expectation is that new versions of the CLR will continue to use the same GUID
// (unless there's a reason to hide them from older shims), but debuggers will tell us the
// CLR version they're designed for and mscordbi.dll can decide whether or not to accept it.
version.wStructVersion = 0;
hr = GetCLRInfo(pDt,
moduleBaseAddress,
&version,
&dbiTimestamp,
&dbiSizeOfImage,
dbiName,
MAX_PATH_FNAME,
&dacTimestamp,
&dacSizeOfImage,
dacName,
MAX_PATH_FNAME);
}
// If we need to fetch either the process info or the flags info then we need to find
// mscordbi and DAC and do the version specific OVP work
if (SUCCEEDED(hr) && (ppProcess != NULL || pFlags != NULL))
{
ICLRDebuggingLibraryProvider2* pLibraryProvider2;
if (SUCCEEDED(pLibraryProvider->QueryInterface(__uuidof(ICLRDebuggingLibraryProvider2), (void**)&pLibraryProvider2)))
{
if (FAILED(pLibraryProvider2->ProvideLibrary2(dbiName, dbiTimestamp, dbiSizeOfImage, &pDbiModulePath)) ||
pDbiModulePath == NULL)
{
hr = CORDBG_E_LIBRARY_PROVIDER_ERROR;
}
if (SUCCEEDED(hr))
{
hDbi = LoadLibraryW(pDbiModulePath);
if (hDbi == NULL)
{
hr = HRESULT_FROM_WIN32(GetLastError());
}
}
if (SUCCEEDED(hr))
{
// Adjust the timestamp and size of image if this DAC is a known buggy version and needs to be retargeted
RetargetDacIfNeeded(&dacTimestamp, &dacSizeOfImage);
// Ask library provider for dac
if (FAILED(pLibraryProvider2->ProvideLibrary2(dacName, dacTimestamp, dacSizeOfImage, &pDacModulePath)) ||
pDacModulePath == NULL)
{
hr = CORDBG_E_LIBRARY_PROVIDER_ERROR;
}
if (SUCCEEDED(hr))
{
hDac = LoadLibraryW(pDacModulePath);
if (hDac == NULL)
{
hr = HRESULT_FROM_WIN32(GetLastError());
}
}
}
pLibraryProvider2->Release();
}
else {
// Ask library provider for dbi
if (FAILED(pLibraryProvider->ProvideLibrary(dbiName, dbiTimestamp, dbiSizeOfImage, &hDbi)) ||
hDbi == NULL)
{
hr = CORDBG_E_LIBRARY_PROVIDER_ERROR;
}
if (SUCCEEDED(hr))
{
// Adjust the timestamp and size of image if this DAC is a known buggy version and needs to be retargeted
RetargetDacIfNeeded(&dacTimestamp, &dacSizeOfImage);
// ask library provider for dac
if (FAILED(pLibraryProvider->ProvideLibrary(dacName, dacTimestamp, dacSizeOfImage, &hDac)) ||
hDac == NULL)
{
hr = CORDBG_E_LIBRARY_PROVIDER_ERROR;
}
}
}
*ppProcess = NULL;
if (SUCCEEDED(hr) && pDacModulePath != NULL)
{
// Get access to the latest OVP implementation and call it
OpenVirtualProcessImpl2FnPtr ovpFn = (OpenVirtualProcessImpl2FnPtr)GetProcAddress(hDbi, "OpenVirtualProcessImpl2");
if (ovpFn != NULL)
{
hr = ovpFn(moduleBaseAddress, pDataTarget, pDacModulePath, pMaxDebuggerSupportedVersion, riidProcess, ppProcess, pFlags);
if (FAILED(hr))
{
_ASSERTE(ppProcess == NULL || *ppProcess == NULL);
_ASSERTE(pFlags == NULL || *pFlags == 0);
}
}
#ifdef FEATURE_PAL
else
{
// On Linux/MacOS the DAC module handle needs to be re-created using the DAC PAL instance
// before being passed to DBI's OpenVirtualProcess* implementation. The DBI and DAC share
                // the same PAL where dbgshim has its own.
LoadLibraryWFnPtr loadLibraryWFn = (LoadLibraryWFnPtr)GetProcAddress(hDac, "LoadLibraryW");
if (loadLibraryWFn != NULL)
{
hDac = loadLibraryWFn(pDacModulePath);
if (hDac == NULL)
{
hr = E_HANDLE;
}
}
else
{
hr = E_HANDLE;
}
}
#endif // FEATURE_PAL
}
// If no errors so far and "OpenVirtualProcessImpl2" doesn't exist
if (SUCCEEDED(hr) && *ppProcess == NULL)
{
// Get access to OVP and call it
OpenVirtualProcessImplFnPtr ovpFn = (OpenVirtualProcessImplFnPtr)GetProcAddress(hDbi, "OpenVirtualProcessImpl");
if (ovpFn == NULL)
{
// Fallback to CLR v4 Beta1 path, but skip some of the checking we'd normally do (maxSupportedVersion, etc.)
OpenVirtualProcess2FnPtr ovp2Fn = (OpenVirtualProcess2FnPtr)GetProcAddress(hDbi, "OpenVirtualProcess2");
if (ovp2Fn == NULL)
{
hr = CORDBG_E_LIBRARY_PROVIDER_ERROR;
}
else
{
hr = ovp2Fn(moduleBaseAddress, pDataTarget, hDac, riidProcess, ppProcess, pFlags);
}
}
else
{
// Have a CLR v4 Beta2+ DBI, call it and let it do the version check
hr = ovpFn(moduleBaseAddress, pDataTarget, hDac, pMaxDebuggerSupportedVersion, riidProcess, ppProcess, pFlags);
if (FAILED(hr))
{
_ASSERTE(ppProcess == NULL || *ppProcess == NULL);
_ASSERTE(pFlags == NULL || *pFlags == 0);
}
}
}
}
//version is still valid in some failure cases
if (pVersion != NULL &&
(SUCCEEDED(hr) ||
(hr == CORDBG_E_UNSUPPORTED_DEBUGGING_MODEL) ||
(hr == CORDBG_E_UNSUPPORTED_FORWARD_COMPAT)))
{
memcpy(pVersion, &version, sizeof(CLR_DEBUGGING_VERSION));
}
if (pDacModulePath != NULL)
{
#ifdef FEATURE_PAL
free(pDacModulePath);
#else
CoTaskMemFree(pDacModulePath);
#endif
}
if (pDbiModulePath != NULL)
{
#ifdef FEATURE_PAL
free(pDbiModulePath);
#else
CoTaskMemFree(pDbiModulePath);
#endif
}
// free the data target we QI'ed earlier
if (pDt != NULL)
{
pDt->Release();
}
return hr;
}
// Checks to see if this DAC is one of a known set of old DAC builds which contains an issue.
// If so we retarget to a newer compatible version which has the bug fixed. This is done
// by changing the PE information used to lookup the DAC.
//
// Arguments
// pdwTimeStamp - on input, the timestamp of DAC as embedded in the CLR image
// on output, a potentially new timestamp for an updated DAC to use
// instead
// pdwSizeOfImage - on input, the sizeOfImage of DAC as embedded in the CLR image
// on output, a potentially new sizeOfImage for an updated DAC to use
// instead
VOID CLRDebuggingImpl::RetargetDacIfNeeded(DWORD* pdwTimeStamp,
DWORD* pdwSizeOfImage)
{
// This code is auto generated by the CreateRetargetTable tool
// on 3/4/2011 6:35 PM
// and then copy-pasted here.
//
//
//
// Retarget the GDR1 amd64 build
if( (*pdwTimeStamp == 0x4d536868) && (*pdwSizeOfImage == 0x17b000))
{
*pdwTimeStamp = 0x4d71a160;
*pdwSizeOfImage = 0x17b000;
}
// Retarget the GDR1 x86 build
else if( (*pdwTimeStamp == 0x4d5368f2) && (*pdwSizeOfImage == 0x120000))
{
*pdwTimeStamp = 0x4d71a14f;
*pdwSizeOfImage = 0x120000;
}
// Retarget the RTM amd64 build
else if( (*pdwTimeStamp == 0x4ba21fa7) && (*pdwSizeOfImage == 0x17b000))
{
*pdwTimeStamp = 0x4d71a13c;
*pdwSizeOfImage = 0x17b000;
}
// Retarget the RTM x86 build
else if( (*pdwTimeStamp == 0x4ba1da25) && (*pdwSizeOfImage == 0x120000))
{
*pdwTimeStamp = 0x4d71a128;
*pdwSizeOfImage = 0x120000;
}
// This code is auto generated by the CreateRetargetTable tool
// on 8/17/2011 1:28 AM
// and then copy-pasted here.
//
//
//
// Retarget the GDR2 amd64 build
else if( (*pdwTimeStamp == 0x4da428c7) && (*pdwSizeOfImage == 0x17b000))
{
*pdwTimeStamp = 0x4e4b7bc2;
*pdwSizeOfImage = 0x17b000;
}
// Retarget the GDR2 x86 build
else if( (*pdwTimeStamp == 0x4da3fe52) && (*pdwSizeOfImage == 0x120000))
{
*pdwTimeStamp = 0x4e4b7bb1;
*pdwSizeOfImage = 0x120000;
}
// End auto-generated code
}
#define PE_FIXEDFILEINFO_SIGNATURE 0xFEEF04BD
// The format of the special debugging resource we embed in CLRs starting in
// v4
struct CLR_DEBUG_RESOURCE
{
DWORD dwVersion;
GUID signature;
DWORD dwDacTimeStamp;
DWORD dwDacSizeOfImage;
DWORD dwDbiTimeStamp;
DWORD dwDbiSizeOfImage;
};
// Checks to see if a module is a CLR and if so, fetches the debug data
// from the embedded resource
//
// Arguments
// pDataTarget - dataTarget for the process we are inspecting
// moduleBaseAddress - base address of a module we should inspect
// pVersion - output, the version of the CLR detected if this is a CLR
// pdwDbiTimeStamp - the timestamp of DBI as embedded in the CLR image
// pdwDbiSizeOfImage - the SizeOfImage of DBI as embedded in the CLR image
// pDbiName - output, the filename of DBI (as calculated by this function but that might change)
// dwDbiNameCharCount - input, the number of WCHARs in the buffer pointed to by pDbiName
// pdwDacTimeStamp - the timestamp of DAC as embedded in the CLR image
// pdwDacSizeOfImage - the SizeOfImage of DAC as embedded in the CLR image
// pDacName - output, the filename of DAC (as calculated by this function but that might change)
// dwDacNameCharCount - input, the number of WCHARs in the buffer pointed to by pDacName
HRESULT CLRDebuggingImpl::GetCLRInfo(ICorDebugDataTarget* pDataTarget,
ULONG64 moduleBaseAddress,
CLR_DEBUGGING_VERSION* pVersion,
DWORD* pdwDbiTimeStamp,
DWORD* pdwDbiSizeOfImage,
__out_z __inout_ecount(dwDbiNameCharCount) WCHAR* pDbiName,
DWORD dwDbiNameCharCount,
DWORD* pdwDacTimeStamp,
DWORD* pdwDacSizeOfImage,
__out_z __inout_ecount(dwDacNameCharCount) WCHAR* pDacName,
DWORD dwDacNameCharCount)
{
#ifndef FEATURE_PAL
WORD imageFileMachine = 0;
DWORD resourceSectionRVA = 0;
HRESULT hr = GetMachineAndResourceSectionRVA(pDataTarget, moduleBaseAddress, &imageFileMachine, &resourceSectionRVA);
// We want the version resource which has type = RT_VERSION = 16, name = 1, language = 0x409
DWORD versionResourceRVA = 0;
DWORD versionResourceSize = 0;
if(SUCCEEDED(hr))
{
hr = GetResourceRvaFromResourceSectionRva(pDataTarget, moduleBaseAddress, resourceSectionRVA, 16, 1, 0x409,
&versionResourceRVA, &versionResourceSize);
}
// At last we get our version info
VS_FIXEDFILEINFO fixedFileInfo = {0};
if(SUCCEEDED(hr))
{
// The version resource has 3 words, then the unicode string "VS_VERSION_INFO"
// (16 WCHARS including the null terminator)
// then padding to a 32-bit boundary, then the VS_FIXEDFILEINFO struct
DWORD fixedFileInfoRVA = ((versionResourceRVA + 3*2 + 16*2 + 3)/4)*4;
hr = ReadFromDataTarget(pDataTarget, moduleBaseAddress + fixedFileInfoRVA, (BYTE*)&fixedFileInfo, sizeof(fixedFileInfo));
}
// Verify the signature on the version resource
if(SUCCEEDED(hr) && fixedFileInfo.dwSignature != PE_FIXEDFILEINFO_SIGNATURE)
{
hr = CORDBG_E_NOT_CLR;
}
// Record the version information
if(SUCCEEDED(hr))
{
pVersion->wMajor = (WORD) (fixedFileInfo.dwProductVersionMS >> 16);
pVersion->wMinor = (WORD) (fixedFileInfo.dwProductVersionMS & 0xFFFF);
pVersion->wBuild = (WORD) (fixedFileInfo.dwProductVersionLS >> 16);
pVersion->wRevision = (WORD) (fixedFileInfo.dwProductVersionLS & 0xFFFF);
}
// Now grab the special clr debug info resource
// We may need to scan a few different names searching though...
// 1) CLRDEBUGINFO<host_os><host_arch> where host_os = 'WINDOWS' or 'CORESYS' and host_arch = 'X86' or 'ARM' or 'AMD64'
// 2) For back-compat if the host os is windows and the host architecture matches the target then CLRDEBUGINFO is used with no suffix.
DWORD debugResourceRVA = 0;
DWORD debugResourceSize = 0;
BOOL useCrossPlatformNaming = FALSE;
if(SUCCEEDED(hr))
{
// the initial state is that we haven't found a proper resource
HRESULT hrGetResource = E_FAIL;
// First check for the resource which has type = RC_DATA = 10, name = "CLRDEBUGINFO<host_os><host_arch>", language = 0
#if defined (HOST_IS_WINDOWS_OS) && defined(_HOST_X86_)
const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSX86");
#endif
#if !defined (HOST_IS_WINDOWS_OS) && defined(_HOST_X86_)
const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSX86");
#endif
#if defined (HOST_IS_WINDOWS_OS) && defined(_HOST_AMD64_)
const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSAMD64");
#endif
#if !defined (HOST_IS_WINDOWS_OS) && defined(_HOST_AMD64_)
const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSAMD64");
#endif
#if defined (HOST_IS_WINDOWS_OS) && defined(_HOST_ARM64_)
const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSARM64");
#endif
#if !defined (HOST_IS_WINDOWS_OS) && defined(_HOST_ARM64_)
const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSARM64");
#endif
#if defined (HOST_IS_WINDOWS_OS) && defined(_HOST_ARM_)
const WCHAR * resourceName = W("CLRDEBUGINFOWINDOWSARM");
#endif
#if !defined (HOST_IS_WINDOWS_OS) && defined(_HOST_ARM_)
const WCHAR * resourceName = W("CLRDEBUGINFOCORESYSARM");
#endif
hrGetResource = GetResourceRvaFromResourceSectionRvaByName(pDataTarget, moduleBaseAddress, resourceSectionRVA, 10, resourceName, 0,
&debugResourceRVA, &debugResourceSize);
useCrossPlatformNaming = SUCCEEDED(hrGetResource);
#if defined(HOST_IS_WINDOWS_OS) && (defined(_HOST_X86_) || defined(_HOST_AMD64_) || defined(_HOST_ARM_))
#if defined(_HOST_X86_)
#define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_I386
#elif defined(_HOST_AMD64_)
#define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_AMD64
#elif defined(_HOST_ARM_)
#define _HOST_MACHINE_TYPE IMAGE_FILE_MACHINE_ARMNT
#endif
// if this is windows, and if host_arch matches target arch then we can fallback to searching for CLRDEBUGINFO on failure
if(FAILED(hrGetResource) && (imageFileMachine == _HOST_MACHINE_TYPE))
{
hrGetResource = GetResourceRvaFromResourceSectionRvaByName(pDataTarget, moduleBaseAddress, resourceSectionRVA, 10, W("CLRDEBUGINFO"), 0,
&debugResourceRVA, &debugResourceSize);
}
#undef _HOST_MACHINE_TYPE
#endif
// if the search failed, we don't recognize the CLR
if(FAILED(hrGetResource))
hr = CORDBG_E_NOT_CLR;
}
CLR_DEBUG_RESOURCE debugResource;
if(SUCCEEDED(hr) && debugResourceSize != sizeof(debugResource))
{
hr = CORDBG_E_NOT_CLR;
}
// Get the special debug resource from the image and return the results
if(SUCCEEDED(hr))
{
hr = ReadFromDataTarget(pDataTarget, moduleBaseAddress + debugResourceRVA, (BYTE*)&debugResource, sizeof(debugResource));
}
if(SUCCEEDED(hr) && (debugResource.dwVersion != 0))
{
hr = CORDBG_E_NOT_CLR;
}
// The signature needs to match m_skuId exactly, except for m_skuId=CLR_ID_ONECORE_CLR which is
// also compatible with the older CLR_ID_PHONE_CLR signature.
if(SUCCEEDED(hr) &&
(debugResource.signature != m_skuId) &&
!( (debugResource.signature == CLR_ID_PHONE_CLR) && (m_skuId == CLR_ID_ONECORE_CLR) ))
{
hr = CORDBG_E_NOT_CLR;
}
if(SUCCEEDED(hr) &&
(debugResource.signature != CLR_ID_ONECORE_CLR) &&
useCrossPlatformNaming)
{
FormatLongDacModuleName(pDacName, dwDacNameCharCount, imageFileMachine, &fixedFileInfo);
swprintf_s(pDbiName, dwDbiNameCharCount, W("%s_%s.dll"), MAIN_DBI_MODULE_NAME_W, W("x86"));
}
else
{
if(m_skuId == CLR_ID_V4_DESKTOP)
swprintf_s(pDacName, dwDacNameCharCount, W("%s.dll"), CLR_DAC_MODULE_NAME_W);
else
swprintf_s(pDacName, dwDacNameCharCount, W("%s.dll"), CORECLR_DAC_MODULE_NAME_W);
swprintf_s(pDbiName, dwDbiNameCharCount, W("%s.dll"), MAIN_DBI_MODULE_NAME_W);
}
if(SUCCEEDED(hr))
{
*pdwDbiTimeStamp = debugResource.dwDbiTimeStamp;
*pdwDbiSizeOfImage = debugResource.dwDbiSizeOfImage;
*pdwDacTimeStamp = debugResource.dwDacTimeStamp;
*pdwDacSizeOfImage = debugResource.dwDacSizeOfImage;
}
// any failure should be interpreted as this module not being a CLR
if(FAILED(hr))
{
return CORDBG_E_NOT_CLR;
}
else
{
return S_OK;
}
#else
swprintf_s(pDacName, dwDacNameCharCount, W("%s"), MAKEDLLNAME_W(CORECLR_DAC_MODULE_NAME_W));
swprintf_s(pDbiName, dwDbiNameCharCount, W("%s"), MAKEDLLNAME_W(MAIN_DBI_MODULE_NAME_W));
pVersion->wMajor = 0;
pVersion->wMinor = 0;
pVersion->wBuild = 0;
pVersion->wRevision = 0;
*pdwDbiTimeStamp = 0;
*pdwDbiSizeOfImage = 0;
*pdwDacTimeStamp = 0;
*pdwDacSizeOfImage = 0;
return S_OK;
#endif // FEATURE_PAL
}
// Formats the long name for DAC
HRESULT CLRDebuggingImpl::FormatLongDacModuleName(__out_z __inout_ecount(cchBuffer) WCHAR * pBuffer,
DWORD cchBuffer,
DWORD targetImageFileMachine,
VS_FIXEDFILEINFO * pVersion)
{
#ifndef HOST_IS_WINDOWS_OS
_ASSERTE(!"NYI");
return E_NOTIMPL;
#endif
#if defined(_HOST_X86_)
const WCHAR* pHostArch = W("x86");
#elif defined(_HOST_AMD64_)
const WCHAR* pHostArch = W("amd64");
#elif defined(_HOST_ARM_)
const WCHAR* pHostArch = W("arm");
#elif defined(_HOST_ARM64_)
const WCHAR* pHostArch = W("arm64");
#else
_ASSERTE(!"Unknown host arch");
return E_NOTIMPL;
#endif
const WCHAR* pDacBaseName = NULL;
if(m_skuId == CLR_ID_V4_DESKTOP)
pDacBaseName = CLR_DAC_MODULE_NAME_W;
else if(m_skuId == CLR_ID_CORECLR || m_skuId == CLR_ID_PHONE_CLR || m_skuId == CLR_ID_ONECORE_CLR)
pDacBaseName = CORECLR_DAC_MODULE_NAME_W;
else
{
_ASSERTE(!"Unknown SKU id");
return E_UNEXPECTED;
}
const WCHAR* pTargetArch = NULL;
if(targetImageFileMachine == IMAGE_FILE_MACHINE_I386)
{
pTargetArch = W("x86");
}
else if(targetImageFileMachine == IMAGE_FILE_MACHINE_AMD64)
{
pTargetArch = W("amd64");
}
else if(targetImageFileMachine == IMAGE_FILE_MACHINE_ARMNT)
{
pTargetArch = W("arm");
}
else if(targetImageFileMachine == IMAGE_FILE_MACHINE_ARM64)
{
pTargetArch = W("arm64");
}
else
{
_ASSERTE(!"Unknown target image file machine type");
return E_INVALIDARG;
}
const WCHAR* pBuildFlavor = W("");
if(pVersion->dwFileFlags & VS_FF_DEBUG)
{
if(pVersion->dwFileFlags & VS_FF_SPECIALBUILD)
pBuildFlavor = W(".dbg");
else
pBuildFlavor = W(".chk");
}
// WARNING: if you change the formatting make sure you recalculate the maximum
// possible size string and verify callers pass a big enough buffer. This doesn't
// have to be a tight estimate, just make sure it's >= the biggest possible DAC name
// and it can be calculated statically
DWORD minCchBuffer =
(DWORD) wcslen(CLR_DAC_MODULE_NAME_W) + (DWORD) wcslen(CORECLR_DAC_MODULE_NAME_W) + // max name
10 + // max host arch
10 + // max target arch
40 + // max version
10 + // max build flavor
(DWORD) wcslen(W("name_host_target_version.flavor.dll")) + // max intermediate formatting chars
1; // null terminator
// validate the output buffer is larger than our estimate above
_ASSERTE(cchBuffer >= minCchBuffer);
if(!(cchBuffer >= minCchBuffer)) return E_INVALIDARG;
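// Illustrative example of the resulting name (the concrete module name depends on the
// CLR_DAC_MODULE_NAME_W / CORECLR_DAC_MODULE_NAME_W constants), e.g.
// "mscordaccore_amd64_x86_4.0.30319.01.dll" for an amd64 host debugging an x86 target.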
swprintf_s(pBuffer, cchBuffer, W("%s_%s_%s_%u.%u.%u.%02u%s.dll"),
pDacBaseName,
pHostArch,
pTargetArch,
pVersion->dwProductVersionMS >> 16,
pVersion->dwProductVersionMS & 0xFFFF,
pVersion->dwProductVersionLS >> 16,
pVersion->dwProductVersionLS & 0xFFFF,
pBuildFlavor);
return S_OK;
}
// An implementation of ICLRDebugging::CanUnloadNow
//
// Arguments:
// hModule - a handle to a module provided earlier by ProvideLibrary
//
// Returns:
// S_OK if the library is no longer in use and can be unloaded, S_FALSE otherwise
//
STDMETHODIMP CLRDebuggingImpl::CanUnloadNow(HMODULE hModule)
{
// In V4 at least we don't support any unloading.
HRESULT hr = S_FALSE;
return hr;
}
STDMETHODIMP CLRDebuggingImpl::QueryInterface(REFIID riid, void **ppvObject)
{
HRESULT hr = S_OK;
if (riid == __uuidof(IUnknown))
{
IUnknown *pItf = static_cast<IUnknown *>(this);
pItf->AddRef();
*ppvObject = pItf;
}
else if (riid == __uuidof(ICLRDebugging))
{
ICLRDebugging *pItf = static_cast<ICLRDebugging *>(this);
pItf->AddRef();
*ppvObject = pItf;
}
else
hr = E_NOINTERFACE;
return hr;
}
// Standard AddRef implementation
ULONG CLRDebuggingImpl::AddRef()
{
return InterlockedIncrement(&m_cRef);
}
// Standard Release implementation.
ULONG CLRDebuggingImpl::Release()
{
_ASSERTE(m_cRef > 0);
ULONG cRef = InterlockedDecrement(&m_cRef);
if (cRef == 0)
delete this; // Relies on virtual dtor to work properly.
return cRef;
}
| 1 | 10,997 | It would be nice to replace these _HOST_* defines with HOST_*. | dotnet-diagnostics | cpp |
@@ -299,5 +299,9 @@ func doList(dir string, out io.Writer, arg, indent string) error {
// scrub removes dynamic content from recorded files.
func scrub(rootDir string, b []byte) []byte {
const scrubbedRootDir = "[ROOTDIR]"
- return bytes.ReplaceAll(b, []byte(rootDir), []byte(scrubbedRootDir))
+ rootDirWithSeparator := rootDir + string(filepath.Separator)
+ scrubbedRootDirWithSeparator := scrubbedRootDir + string(filepath.Separator)
+ b = bytes.Replace(b, []byte(rootDirWithSeparator), []byte(scrubbedRootDirWithSeparator), -1)
+ b = bytes.Replace(b, []byte(rootDir), []byte(scrubbedRootDir), -1)
+ return b
} | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
)
var record = flag.Bool("record", false, "true to record the desired output for record/replay testcases")
func TestProcessContextModuleRoot(t *testing.T) {
ctx := context.Background()
t.Run("SameDirAsModule", func(t *testing.T) {
pctx, cleanup, err := newTestProject(ctx)
if err != nil {
t.Fatal(err)
}
defer cleanup()
got, err := pctx.ModuleRoot(ctx)
if got != pctx.workdir || err != nil {
t.Errorf("got %q/%v want %q/<nil>", got, err, pctx.workdir)
}
})
t.Run("NoBiomesDir", func(t *testing.T) {
pctx, cleanup, err := newTestProject(ctx)
if err != nil {
t.Fatal(err)
}
defer cleanup()
if err := os.RemoveAll(biomesRootDir(pctx.workdir)); err != nil {
t.Fatal(err)
}
if _, err = pctx.ModuleRoot(ctx); err == nil {
t.Errorf("got nil error, want non-nil error due to missing biomes/ dir")
}
})
t.Run("NoModFile", func(t *testing.T) {
pctx, cleanup, err := newTestProject(ctx)
if err != nil {
t.Fatal(err)
}
defer cleanup()
if err := os.Remove(filepath.Join(pctx.workdir, "go.mod")); err != nil {
t.Fatal(err)
}
if _, err = pctx.ModuleRoot(ctx); err == nil {
t.Errorf("got nil error, want non-nil error due to missing go.mod")
}
})
t.Run("ParentDirectory", func(t *testing.T) {
pctx, cleanup, err := newTestProject(ctx)
if err != nil {
t.Fatal(err)
}
defer cleanup()
rootdir := pctx.workdir
subdir := filepath.Join(rootdir, "subdir")
if err := os.Mkdir(subdir, 0777); err != nil {
t.Fatal(err)
}
pctx.workdir = subdir
got, err := pctx.ModuleRoot(ctx)
if got != rootdir || err != nil {
t.Errorf("got %q/%v want %q/<nil>", got, err, rootdir)
}
})
}
// A record/replay testcase.
type testCase struct {
// The directory where the testcase was found, e.g. testdata/recordreplay/Foo.
dir string
// The list of commands to execute.
//
// Empty lines and lines starting with "#" are ignored.
//
// By default, commands are expected to succeed, and the test will fail
// otherwise. However, commands that are expected to fail can be marked
// with a " --> FAIL" suffix.
//
// The following commands are supported:
// - gocdk: Executed through run().
// - cd: Takes exactly 1 argument, which cannot have "/" in it.
// Changes the working directory.
// - ls: With no arguments, recursively lists the files in the current
// working directory. With one argument, which cannot have "/" in it,
// recursively lists the file or directory named in the argument.
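//
// An illustrative commands.txt (the gocdk arguments shown here are made up):
//   # scripted session
//   gocdk some-subcommand
//   ls
//   gocdk another-subcommand --> FAIL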
commands []string
// The desired success/failure for each command in commands.
wantFail []bool
// The desired STDOUT and STDERR (merged).
//
// In addition to the actual command output, the executed commands are written
// to both, prefixed with "$ ", and a blank line is inserted after each
// command. For example, the commands "ls" in a directory with a single file,
// foo.txt, would result in:
// $ ls
// foo.txt
//
want []byte
}
// newTestCase reads a test case from dir.
//
// The directory structure is:
//
// commands.txt: Commands to execute, one per line.
// out.txt: Expected STDOUT/STDERR output.
//
// See the testCase struct docstring for more info on each of the above.
func newTestCase(dir string, record bool) (*testCase, error) {
const failMarker = " --> FAIL"
name := filepath.Base(dir)
tc := &testCase{dir: dir}
commandsFile := filepath.Join(dir, "commands.txt")
commandsBytes, err := ioutil.ReadFile(commandsFile)
if err != nil {
return nil, fmt.Errorf("load test case %s: %v", name, err)
}
for _, line := range strings.Split(string(commandsBytes), "\n") {
if line == "" || strings.HasPrefix(line, "#") {
continue
}
wantFail := false
if strings.HasSuffix(line, failMarker) {
line = strings.TrimSuffix(line, failMarker)
wantFail = true
}
tc.commands = append(tc.commands, line)
tc.wantFail = append(tc.wantFail, wantFail)
}
if !record {
tc.want, err = ioutil.ReadFile(filepath.Join(dir, "out.txt"))
if err != nil {
return nil, err
}
}
return tc, nil
}
func TestRecordReplay(t *testing.T) {
const testRoot = "testdata/recordreplay"
testcaseDirs, err := ioutil.ReadDir(testRoot)
if err != nil {
t.Fatal(err)
}
testcases := make([]*testCase, 0, len(testcaseDirs))
for _, dir := range testcaseDirs {
testcase, err := newTestCase(filepath.Join(testRoot, dir.Name()), *record)
if err != nil {
t.Fatal(err)
}
testcases = append(testcases, testcase)
}
ctx := context.Background()
for _, tc := range testcases {
tc := tc
t.Run(filepath.Base(tc.dir), func(t *testing.T) {
t.Parallel()
rootDir, err := ioutil.TempDir("", testTempDirPrefix)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootDir)
var out bytes.Buffer
curDir := rootDir
pctx := newProcessContext(curDir, strings.NewReader(""), &out, &out)
pctx.env = append(pctx.env, "GOPATH="+rootDir)
for i, command := range tc.commands {
fmt.Fprintln(&out, "$", command)
args := strings.Split(command, " ")
cmd := args[0]
args = args[1:]
// We can add support for additional commands as needed; see
// https://github.com/golang/go/blob/master/src/cmd/go/testdata/script/README
// for inspiration.
var cmdErr error
switch cmd {
case "gocdk":
cmdErr = run(ctx, pctx, args)
case "cd":
if len(args) != 1 {
t.Fatalf("command #%d: cd takes exactly 1 argument", i)
}
if strings.Contains(args[0], "/") {
t.Fatalf("command #%d: argument to cd must be in the current directory (%q has a '/')", i, args[0])
}
curDir = filepath.Join(curDir, args[0])
pctx.workdir = curDir
case "ls":
var arg string
if len(args) == 1 {
arg = args[0]
if strings.Contains(arg, "/") {
t.Fatalf("command #%d: argument to ls must be in the current directory (%q has a '/')", i, arg)
}
} else if len(args) > 1 {
t.Fatalf("command #%d: ls takes 0-1 arguments (got %d)", i, len(args))
}
cmdErr = doList(curDir, &out, arg, "")
default:
t.Fatalf("unknown command #%d (%q)", i, command)
}
if cmdErr == nil && tc.wantFail[i] {
t.Fatalf("command #%d (%q) succeeded, but it was expected to fail", i, command)
}
if cmdErr != nil && !tc.wantFail[i] {
t.Fatalf("command #%d (%q) failed with %v", i, command, cmdErr)
}
fmt.Fprintln(&out)
}
got := scrub(rootDir, out.Bytes())
if *record {
if err := ioutil.WriteFile(filepath.Join(tc.dir, "out.txt"), got, 0666); err != nil {
t.Fatalf("failed to record out.txt to testdata: %v", err)
}
} else {
// Split to string lines to make diff output cleaner.
gotLines := strings.Split(string(got), "\n")
wantLines := strings.Split(string(tc.want), "\n")
if diff := cmp.Diff(wantLines, gotLines); diff != "" {
t.Errorf("out mismatch:\n%s", diff)
}
}
})
}
}
// doList recursively lists the files/directory in dir to out.
// If arg is not empty, it only lists arg.
func doList(dir string, out io.Writer, arg, indent string) error {
fileInfos, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
// Sort case-insensitively. ioutil.ReadDir returns results in sorted order,
// but it's not stable across platforms.
sort.Slice(fileInfos, func(i, j int) bool {
return strings.ToLower(fileInfos[i].Name()) < strings.ToLower(fileInfos[j].Name())
})
for _, fi := range fileInfos {
if arg != "" && arg != fi.Name() {
continue
}
if fi.IsDir() {
fmt.Fprintf(out, "%s%s/\n", indent, fi.Name())
if err := doList(filepath.Join(dir, fi.Name()), out, "", indent+" "); err != nil {
return err
}
} else {
fmt.Fprintf(out, "%s%s\n", indent, fi.Name())
}
}
return nil
}
// scrub removes dynamic content from recorded files.
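// For example (illustrative paths), "/tmp/gocdk-test123/myproj" in the recorded
// output becomes "[ROOTDIR]/myproj".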
func scrub(rootDir string, b []byte) []byte {
const scrubbedRootDir = "[ROOTDIR]"
return bytes.ReplaceAll(b, []byte(rootDir), []byte(scrubbedRootDir))
}
| 1 | 18,171 | The problem here was that the record file started with something like `/tmp/whatever/myproj` and was scrubbed to `[ROOTDIR]/myproj`, but the `/` is a `\` on Windows. | google-go-cloud | go |
@@ -33,10 +33,13 @@
#include <rtps/builtin/data/ProxyHashTables.hpp>
+#include "../../../fastdds/core/policy/ParameterList.hpp"
+
#include <mutex>
#include <chrono>
using namespace eprosima::fastrtps;
+using ParameterList = eprosima::fastdds::dds::ParameterList;
namespace eprosima {
namespace fastrtps { | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file ParticipantProxyData.cpp
*
*/
#include <fastrtps_deprecated/participant/ParticipantImpl.h>
#include <fastdds/rtps/builtin/data/ParticipantProxyData.h>
#include <fastdds/rtps/builtin/data/WriterProxyData.h>
#include <fastdds/rtps/builtin/data/ReaderProxyData.h>
#include <rtps/builtin/data/ProxyDataFilters.hpp>
#include <fastdds/rtps/builtin/discovery/participant/PDPSimple.h>
#include <fastdds/rtps/resources/TimedEvent.h>
#include <fastdds/rtps/builtin/BuiltinProtocols.h>
#include <fastdds/rtps/network/NetworkFactory.h>
#include <rtps/transport/shared_mem/SHMLocator.hpp>
#include <fastdds/dds/log/Log.hpp>
#include <fastrtps/utils/TimeConversion.h>
#include <fastdds/core/policy/QosPoliciesSerializer.hpp>
#include <rtps/builtin/data/ProxyHashTables.hpp>
#include <mutex>
#include <chrono>
using namespace eprosima::fastrtps;
namespace eprosima {
namespace fastrtps {
namespace rtps {
ParticipantProxyData::ParticipantProxyData(
const RTPSParticipantAllocationAttributes& allocation)
: m_protocolVersion(c_ProtocolVersion)
, m_VendorId(c_VendorId_Unknown)
, m_expectsInlineQos(false)
, m_availableBuiltinEndpoints(0)
, metatraffic_locators(allocation.locators.max_unicast_locators, allocation.locators.max_multicast_locators)
, default_locators(allocation.locators.max_unicast_locators, allocation.locators.max_multicast_locators)
, m_manualLivelinessCount ()
#if HAVE_SECURITY
, security_attributes_(0UL)
, plugin_security_attributes_(0UL)
#endif
, isAlive(false)
, m_properties(static_cast<uint32_t>(allocation.data_limits.max_properties))
, lease_duration_event(nullptr)
, should_check_lease_duration(false)
, m_readers(new ProxyHashTable<ReaderProxyData>(allocation.readers))
, m_writers(new ProxyHashTable<WriterProxyData>(allocation.writers))
{
m_userData.set_max_size(static_cast<uint32_t>(allocation.data_limits.max_user_data));
}
ParticipantProxyData::ParticipantProxyData(
const ParticipantProxyData& pdata)
: m_protocolVersion(pdata.m_protocolVersion)
, m_guid(pdata.m_guid)
, m_VendorId(pdata.m_VendorId)
, m_expectsInlineQos(pdata.m_expectsInlineQos)
, m_availableBuiltinEndpoints(pdata.m_availableBuiltinEndpoints)
, metatraffic_locators(pdata.metatraffic_locators)
, default_locators(pdata.default_locators)
, m_manualLivelinessCount ()
, m_participantName(pdata.m_participantName)
, m_key(pdata.m_key)
, m_leaseDuration(pdata.m_leaseDuration)
#if HAVE_SECURITY
, identity_token_(pdata.identity_token_)
, permissions_token_(pdata.permissions_token_)
, security_attributes_(pdata.security_attributes_)
, plugin_security_attributes_(pdata.plugin_security_attributes_)
#endif
, isAlive(pdata.isAlive)
, m_properties(pdata.m_properties)
, m_userData(pdata.m_userData)
, lease_duration_event(nullptr)
, should_check_lease_duration(false)
// This method is only called when calling the participant discovery listener and the
// corresponding DiscoveredParticipantInfo struct is created. Only participant info is used,
// so there is no need to copy m_readers and m_writers
, m_readers(nullptr)
, m_writers(nullptr)
, lease_duration_(pdata.lease_duration_)
{
}
ParticipantProxyData::~ParticipantProxyData()
{
logInfo(RTPS_PARTICIPANT, m_guid);
// delete all reader proxies
if (m_readers)
{
for (ProxyHashTable<ReaderProxyData>::value_type val : *m_readers)
{
delete val.second;
}
delete m_readers;
}
// delete all writers proxies
if (m_writers)
{
for (ProxyHashTable<WriterProxyData>::value_type val : *m_writers)
{
delete val.second;
}
delete m_writers;
}
if (lease_duration_event != nullptr)
{
delete lease_duration_event;
}
}
uint32_t ParticipantProxyData::get_serialized_size(
bool include_encapsulation) const
{
uint32_t ret_val = include_encapsulation ? 4 : 0;
// PID_PROTOCOL_VERSION
ret_val += 4 + 4;
// PID_VENDORID
ret_val += 4 + 4;
if (m_expectsInlineQos)
{
// PID_EXPECTS_INLINE_QOS
ret_val += 4 + PARAMETER_BOOL_LENGTH;
}
// PID_PARTICIPANT_GUID
ret_val += 4 + PARAMETER_GUID_LENGTH;
// PID_METATRAFFIC_MULTICAST_LOCATOR
ret_val += static_cast<uint32_t>((4 + PARAMETER_LOCATOR_LENGTH) * metatraffic_locators.multicast.size());
// PID_METATRAFFIC_UNICAST_LOCATOR
ret_val += static_cast<uint32_t>((4 + PARAMETER_LOCATOR_LENGTH) * metatraffic_locators.unicast.size());
// PID_DEFAULT_UNICAST_LOCATOR
ret_val += static_cast<uint32_t>((4 + PARAMETER_LOCATOR_LENGTH) * default_locators.unicast.size());
// PID_DEFAULT_MULTICAST_LOCATOR
ret_val += static_cast<uint32_t>((4 + PARAMETER_LOCATOR_LENGTH) * default_locators.multicast.size());
// PID_PARTICIPANT_LEASE_DURATION
ret_val += 4 + PARAMETER_TIME_LENGTH;
// PID_BUILTIN_ENDPOINT_SET
ret_val += 4 + PARAMETER_BUILTINENDPOINTSET_LENGTH;
if (m_participantName.size() > 0)
{
// PID_ENTITY_NAME
ret_val += fastdds::dds::ParameterSerializer<Parameter_t>::cdr_serialized_size(m_participantName);
}
if (m_userData.size() > 0)
{
// PID_USER_DATA
ret_val += fastdds::dds::QosPoliciesSerializer<UserDataQosPolicy>::cdr_serialized_size(m_userData);
}
if (m_properties.size() > 0)
{
// PID_PROPERTY_LIST
ret_val += fastdds::dds::ParameterSerializer<ParameterPropertyList_t>::cdr_serialized_size(m_properties);
}
#if HAVE_SECURITY
if (!identity_token_.class_id().empty())
{
// PID_IDENTITY_TOKEN
ret_val += fastdds::dds::ParameterSerializer<Parameter_t>::cdr_serialized_size(identity_token_);
}
if (!permissions_token_.class_id().empty())
{
// PID_PERMISSIONS_TOKEN
ret_val += fastdds::dds::ParameterSerializer<Parameter_t>::cdr_serialized_size(permissions_token_);
}
if ((security_attributes_ != 0UL) || (plugin_security_attributes_ != 0UL))
{
// PID_PARTICIPANT_SECURITY_INFO
ret_val += 4 + PARAMETER_PARTICIPANT_SECURITY_INFO_LENGTH;
}
#endif
// PID_SENTINEL
return ret_val + 4;
}
bool ParticipantProxyData::writeToCDRMessage(
CDRMessage_t* msg,
bool write_encapsulation)
{
if (write_encapsulation)
{
if (!ParameterList::writeEncapsulationToCDRMsg(msg))
{
return false;
}
}
{
ParameterProtocolVersion_t p(fastdds::dds::PID_PROTOCOL_VERSION, 4);
p.protocolVersion = this->m_protocolVersion;
if (!fastdds::dds::ParameterSerializer<ParameterProtocolVersion_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
{
ParameterVendorId_t p(fastdds::dds::PID_VENDORID, 4);
p.vendorId[0] = this->m_VendorId[0];
p.vendorId[1] = this->m_VendorId[1];
if (!fastdds::dds::ParameterSerializer<ParameterVendorId_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
if (this->m_expectsInlineQos)
{
ParameterBool_t p(fastdds::dds::PID_EXPECTS_INLINE_QOS, PARAMETER_BOOL_LENGTH, m_expectsInlineQos);
if (!fastdds::dds::ParameterSerializer<ParameterBool_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
{
ParameterGuid_t p(fastdds::dds::PID_PARTICIPANT_GUID, PARAMETER_GUID_LENGTH, m_guid);
if (!fastdds::dds::ParameterSerializer<ParameterGuid_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
for (const Locator_t& it : metatraffic_locators.multicast)
{
ParameterLocator_t p(fastdds::dds::PID_METATRAFFIC_MULTICAST_LOCATOR, PARAMETER_LOCATOR_LENGTH, it);
if (!fastdds::dds::ParameterSerializer<ParameterLocator_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
for (const Locator_t& it : metatraffic_locators.unicast)
{
ParameterLocator_t p(fastdds::dds::PID_METATRAFFIC_UNICAST_LOCATOR, PARAMETER_LOCATOR_LENGTH, it);
if (!fastdds::dds::ParameterSerializer<ParameterLocator_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
for (const Locator_t& it : default_locators.unicast)
{
ParameterLocator_t p(fastdds::dds::PID_DEFAULT_UNICAST_LOCATOR, PARAMETER_LOCATOR_LENGTH, it);
if (!fastdds::dds::ParameterSerializer<ParameterLocator_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
for (const Locator_t& it : default_locators.multicast)
{
ParameterLocator_t p(fastdds::dds::PID_DEFAULT_MULTICAST_LOCATOR, PARAMETER_LOCATOR_LENGTH, it);
if (!fastdds::dds::ParameterSerializer<ParameterLocator_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
{
ParameterTime_t p(fastdds::dds::PID_PARTICIPANT_LEASE_DURATION, PARAMETER_TIME_LENGTH);
p.time = m_leaseDuration;
if (!fastdds::dds::ParameterSerializer<ParameterTime_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
{
ParameterBuiltinEndpointSet_t p(fastdds::dds::PID_BUILTIN_ENDPOINT_SET, PARAMETER_BUILTINENDPOINTSET_LENGTH);
p.endpointSet = m_availableBuiltinEndpoints;
if (!fastdds::dds::ParameterSerializer<ParameterBuiltinEndpointSet_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
if (m_participantName.size() > 0)
{
ParameterString_t p(fastdds::dds::PID_ENTITY_NAME, 0, m_participantName);
if (!fastdds::dds::ParameterSerializer<ParameterString_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
if (m_userData.size() > 0)
{
if (!fastdds::dds::QosPoliciesSerializer<UserDataQosPolicy>::add_to_cdr_message(m_userData,
msg))
{
return false;
}
}
if (m_properties.size() > 0)
{
if (!fastdds::dds::ParameterSerializer<ParameterPropertyList_t>::add_to_cdr_message(m_properties, msg))
{
return false;
}
}
#if HAVE_SECURITY
if (!identity_token_.class_id().empty())
{
ParameterToken_t p(fastdds::dds::PID_IDENTITY_TOKEN, 0);
p.token = identity_token_;
if (!fastdds::dds::ParameterSerializer<ParameterToken_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
if (!permissions_token_.class_id().empty())
{
ParameterToken_t p(fastdds::dds::PID_PERMISSIONS_TOKEN, 0);
p.token = permissions_token_;
if (!fastdds::dds::ParameterSerializer<ParameterToken_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
if ((security_attributes_ != 0UL) || (plugin_security_attributes_ != 0UL))
{
ParameterParticipantSecurityInfo_t p;
p.security_attributes = security_attributes_;
p.plugin_security_attributes = plugin_security_attributes_;
if (!fastdds::dds::ParameterSerializer<ParameterParticipantSecurityInfo_t>::add_to_cdr_message(p, msg))
{
return false;
}
}
#endif
return fastdds::dds::ParameterSerializer<Parameter_t>::add_parameter_sentinel(msg);
}
bool ParticipantProxyData::readFromCDRMessage(
CDRMessage_t* msg,
bool use_encapsulation,
const NetworkFactory& network,
bool is_shm_transport_available)
{
bool are_shm_metatraffic_locators_present = false;
bool are_shm_default_locators_present = false;
bool is_shm_transport_possible = false;
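// param_process is invoked once per parameter found while parsing the CDR message;
// pid identifies the parameter and plength its serialized length. Returning false
// aborts the parsing of the whole parameter list.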
auto param_process = [this, &network, &is_shm_transport_possible,
&are_shm_metatraffic_locators_present,
&are_shm_default_locators_present,
&is_shm_transport_available](CDRMessage_t* msg, const ParameterId_t& pid, uint16_t plength)
{
switch (pid)
{
case fastdds::dds::PID_KEY_HASH:
{
ParameterKey_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterKey_t>::read_from_cdr_message(p, msg, plength))
{
return false;
}
GUID_t guid;
iHandle2GUID(guid, p.key);
m_guid = guid;
m_key = p.key;
break;
}
case fastdds::dds::PID_PROTOCOL_VERSION:
{
ParameterProtocolVersion_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterProtocolVersion_t>::read_from_cdr_message(p,
msg, plength))
{
return false;
}
if (p.protocolVersion.m_major < c_ProtocolVersion.m_major)
{
return false;
}
m_protocolVersion = p.protocolVersion;
break;
}
case fastdds::dds::PID_VENDORID:
{
ParameterVendorId_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterVendorId_t>::read_from_cdr_message(p, msg,
plength))
{
return false;
}
m_VendorId[0] = p.vendorId[0];
m_VendorId[1] = p.vendorId[1];
is_shm_transport_available &= (m_VendorId == c_VendorId_eProsima);
break;
}
case fastdds::dds::PID_EXPECTS_INLINE_QOS:
{
ParameterBool_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterBool_t>::read_from_cdr_message(p, msg, plength))
{
return false;
}
m_expectsInlineQos = p.value;
break;
}
case fastdds::dds::PID_PARTICIPANT_GUID:
{
ParameterGuid_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterGuid_t>::read_from_cdr_message(p, msg, plength))
{
return false;
}
m_guid = p.guid;
m_key = p.guid;
break;
}
case fastdds::dds::PID_METATRAFFIC_MULTICAST_LOCATOR:
{
ParameterLocator_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterLocator_t>::read_from_cdr_message(p, msg,
plength))
{
return false;
}
Locator_t temp_locator;
if (network.transform_remote_locator(p.locator, temp_locator))
{
ProxyDataFilters::filter_locators(
is_shm_transport_available,
&is_shm_transport_possible,
&are_shm_metatraffic_locators_present,
&metatraffic_locators,
temp_locator,
false);
}
break;
}
case fastdds::dds::PID_METATRAFFIC_UNICAST_LOCATOR:
{
ParameterLocator_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterLocator_t>::read_from_cdr_message(p, msg,
plength))
{
return false;
}
Locator_t temp_locator;
if (network.transform_remote_locator(p.locator, temp_locator))
{
ProxyDataFilters::filter_locators(
is_shm_transport_available,
&is_shm_transport_possible,
&are_shm_metatraffic_locators_present,
&metatraffic_locators,
temp_locator,
true);
}
break;
}
case fastdds::dds::PID_DEFAULT_UNICAST_LOCATOR:
{
ParameterLocator_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterLocator_t>::read_from_cdr_message(p, msg,
plength))
{
return false;
}
Locator_t temp_locator;
if (network.transform_remote_locator(p.locator, temp_locator))
{
ProxyDataFilters::filter_locators(
is_shm_transport_available,
&is_shm_transport_possible,
&are_shm_default_locators_present,
&default_locators,
temp_locator,
true);
}
break;
}
case fastdds::dds::PID_DEFAULT_MULTICAST_LOCATOR:
{
ParameterLocator_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterLocator_t>::read_from_cdr_message(p, msg,
plength))
{
return false;
}
Locator_t temp_locator;
if (network.transform_remote_locator(p.locator, temp_locator))
{
ProxyDataFilters::filter_locators(
is_shm_transport_available,
&is_shm_transport_possible,
&are_shm_default_locators_present,
&default_locators,
temp_locator,
false);
}
break;
}
case fastdds::dds::PID_PARTICIPANT_LEASE_DURATION:
{
ParameterTime_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterTime_t>::read_from_cdr_message(p, msg, plength))
{
return false;
}
m_leaseDuration = p.time.to_duration_t();
lease_duration_ =
std::chrono::microseconds(TimeConv::Duration_t2MicroSecondsInt64(
m_leaseDuration));
break;
}
case fastdds::dds::PID_BUILTIN_ENDPOINT_SET:
{
ParameterBuiltinEndpointSet_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterBuiltinEndpointSet_t>::read_from_cdr_message(p,
msg, plength))
{
return false;
}
m_availableBuiltinEndpoints = p.endpointSet;
break;
}
case fastdds::dds::PID_ENTITY_NAME:
{
ParameterString_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterString_t>::read_from_cdr_message(p, msg,
plength))
{
return false;
}
m_participantName = p.getName();
break;
}
case fastdds::dds::PID_PROPERTY_LIST:
{
if (!fastdds::dds::ParameterSerializer<ParameterPropertyList_t>::read_from_cdr_message(
m_properties, msg, plength))
{
return false;
}
break;
}
case fastdds::dds::PID_USER_DATA:
{
if (!fastdds::dds::QosPoliciesSerializer<UserDataQosPolicy>::read_from_cdr_message(m_userData,
msg, plength))
{
return false;
}
break;
}
case fastdds::dds::PID_IDENTITY_TOKEN:
{
#if HAVE_SECURITY
ParameterToken_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterToken_t>::read_from_cdr_message(p, msg,
plength))
{
return false;
}
identity_token_ = std::move(p.token);
#else
logWarning(RTPS_PARTICIPANT, "Received PID_IDENTITY_TOKEN but security is disabled");
#endif
break;
}
case fastdds::dds::PID_PERMISSIONS_TOKEN:
{
#if HAVE_SECURITY
ParameterToken_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterToken_t>::read_from_cdr_message(p, msg,
plength))
{
return false;
}
permissions_token_ = std::move(p.token);
#else
logWarning(RTPS_PARTICIPANT, "Received PID_PERMISSIONS_TOKEN but security is disabled");
#endif
break;
}
case fastdds::dds::PID_PARTICIPANT_SECURITY_INFO:
{
#if HAVE_SECURITY
ParameterParticipantSecurityInfo_t p(pid, plength);
if (!fastdds::dds::ParameterSerializer<ParameterParticipantSecurityInfo_t>::
read_from_cdr_message(p, msg, plength))
{
return false;
}
security_attributes_ = p.security_attributes;
plugin_security_attributes_ = p.plugin_security_attributes;
#else
logWarning(RTPS_PARTICIPANT,
"Received PID_PARTICIPANT_SECURITY_INFO but security is disabled");
#endif
break;
}
default:
{
break;
}
}
return true;
};
uint32_t qos_size;
clear();
try
{
return ParameterList::readParameterListfromCDRMsg(*msg, param_process, use_encapsulation, qos_size);
}
catch (std::bad_alloc& ba)
{
std::cerr << "bad_alloc caught: " << ba.what() << '\n';
return false;
}
}
void ParticipantProxyData::clear()
{
m_protocolVersion = ProtocolVersion_t();
m_guid = GUID_t();
//set_VendorId_Unknown(m_VendorId);
m_VendorId = c_VendorId_Unknown;
m_expectsInlineQos = false;
m_availableBuiltinEndpoints = 0;
metatraffic_locators.unicast.clear();
metatraffic_locators.multicast.clear();
default_locators.unicast.clear();
default_locators.multicast.clear();
m_participantName = "";
m_key = InstanceHandle_t();
m_leaseDuration = Duration_t();
lease_duration_ = std::chrono::microseconds::zero();
isAlive = true;
#if HAVE_SECURITY
identity_token_ = IdentityToken();
permissions_token_ = PermissionsToken();
security_attributes_ = 0UL;
plugin_security_attributes_ = 0UL;
#endif
m_properties.clear();
m_properties.length = 0;
m_userData.clear();
m_userData.length = 0;
}
void ParticipantProxyData::copy(
const ParticipantProxyData& pdata)
{
m_protocolVersion = pdata.m_protocolVersion;
m_guid = pdata.m_guid;
m_VendorId[0] = pdata.m_VendorId[0];
m_VendorId[1] = pdata.m_VendorId[1];
m_availableBuiltinEndpoints = pdata.m_availableBuiltinEndpoints;
metatraffic_locators = pdata.metatraffic_locators;
default_locators = pdata.default_locators;
m_participantName = pdata.m_participantName;
m_leaseDuration = pdata.m_leaseDuration;
lease_duration_ = std::chrono::microseconds(TimeConv::Duration_t2MicroSecondsInt64(pdata.m_leaseDuration));
m_key = pdata.m_key;
isAlive = pdata.isAlive;
m_userData = pdata.m_userData;
m_properties = pdata.m_properties;
// This method is only called when a new participant is discovered. The destination of the copy
// will always be a new ParticipantProxyData or one from the pool, so there is no need for
// m_readers and m_writers to be copied
#if HAVE_SECURITY
identity_token_ = pdata.identity_token_;
permissions_token_ = pdata.permissions_token_;
security_attributes_ = pdata.security_attributes_;
plugin_security_attributes_ = pdata.plugin_security_attributes_;
#endif
}
bool ParticipantProxyData::updateData(
ParticipantProxyData& pdata)
{
metatraffic_locators = pdata.metatraffic_locators;
default_locators = pdata.default_locators;
m_leaseDuration = pdata.m_leaseDuration;
isAlive = true;
m_userData = pdata.m_userData;
m_properties = pdata.m_properties;
#if HAVE_SECURITY
identity_token_ = pdata.identity_token_;
permissions_token_ = pdata.permissions_token_;
security_attributes_ = pdata.security_attributes_;
plugin_security_attributes_ = pdata.plugin_security_attributes_;
#endif
auto new_lease_duration = std::chrono::microseconds(TimeConv::Duration_t2MicroSecondsInt64(m_leaseDuration));
if (lease_duration_event != nullptr)
{
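// If the remote participant shortened its lease, reschedule the expiration timer
// so it fires relative to the last message received from that participant.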
if (new_lease_duration < lease_duration_)
{
// Calculate next trigger.
auto real_lease_tm = last_received_message_tm_ + new_lease_duration;
auto next_trigger = real_lease_tm - std::chrono::steady_clock::now();
lease_duration_event->cancel_timer();
lease_duration_event->update_interval_millisec(
(double)std::chrono::duration_cast<std::chrono::milliseconds>(next_trigger).count());
lease_duration_event->restart_timer();
}
}
lease_duration_ = new_lease_duration;
return true;
}
void ParticipantProxyData::set_persistence_guid(
const GUID_t& guid)
{
// only store valid values; ignore c_Guid_Unknown
if (guid == c_Guid_Unknown)
{
return;
}
// generate pair
std::pair<std::string, std::string> persistent_guid;
persistent_guid.first = "PID_PERSISTENCE_GUID";
std::ostringstream data;
data << guid;
persistent_guid.second = data.str();
// if the property already exists, replace it
ParameterPropertyList_t::iterator it = std::find_if(
m_properties.begin(),
m_properties.end(),
[&persistent_guid](const fastdds::dds::ParameterProperty_t& p)
{
return persistent_guid.first == p.first();
});
if (it != m_properties.end())
{
if (!it->modify(persistent_guid))
{
logError(RTPS_PARTICIPANT, "Failed to change property <"
<< it->first() << " | " << it->second() << "> to <"
<< persistent_guid.first << " | " << persistent_guid.second << ">");
}
}
else
{
// if it does not exist yet, add it
m_properties.push_back(persistent_guid);
}
}
GUID_t ParticipantProxyData::get_persistence_guid() const
{
GUID_t persistent(c_Guid_Unknown);
ParameterPropertyList_t::const_iterator it = std::find_if(
m_properties.begin(),
m_properties.end(),
[](const fastdds::dds::ParameterProperty_t p)
{
return "PID_PERSISTENCE_GUID" == p.first();
});
if (it != m_properties.end())
{
std::istringstream in(it->second());
in >> persistent;
}
return persistent;
}
void ParticipantProxyData::assert_liveliness()
{
last_received_message_tm_ = std::chrono::steady_clock::now();
}
} /* namespace rtps */
} /* namespace fastrtps */
} /* namespace eprosima */
| 1 | 18,468 | Don't use relative paths. src directory is already on the include search path | eProsima-Fast-DDS | cpp |
@@ -177,8 +177,12 @@ type BuildTarget struct {
// Tools that this rule will use, ie. other rules that it may use at build time which are not
// copied into its source directory.
Tools []BuildInput
+ // Like tools but available to the test_cmd instead
+ TestTools []BuildInput
// Named tools, similar to named sources.
namedTools map[string][]BuildInput `name:"tools"`
+ // Named test tools, similar to named sources.
+ namedTestTools map[string][]BuildInput `name:"test_tools"`
// Target-specific environment passthroughs.
PassEnv *[]string `name:"pass_env"`
// Target-specific unsafe environment passthroughs. | 1 | package core
import (
"fmt"
"github.com/thought-machine/please/src/fs"
"os"
"path"
"path/filepath"
"sort"
"strings"
"sync"
"sync/atomic"
"time"
)
// OutDir is the root output directory for everything.
const OutDir string = "plz-out"
// TmpDir is the root of the temporary directory for building targets & running tests.
const TmpDir string = "plz-out/tmp"
// GenDir is the output directory for non-binary targets.
const GenDir string = "plz-out/gen"
// BinDir is the output directory for binary targets.
const BinDir string = "plz-out/bin"
// DefaultBuildingDescription is the default description for targets when they're building.
const DefaultBuildingDescription = "Building..."
// SandboxDir is the directory that sandboxed actions are run in.
const SandboxDir = "/tmp/plz_sandbox"
// Suffixes for temporary directories
const buildDirSuffix = "._build"
const testDirSuffix = "._test"
// TestResultsFile is the file that targets output their test results into.
// This is normally defined for them via an environment variable.
const TestResultsFile = "test.results"
// CoverageFile is the file that targets output coverage information into.
// This is similarly defined via an environment variable.
const CoverageFile = "test.coverage"
// TestResultsDirLabel is a known label that indicates that the test will output results
// into a directory rather than a file. Please can internally handle either but the remote
// execution API requires that we specify which is which.
const TestResultsDirLabel = "test_results_dir"
// tempOutputSuffix is the suffix we attach to temporary outputs to avoid name clashes.
const tempOutputSuffix = ".out"
// A BuildTarget is a representation of a build target and all information about it;
// its name, dependencies, build commands, etc.
type BuildTarget struct {
// N.B. The tags on these fields are used by query print to help it print them.
// Identifier of this build target
Label BuildLabel `name:"name"`
// If this target is in a subrepo, this will be the one it's in.
Subrepo *Subrepo `print:"false"`
// Dependencies of this target.
// Maps the original declaration to whatever dependencies actually got attached,
// which may be more than one in some cases. Also contains info about exporting etc.
dependencies []depInfo `name:"deps"`
// List of build target patterns that can use this build target.
Visibility []BuildLabel
// Source files of this rule. Can refer to build rules themselves.
Sources []BuildInput `name:"srcs"`
// Named source files of this rule; as above but identified by name.
NamedSources map[string][]BuildInput `name:"srcs"`
// Data files of this rule. Similar to sources but used at runtime, typically by tests.
Data []BuildInput `name:"data"`
// Data files of this rule by name.
namedData map[string][]BuildInput `name:"data"`
// Output files of this rule. All are paths relative to this package.
outputs []string `name:"outs"`
// Named output subsets of this rule. All are paths relative to this package but can be
// captured separately; for example something producing C code might separate its outputs
// into sources and headers.
namedOutputs map[string][]string `name:"outs"`
// Optional output files of this rule. Same as outs but aren't required to be produced always.
// Can be glob patterns.
OptionalOutputs []string `name:"optional_outs"`
// Optional labels applied to this rule. Used for including/excluding rules.
Labels []string
// Shell command to run.
Command string `name:"cmd" hide:"filegroup"`
// Per-configuration shell commands to run.
Commands map[string]string `name:"cmd" hide:"filegroup"`
// Shell command to run for test targets.
TestCommand string `name:"test_cmd"`
// Per-configuration test commands to run.
TestCommands map[string]string `name:"test_cmd"`
// Represents the state of this build target (see below)
state int32 `print:"false"`
// True if this target is a binary (ie. runnable, will appear in plz-out/bin)
IsBinary bool `name:"binary"`
// True if this target is a test
IsTest bool `name:"test"`
// Indicates that the target can only be depended on by tests or other rules with this set.
// Used to restrict non-deployable code and also affects coverage detection.
TestOnly bool `name:"test_only"`
// True if the build action is sandboxed.
Sandbox bool
// True if the test action is sandboxed.
TestSandbox bool `name:"test_sandbox"`
// True if the target is a test and has no output file.
// Default is false, meaning all tests must produce test.results as output.
NoTestOutput bool `name:"no_test_output"`
// True if this target needs access to its transitive dependencies to build.
// This would be false for most 'normal' genrules but true for eg. compiler steps
// that need to build in everything.
NeedsTransitiveDependencies bool `name:"needs_transitive_deps"`
// True if this target blocks recursive exploring for transitive dependencies.
// This is typically false for _library rules which aren't complete, and true
// for _binary rules which normally are, and genrules where you don't care about
// the inputs, only whatever they were turned into.
OutputIsComplete bool `name:"output_is_complete"`
// If true, the rule is given an env var at build time that contains the hash of its
// transitive dependencies, which can be used to identify the output in a predictable way.
Stamp bool
// If true, the target must be run locally (i.e. is not compatible with remote execution).
Local bool
// If true, the executed commands will exit whenever an error is encountered (i.e. shells
// are executed with -e).
ExitOnError bool
// If true, the target is needed for a subinclude and therefore we will have to make sure its
// outputs are available locally when built.
NeededForSubinclude bool `print:"false"`
// Marks the target as a filegroup.
IsFilegroup bool `print:"false"`
// Marks the target as a remote_file.
IsRemoteFile bool `print:"false"`
// Marks that the target was added in a post-build function.
AddedPostBuild bool `print:"false"`
// If true, the interactive progress display will try to infer the target's progress
// via some heuristics on its output.
ShowProgress bool `name:"progress"`
// If ShowProgress is true, this is used to store the current progress of the target.
Progress float32 `print:"false"`
// The results of this test target, if it is one.
Results TestSuite `print:"false"`
// The number of completed runs
completedRuns int `print:"false"`
// A mutex to control access to Results
resultsMux sync.Mutex `print:"false"`
// Description displayed while the command is building.
// Default is just "Building" but it can be customised.
BuildingDescription string `name:"building_description"`
// Acceptable hashes of the outputs of this rule. If the output doesn't match any of these
// it's an error at build time. Can be used to validate third-party deps.
Hashes []string
// Licences that this target is subject to.
Licences []string
// Any secrets that this rule requires.
// Secrets are similar to sources but are always absolute system paths and affect the hash
// differently; they are not used to determine the hash for retrieving a file from cache, but
// if changed locally will still force a rebuild. They're not copied into the source directory
// (or indeed anywhere by plz).
Secrets []string
// Named secrets of this rule; as above but identified by name.
NamedSecrets map[string][]string
// BUILD language functions to call before / after target is built. Allows deferred manipulation of the build graph.
PreBuildFunction PreBuildFunction `name:"pre_build"`
PostBuildFunction PostBuildFunction `name:"post_build"`
// Languages this rule requires. These are an arbitrary set and the only meaning is that they
// correspond to entries in Provides; if rules match up then it allows choosing a specific
// dependency (consider eg. code generated from protobufs; this mechanism allows us to expose
// one rule but only compile the appropriate code for each library that consumes it).
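// For example (illustrative labels): a proto rule might provide
// {"go": ":my_proto_go", "py": ":my_proto_py"}; a dependent rule declaring
// Requires = ["go"] then resolves its dependency on that rule to the Go variant.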
Requires []string
// Dependent rules this rule provides for each language. Matches up to Requires as described above.
Provides map[string]BuildLabel
// Stores the hash of this build rule before any post-build function is run.
RuleHash []byte `name:"exported_deps"` // bit of a hack to call this exported_deps...
// Tools that this rule will use, ie. other rules that it may use at build time which are not
// copied into its source directory.
Tools []BuildInput
// Named tools, similar to named sources.
namedTools map[string][]BuildInput `name:"tools"`
// Target-specific environment passthroughs.
PassEnv *[]string `name:"pass_env"`
// Target-specific unsafe environment passthroughs.
PassUnsafeEnv *[]string `name:"pass_unsafe_env"`
// Flakiness of test, ie. number of times we will rerun it before giving up. 1 is the default.
Flakiness int `name:"flaky"`
// Timeouts for build/test actions
BuildTimeout time.Duration `name:"timeout"`
TestTimeout time.Duration `name:"test_timeout"`
// Extra output files from the test.
// These are in addition to the usual test.results output file.
TestOutputs []string `name:"test_outputs"`
// OutputDirectories are directories that outputs can be produced into; their contents are added at the root of
// the rule's outputs. For example, if an output directory "foo" contains "bar.txt", the rule will have the output
// "bar.txt"
OutputDirectories []OutputDirectory `name:"output_dirs"`
// RuleMetadata is the metadata attached to this build rule. It can be accessed through the "get_rule_metadata" BIF.
RuleMetadata interface{} `name:"config"`
}
// BuildMetadata is temporary metadata that's stored around a build target - we don't
// generally persist it indefinitely.
type BuildMetadata struct {
// Standard output & error
Stdout, Stderr []byte
// Serialised build action metadata.
RemoteAction []byte
// Time this action was written. Used for remote execution to determine if
// the action is stale and needs re-checking or not.
Timestamp time.Time
// Additional outputs from output directories serialised as a csv
OutputDirOuts []string
// True if this represents a test run.
Test bool
// True if the results were retrieved from a cache, false if we ran the full build action.
Cached bool
}
// A PreBuildFunction is a type that allows hooking a pre-build callback.
type PreBuildFunction interface {
fmt.Stringer
// Call calls this pre-build function
Call(target *BuildTarget) error
}
// A PostBuildFunction is a type that allows hooking a post-build callback.
type PostBuildFunction interface {
fmt.Stringer
// Call calls this pre-build function with this target and its output.
Call(target *BuildTarget, output string) error
}
type depInfo struct {
declared BuildLabel // the originally declared dependency
deps []*BuildTarget // list of actual deps
resolved bool // has the graph resolved it
exported bool // is it an exported dependency
internal bool // is it an internal dependency (that is not picked up implicitly by transitive searches)
source bool // is it implicit because it's a source (not true if it's a dependency too)
data bool // is it a data item for a test
}
// OutputDirectory is an output directory for the build rule. It may have a suffix of /** which means that we should
// traverse the directory tree adding each file individually rather than just adding whatever files/directories are in
// the top level.
type OutputDirectory string
// Dir returns the actual directory name for this output directory
func (o OutputDirectory) Dir() string {
return strings.TrimSuffix(string(o), "/**")
}
// ShouldAddFiles checks whether the contents of this directory should include all the files in the directory tree
// individually i.e. out_dir/net/thoughtmachine/Main.java -> net/thoughtmachine/Main.java. If this is false then these
// files would be included as out_dir/net/thoughtmachine/Main.java -> net.
func (o OutputDirectory) ShouldAddFiles() bool {
// TODO(jpoole): consider if we should have full glob matching for the suffix so we can do stuff like **.java
// or *_test.go. This will prove difficult for rex where we only have the file names rather than the actual
// directory
return strings.HasSuffix(string(o), "/**")
}
// A BuildTargetState tracks the current state of this target in regard to whether it's built
// or not. Targets only move forwards through this (i.e. the state of a target only ever increases).
type BuildTargetState int32
// The available states for a target.
const (
Inactive BuildTargetState = iota // Target isn't used in current build
Semiactive // Target would be active if we needed a build
Active // Target is going to be used in current build
Pending // Target is ready to be built but not yet started.
Building // Target is currently being built
Stopped // We stopped building the target because we'd gone as far as needed.
Built // Target has been successfully built
Cached // Target has been retrieved from the cache
Unchanged // Target has been built but hasn't changed since last build
Reused // Outputs of previous build have been reused.
BuiltRemotely // Target has been built but outputs are not necessarily local.
ReusedRemotely // Outputs of previous remote action have been reused.
Failed // Target failed for some reason
)
// String implements the fmt.Stringer interface.
func (s BuildTargetState) String() string {
if s == Inactive {
return "Inactive"
} else if s == Semiactive {
return "Semiactive"
} else if s == Active {
return "Active"
} else if s == Pending {
return "Pending"
} else if s == Building {
return "Building"
} else if s == Stopped {
return "Stopped"
} else if s == Built {
return "Built"
} else if s == Cached {
return "Cached"
} else if s == Unchanged {
return "Unchanged"
} else if s == Reused {
return "Reused"
} else if s == Failed {
return "Failed"
} else if s == BuiltRemotely {
return "Built remotely"
} else if s == ReusedRemotely {
return "Reused remote outputs"
}
return "Unknown"
}
// NewBuildTarget constructs & returns a new BuildTarget.
func NewBuildTarget(label BuildLabel) *BuildTarget {
return &BuildTarget{
Label: label,
state: int32(Inactive),
BuildingDescription: DefaultBuildingDescription,
}
}
// String returns a stringified form of the build label of this target, which is
// a unique identity for it.
func (target *BuildTarget) String() string {
return target.Label.String()
}
// TmpDir returns the temporary working directory for this target, eg.
// //mickey/donald:goofy -> plz-out/tmp/mickey/donald/goofy._build
// Note the extra subdirectory to keep rules separate from one another, and the .build suffix
// to attempt to keep rules from duplicating the names of sub-packages; obviously that is not
// 100% reliable but we don't have a better solution right now.
func (target *BuildTarget) TmpDir() string {
return path.Join(TmpDir, target.Label.Subrepo, target.Label.PackageName, target.Label.Name+buildDirSuffix)
}
// OutDir returns the output directory for this target, eg.
// //mickey/donald:goofy -> plz-out/gen/mickey/donald (or plz-out/bin if it's a binary)
func (target *BuildTarget) OutDir() string {
if target.IsBinary {
return path.Join(BinDir, target.Label.Subrepo, target.Label.PackageName)
}
return path.Join(GenDir, target.Label.Subrepo, target.Label.PackageName)
}
// TestDir returns the test directory for this target, eg.
// //mickey/donald:goofy -> plz-out/tmp/mickey/donald/goofy._test/run_1
// This is different to TmpDir so we run tests in a clean environment
// and to facilitate containerising tests.
func (target *BuildTarget) TestDir(runNumber int) string {
return path.Join(target.TestDirs(), fmt.Sprint("run_", runNumber))
}
// TestDirs contains the parent directory of all the test run directories above
func (target *BuildTarget) TestDirs() string {
return path.Join(TmpDir, target.Label.Subrepo, target.Label.PackageName, target.Label.Name+testDirSuffix)
}
// CompleteRun completes a run and returns true if this was the last run
func (target *BuildTarget) CompleteRun(state *BuildState) bool {
target.resultsMux.Lock()
defer target.resultsMux.Unlock()
target.completedRuns++
return target.completedRuns == state.NumTestRuns
}
// TestResultsFile returns the output results file for tests for this target.
func (target *BuildTarget) TestResultsFile() string {
return path.Join(target.OutDir(), ".test_results_"+target.Label.Name)
}
// CoverageFile returns the output coverage file for tests for this target.
func (target *BuildTarget) CoverageFile() string {
return path.Join(target.OutDir(), ".test_coverage_"+target.Label.Name)
}
// AddTestResults adds results to the target
func (target *BuildTarget) AddTestResults(results TestSuite) {
target.resultsMux.Lock()
defer target.resultsMux.Unlock()
if len(target.Results.TestCases) == 0 {
target.Results.Cached = results.Cached // On the first run we take whatever this is
} else {
target.Results.Cached = target.Results.Cached && results.Cached
}
target.Results.Collapse(results)
}
// StartTestSuite sets the initial properties on the result test suite
func (target *BuildTarget) StartTestSuite() {
target.resultsMux.Lock()
defer target.resultsMux.Unlock()
// If the results haven't been set yet, set them
if target.Results.Name == "" {
target.Results = TestSuite{
Package: strings.Replace(target.Label.PackageName, "/", ".", -1),
Name: target.Label.Name,
Timestamp: time.Now().Format(time.RFC3339),
}
}
}
// AllSourcePaths returns all the source paths for this target
func (target *BuildTarget) AllSourcePaths(graph *BuildGraph) []string {
return target.allSourcePaths(graph, BuildInput.Paths)
}
// AllFullSourcePaths returns all the source paths for this target, with a leading
// plz-out/gen etc if appropriate.
func (target *BuildTarget) AllFullSourcePaths(graph *BuildGraph) []string {
return target.allSourcePaths(graph, BuildInput.FullPaths)
}
// AllLocalSourcePaths returns the local part of all the source paths for this target,
// i.e. without this target's package in it.
func (target *BuildTarget) AllLocalSourcePaths(graph *BuildGraph) []string {
return target.allSourcePaths(graph, BuildInput.LocalPaths)
}
type buildPathsFunc func(BuildInput, *BuildGraph) []string
func (target *BuildTarget) allSourcePaths(graph *BuildGraph, full buildPathsFunc) []string {
ret := make([]string, 0, len(target.Sources))
for _, source := range target.AllSources() {
ret = append(ret, target.sourcePaths(graph, source, full)...)
}
return ret
}
// AllURLs returns all the URLs for this target.
// This should only be called if the target is a remote file.
// The URLs will have any embedded environment variables expanded according to the given config.
func (target *BuildTarget) AllURLs(config *Configuration) []string {
env := GeneralBuildEnvironment(config)
ret := make([]string, len(target.Sources))
for i, s := range target.Sources {
ret[i] = os.Expand(string(s.(URLLabel)), env.ReplaceEnvironment)
}
return ret
}
// DeclaredDependencies returns all the targets this target declared any kind of dependency on (including sources and tools).
func (target *BuildTarget) DeclaredDependencies() []BuildLabel {
ret := make(BuildLabels, len(target.dependencies))
for i, dep := range target.dependencies {
ret[i] = dep.declared
}
sort.Sort(ret)
return ret
}
// DeclaredDependenciesStrict returns the original declaration of this target's dependencies.
func (target *BuildTarget) DeclaredDependenciesStrict() []BuildLabel {
ret := make(BuildLabels, 0, len(target.dependencies))
for _, dep := range target.dependencies {
if !dep.exported && !dep.source && !target.IsTool(dep.declared) {
ret = append(ret, dep.declared)
}
}
sort.Sort(ret)
return ret
}
// Dependencies returns the resolved dependencies of this target.
func (target *BuildTarget) Dependencies() []*BuildTarget {
ret := make(BuildTargets, 0, len(target.dependencies))
for _, deps := range target.dependencies {
for _, dep := range deps.deps {
ret = append(ret, dep)
}
}
sort.Sort(ret)
return ret
}
// ExternalDependencies returns the non-internal dependencies of this target (i.e. not "_target#tag" ones).
func (target *BuildTarget) ExternalDependencies() []*BuildTarget {
ret := make(BuildTargets, 0, len(target.dependencies))
for _, deps := range target.dependencies {
for _, dep := range deps.deps {
if dep.Label.Parent() != target.Label {
ret = append(ret, dep)
} else {
ret = append(ret, dep.ExternalDependencies()...)
}
}
}
sort.Sort(ret)
return ret
}
// BuildDependencies returns the build-time dependencies of this target (i.e. not data and not internal).
func (target *BuildTarget) BuildDependencies() []*BuildTarget {
ret := make(BuildTargets, 0, len(target.dependencies))
for _, deps := range target.dependencies {
if !deps.data && !deps.internal {
for _, dep := range deps.deps {
ret = append(ret, dep)
}
}
}
sort.Sort(ret)
return ret
}
// ExportedDependencies returns any exported dependencies of this target.
func (target *BuildTarget) ExportedDependencies() []BuildLabel {
ret := make(BuildLabels, 0, len(target.dependencies))
for _, info := range target.dependencies {
if info.exported {
ret = append(ret, info.declared)
}
}
return ret
}
// DependenciesFor returns the dependencies that relate to a given label.
func (target *BuildTarget) DependenciesFor(label BuildLabel) []*BuildTarget {
if info := target.dependencyInfo(label); info != nil {
return info.deps
} else if target.Label.Subrepo != "" && label.Subrepo == "" {
// Can implicitly use the target's subrepo.
label.Subrepo = target.Label.Subrepo
return target.DependenciesFor(label)
}
return nil
}
// DeclaredOutputs returns the outputs from this target's original declaration.
// Hence it's similar to Outputs() but without the resolving of other rule names.
func (target *BuildTarget) DeclaredOutputs() []string {
return target.outputs
}
// DeclaredNamedOutputs returns the named outputs from this target's original declaration.
func (target *BuildTarget) DeclaredNamedOutputs() map[string][]string {
return target.namedOutputs
}
// DeclaredOutputNames is a convenience function to return the names of the declared
// outputs in a consistent order.
func (target *BuildTarget) DeclaredOutputNames() []string {
ret := make([]string, 0, len(target.namedOutputs))
for name := range target.namedOutputs {
ret = append(ret, name)
}
sort.Strings(ret)
return ret
}
// Outputs returns a slice of all the outputs of this rule.
func (target *BuildTarget) Outputs() []string {
var ret []string
if target.IsFilegroup {
ret = make([]string, 0, len(target.Sources))
// Filegroups just re-output their inputs.
for _, src := range target.Sources {
if namedLabel, ok := src.(NamedOutputLabel); ok {
// Bit of a hack, but this needs different treatment from either of the others.
for _, dep := range target.DependenciesFor(namedLabel.BuildLabel) {
ret = append(ret, dep.NamedOutputs(namedLabel.Output)...)
}
} else if label := src.nonOutputLabel(); label == nil {
ret = append(ret, src.LocalPaths(nil)[0])
} else {
for _, dep := range target.DependenciesFor(*label) {
ret = append(ret, dep.Outputs()...)
}
}
}
} else {
// Must really copy the slice before sorting it ([:] is too shallow)
ret = make([]string, len(target.outputs))
copy(ret, target.outputs)
}
if target.namedOutputs != nil {
for _, outputs := range target.namedOutputs {
ret = append(ret, outputs...)
}
}
sort.Strings(ret)
return ret
}
// FullOutputs returns a slice of all the outputs of this rule with the target's output directory prepended.
func (target *BuildTarget) FullOutputs() []string {
outs := target.Outputs()
outDir := target.OutDir()
for i, out := range outs {
outs[i] = path.Join(outDir, out)
}
return outs
}
// NamedOutputs returns a slice of all the outputs of this rule with a given name.
// If the name is not declared by this rule it panics.
func (target *BuildTarget) NamedOutputs(name string) []string {
if target.namedOutputs == nil {
return nil
}
if outs, present := target.namedOutputs[name]; present {
return outs
}
return nil
}
// GetTmpOutput takes the original output filename as an argument, and returns a temporary output
// filename (in plz-out/tmp/) if the output has the same name as the package; this avoids the name conflict issue
func (target *BuildTarget) GetTmpOutput(parseOutput string) string {
if target.IsFilegroup {
return parseOutput // Filegroups never need this.
} else if parseOutput == target.Label.PackageName {
return parseOutput + tempOutputSuffix
} else if target.Label.PackageName == "" && target.HasSource(parseOutput) {
// This also fixes the case where source and output are the same, which can happen
// when we're in the root directory.
return parseOutput + tempOutputSuffix
}
return parseOutput
}
// GetTmpOutputAll returns a slice of all the temporary outputs this is used in setting up environment for outputs,
// e.g: OUTS, OUT
func (target *BuildTarget) GetTmpOutputAll(parseOutputs []string) []string {
tmpOutputs := make([]string, len(parseOutputs))
for i, out := range parseOutputs {
tmpOutputs[i] = target.GetTmpOutput(out)
}
return tmpOutputs
}
// GetRealOutput returns the real output name for a filename that might have been a temporary output
// (i.e as returned by GetTmpOutput).
func (target *BuildTarget) GetRealOutput(output string) string {
if strings.HasSuffix(output, tempOutputSuffix) {
real := strings.TrimSuffix(output, tempOutputSuffix)
// Check this isn't a file that just happens to be named the same way
if target.GetTmpOutput(real) == output {
return real
}
}
return output
}
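// Illustrative usage sketch (not part of the original source): for a hypothetical
// target in package "tools" that also declares an output named "tools", the two
// methods above round-trip the temporary name:
//
//   tmp := target.GetTmpOutput("tools")  // "tools" + tempOutputSuffix, avoiding the clash
//   real := target.GetRealOutput(tmp)    // back to "tools"
//   target.GetRealOutput("unrelated")    // returned unchanged, nothing to strip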
// SourcePaths returns the source paths for a given set of sources.
func (target *BuildTarget) SourcePaths(graph *BuildGraph, sources []BuildInput) []string {
ret := make([]string, 0, len(sources))
for _, source := range sources {
ret = append(ret, target.sourcePaths(graph, source, BuildInput.Paths)...)
}
return ret
}
// sourcePaths returns the source paths for a single source.
func (target *BuildTarget) sourcePaths(graph *BuildGraph, source BuildInput, f buildPathsFunc) []string {
if label := source.nonOutputLabel(); label != nil {
ret := []string{}
for _, providedLabel := range graph.TargetOrDie(*label).ProvideFor(target) {
ret = append(ret, f(providedLabel, graph)...)
}
return ret
}
return f(source, graph)
}
// allDepsBuilt returns true if all the dependencies of a target are built.
func (target *BuildTarget) allDepsBuilt() bool {
if !target.allDependenciesResolved() {
return false // Target still has some deps pending parse.
}
for _, deps := range target.dependencies {
for _, dep := range deps.deps {
if dep.State() < Built {
return false
}
}
}
return true
}
// allDependenciesResolved returns true once all the dependencies of a target have been
// parsed and resolved to real targets.
func (target *BuildTarget) allDependenciesResolved() bool {
for _, deps := range target.dependencies {
if !deps.resolved {
return false
}
}
return true
}
// CanSee returns true if target can see the given dependency, or false if not.
func (target *BuildTarget) CanSee(state *BuildState, dep *BuildTarget) bool {
return target.Label.CanSee(state, dep)
}
// CheckDependencyVisibility checks that all declared dependencies of this target are visible to it.
// Returns an error if not, or nil if all's well.
func (target *BuildTarget) CheckDependencyVisibility(state *BuildState) error {
for _, d := range target.dependencies {
dep := state.Graph.TargetOrDie(d.declared)
if !target.CanSee(state, dep) {
return fmt.Errorf("Target %s isn't visible to %s", dep.Label, target.Label)
} else if dep.TestOnly && !(target.IsTest || target.TestOnly) {
if target.Label.isExperimental(state) {
log.Warning("Test-only restrictions suppressed for %s since %s is in the experimental tree", dep.Label, target.Label)
} else {
return fmt.Errorf("Target %s can't depend on %s, it's marked test_only", target.Label, dep.Label)
}
}
}
return nil
}
// CheckDuplicateOutputs checks if any of the outputs of this target duplicate one another.
// Returns an error if so, or nil if all's well.
func (target *BuildTarget) CheckDuplicateOutputs() error {
outputs := map[string]struct{}{}
for _, output := range target.Outputs() {
if _, present := outputs[output]; present {
return fmt.Errorf("Target %s declares output file %s multiple times", target.Label, output)
}
outputs[output] = struct{}{}
}
return nil
}
// CheckTargetOwnsBuildOutputs checks that any outputs of this rule are output into directories of this package.
func (target *BuildTarget) CheckTargetOwnsBuildOutputs(state *BuildState) error {
// Skip this check for sub-repos because sub-repos are currently outputted into plz-gen so the output might also
// be a sub-repo that contains a package. This isn't the best solution but we can't fix this without reworking
// how sub-repos are done.
if target.Subrepo != nil {
return nil
}
for _, output := range target.outputs {
targetPackage := target.Label.PackageName
out := filepath.Join(targetPackage, output)
if fs.IsPackage(state.Config.Parse.BuildFileName, out) {
return fmt.Errorf("trying to output file %s, but that directory is another package", out)
}
// If the output is just a file in the package root, we don't need to check anything else.
if filepath.Dir(output) == "." {
continue
}
pkg := FindOwningPackage(state, out)
if targetPackage != pkg.PackageName {
return fmt.Errorf("trying to output file %s, but that directory belongs to another package (%s)", out, pkg.PackageName)
}
}
return nil
}
// CheckTargetOwnsBuildInputs checks that any file inputs to this rule belong to this package.
func (target *BuildTarget) CheckTargetOwnsBuildInputs(state *BuildState) error {
for _, input := range target.Sources {
if err := target.checkTargetOwnsBuildInput(state, input); err != nil {
return err
}
}
for _, input := range target.Data {
if err := target.checkTargetOwnsBuildInput(state, input); err != nil {
return err
}
}
return nil
}
func (target *BuildTarget) checkTargetOwnsBuildInput(state *BuildState, input BuildInput) error {
if input, ok := input.(FileLabel); ok {
for _, f := range input.Paths(state.Graph) {
if err := target.checkTargetOwnsFileAndSubDirectories(state, f); err != nil {
return err
}
}
}
return nil
}
func (target *BuildTarget) checkTargetOwnsFileAndSubDirectories(state *BuildState, file string) error {
pkg := FindOwningPackage(state, file)
if target.Label.PackageName != pkg.PackageName {
return fmt.Errorf("package %s is trying to use file %s, but that belongs to another package (%s)", target.Label.PackageName, file, pkg.PackageName)
}
if fs.IsDirectory(file) {
err := fs.Walk(file, func(name string, isDir bool) error {
if isDir && fs.IsPackage(state.Config.Parse.BuildFileName, name) {
return fmt.Errorf("cannot include %s as it contains subpackage %s", file, name)
}
return nil
})
if err != nil {
return err
}
}
return nil
}
// CheckSecrets checks that this target's secrets are available.
// We run this check before building because we don't attempt to copy them, but any rule
// requiring them will presumably fail if they aren't available.
// Returns an error if any aren't.
func (target *BuildTarget) CheckSecrets() error {
for _, secret := range target.AllSecrets() {
if path := fs.ExpandHomePath(secret); !PathExists(path) {
return fmt.Errorf("Path %s doesn't exist; it's required to build %s", secret, target.Label)
}
}
return nil
}
// AllSecrets returns all the secrets of this rule.
func (target *BuildTarget) AllSecrets() []string {
ret := target.Secrets[:]
if target.NamedSecrets != nil {
keys := make([]string, 0, len(target.NamedSecrets))
for k := range target.NamedSecrets {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
ret = append(ret, target.NamedSecrets[k]...)
}
}
return ret
}
// HasDependency checks if a target already depends on this label.
func (target *BuildTarget) HasDependency(label BuildLabel) bool {
return target.dependencyInfo(label) != nil
}
// hasResolvedDependency returns true if a particular dependency has been resolved to real targets yet.
func (target *BuildTarget) hasResolvedDependency(label BuildLabel) bool {
info := target.dependencyInfo(label)
return info != nil && info.resolved
}
// resolveDependency resolves a particular dependency on a target.
func (target *BuildTarget) resolveDependency(label BuildLabel, dep *BuildTarget) {
info := target.dependencyInfo(label)
if info == nil {
target.dependencies = append(target.dependencies, depInfo{declared: label})
info = &target.dependencies[len(target.dependencies)-1]
}
if dep != nil {
info.deps = append(info.deps, dep)
}
info.resolved = true
}
// dependencyInfo returns the information about a declared dependency, or nil if the target doesn't have it.
func (target *BuildTarget) dependencyInfo(label BuildLabel) *depInfo {
for i, info := range target.dependencies {
if info.declared == label {
return &target.dependencies[i]
}
}
return nil
}
// IsSourceOnlyDep returns true if the given dependency was only declared on the srcs of the target.
func (target *BuildTarget) IsSourceOnlyDep(label BuildLabel) bool {
info := target.dependencyInfo(label)
return info != nil && info.source
}
// State returns the target's current state.
func (target *BuildTarget) State() BuildTargetState {
return BuildTargetState(atomic.LoadInt32(&target.state))
}
// SetState sets a target's current state.
func (target *BuildTarget) SetState(state BuildTargetState) {
atomic.StoreInt32(&target.state, int32(state))
}
// SyncUpdateState moves the target's state from before to after via an atomic compare-and-swap.
// Returns true if successful, false if not (which implies something else changed the state first).
// The nature of our build graph ensures that most transitions are only attempted by
// one thread simultaneously, but this one can be attempted by several at once
// (eg. if a depends on b and c, which finish building simultaneously, they race to queue a).
func (target *BuildTarget) SyncUpdateState(before, after BuildTargetState) bool {
return atomic.CompareAndSwapInt32(&target.state, int32(before), int32(after))
}
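// Illustrative only (not part of the original source): two builders that finish the
// last dependencies of the same target race to queue it; only one compare-and-swap wins.
//
//   if target.SyncUpdateState(Active, Pending) {
//       // this goroutine won the race and is responsible for queuing the target
//   }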
// AddLabel adds the given label to this target if it doesn't already have it.
func (target *BuildTarget) AddLabel(label string) {
if !target.HasLabel(label) {
target.Labels = append(target.Labels, label)
}
}
// HasLabel returns true if target has the given label.
func (target *BuildTarget) HasLabel(label string) bool {
for _, l := range target.Labels {
if l == label {
return true
}
}
return label == "test" && target.IsTest
}
// PrefixedLabels returns all labels of this target with the given prefix.
func (target *BuildTarget) PrefixedLabels(prefix string) []string {
ret := []string{}
for _, l := range target.Labels {
if strings.HasPrefix(l, prefix) {
ret = append(ret, strings.TrimPrefix(l, prefix))
}
}
return ret
}
// HasAnyLabel returns true if target has any of these labels.
func (target *BuildTarget) HasAnyLabel(labels []string) bool {
for _, label := range labels {
if target.HasLabel(label) {
return true
}
}
return false
}
// HasAllLabels returns true if target has all of these labels.
func (target *BuildTarget) HasAllLabels(labels []string) bool {
for _, label := range labels {
if !target.HasLabel(label) {
return false
}
}
return true
}
// ShouldInclude handles the typical include/exclude logic for a target's labels; returns true if
// target has any include label and not an exclude one.
// Each include/exclude can have multiple comma-separated labels; in this case, all of the labels
// in a given group must match.
func (target *BuildTarget) ShouldInclude(includes, excludes []string) bool {
if len(includes) == 0 && len(excludes) == 0 {
return true
}
// Include by default if no includes are specified.
shouldInclude := len(includes) == 0
for _, include := range includes {
if target.HasAllLabels(strings.Split(include, ",")) {
shouldInclude = true
break
}
}
for _, exclude := range excludes {
if target.HasAllLabels(strings.Split(exclude, ",")) {
shouldInclude = false
break
}
}
return shouldInclude
}
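// A minimal usage sketch, not part of the original source, assuming a target that
// carries the labels "manual" and "py3":
//
//   target.ShouldInclude([]string{"manual,py3"}, nil) // true: every label in the group matches
//   target.ShouldInclude([]string{"manual,go"}, nil)  // false: "go" is missing from the group
//   target.ShouldInclude(nil, []string{"manual"})     // false: a matching exclude group wins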
// AddProvide adds a new provide entry to this target.
func (target *BuildTarget) AddProvide(language string, label BuildLabel) {
if target.Provides == nil {
target.Provides = map[string]BuildLabel{language: label}
} else {
target.Provides[language] = label
}
}
// ProvideFor returns the build label that we'd provide for the given target.
func (target *BuildTarget) ProvideFor(other *BuildTarget) []BuildLabel {
ret := []BuildLabel{}
if target.Provides != nil && len(other.Requires) != 0 {
// Never do this if the other target has a data or tool dependency on us.
for _, data := range other.Data {
if label := data.Label(); label != nil && *label == target.Label {
return []BuildLabel{target.Label}
}
}
if other.IsTool(target.Label) {
return []BuildLabel{target.Label}
}
for _, require := range other.Requires {
if label, present := target.Provides[require]; present {
ret = append(ret, label)
}
}
if len(ret) > 0 {
return ret
}
}
return []BuildLabel{target.Label}
}
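// Hypothetical illustration (not in the original source): a rule that provides a
// language-specific implementation is substituted only for dependants that require it.
// lib, pyLabel, pyBinary and genericRule are assumed to exist elsewhere.
//
//   lib.AddProvide("py", pyLabel)
//   lib.ProvideFor(pyBinary)    // [pyLabel] when pyBinary.Requires contains "py"
//   lib.ProvideFor(genericRule) // [lib.Label] when nothing matches, or nothing is required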
// UnprefixedHashes returns the hashes for the target without any prefixes;
// they are allowed to have optional prefixes before a colon which aren't taken
// into account for the resulting hash.
func (target *BuildTarget) UnprefixedHashes() []string {
hashes := target.Hashes[:]
for i, h := range hashes {
if index := strings.LastIndexByte(h, ':'); index != -1 {
hashes[i] = strings.TrimSpace(h[index+1:])
}
}
return hashes
}
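// Example for illustration only (not in the original source): optional algorithm
// prefixes before a colon are stripped, surrounding whitespace included.
//
//   target.Hashes = []string{"sha1: deadbeef", "cafebabe"}
//   target.UnprefixedHashes() // ["deadbeef", "cafebabe"]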
// AddSource adds a source to the build target, deduplicating against existing entries.
func (target *BuildTarget) AddSource(source BuildInput) {
target.Sources = target.addSource(target.Sources, source)
}
func (target *BuildTarget) addSource(sources []BuildInput, source BuildInput) []BuildInput {
for _, src := range sources {
if source == src {
return sources
}
}
// Add a dependency if this is not just a file.
if label := source.Label(); label != nil {
target.AddMaybeExportedDependency(*label, false, true, false)
}
return append(sources, source)
}
// AddSecret adds a secret to the build target, deduplicating against existing entries.
func (target *BuildTarget) AddSecret(secret string) {
target.Secrets = target.addSecret(target.Secrets, secret)
}
func (target *BuildTarget) addSecret(secrets []string, secret string) []string {
for _, existing := range secrets {
if existing == secret {
return secrets
}
}
return append(secrets, secret)
}
// AddNamedSource adds a source to the target which is tagged with a particular name.
// For example, C++ rules add sources tagged as "sources" and "headers" to distinguish
// two conceptually different kinds of input.
func (target *BuildTarget) AddNamedSource(name string, source BuildInput) {
if target.NamedSources == nil {
target.NamedSources = map[string][]BuildInput{name: target.addSource(nil, source)}
} else {
target.NamedSources[name] = target.addSource(target.NamedSources[name], source)
}
}
// AddNamedSecret adds a secret to the target which is tagged with a particular name.
// These will be made available in the environment at runtime, with key-format "SECRETS_<NAME>".
func (target *BuildTarget) AddNamedSecret(name string, secret string) {
if target.NamedSecrets == nil {
target.NamedSecrets = map[string][]string{name: target.addSecret(nil, secret)}
} else {
target.NamedSecrets[name] = target.addSecret(target.NamedSecrets[name], secret)
}
}
// AddTool adds a new tool to the target.
func (target *BuildTarget) AddTool(tool BuildInput) {
target.Tools = append(target.Tools, tool)
if label := tool.Label(); label != nil {
target.AddDependency(*label)
}
}
// AddDatum adds a new item of data to the target.
func (target *BuildTarget) AddDatum(datum BuildInput) {
target.Data = append(target.Data, datum)
if label := datum.Label(); label != nil {
target.AddDependency(*label)
target.dependencyInfo(*label).data = true
}
}
// AddNamedDatum adds a data file to the target which is tagged with a particular name.
func (target *BuildTarget) AddNamedDatum(name string, datum BuildInput) {
if target.namedData == nil {
target.namedData = map[string][]BuildInput{name: {datum}}
} else {
target.namedData[name] = append(target.namedData[name], datum)
}
if label := datum.Label(); label != nil {
target.AddDependency(*label)
target.dependencyInfo(*label).data = true
}
}
// AddNamedTool adds a new tool to the target.
func (target *BuildTarget) AddNamedTool(name string, tool BuildInput) {
if target.namedTools == nil {
target.namedTools = map[string][]BuildInput{name: {tool}}
} else {
target.namedTools[name] = append(target.namedTools[name], tool)
}
if label := tool.Label(); label != nil {
target.AddDependency(*label)
}
}
// AddCommand adds a new config-specific command to this build target.
// Adding a general command is still done by simply setting the Command member.
func (target *BuildTarget) AddCommand(config, command string) {
if target.Command != "" {
panic(fmt.Sprintf("Adding named command %s to %s, but it already has a general command set", config, target.Label))
} else if target.Commands == nil {
target.Commands = map[string]string{config: command}
} else {
target.Commands[config] = command
}
}
// AddTestCommand adds a new config-specific test command to this build target.
// Adding a general command is still done by simply setting the TestCommand member.
func (target *BuildTarget) AddTestCommand(config, command string) {
if target.TestCommand != "" {
panic(fmt.Sprintf("Adding named test command %s to %s, but it already has a general test command set", config, target.Label))
} else if target.TestCommands == nil {
target.TestCommands = map[string]string{config: command}
} else {
target.TestCommands[config] = command
}
}
// GetCommand returns the command we should use to build this target for the current config.
func (target *BuildTarget) GetCommand(state *BuildState) string {
return target.getCommand(state, target.Commands, target.Command)
}
// GetCommandConfig returns the command we should use to build this target for the given config.
func (target *BuildTarget) GetCommandConfig(config string) string {
if config == "" {
return target.Command
}
return target.Commands[config]
}
// GetTestCommand returns the command we should use to test this target for the current config.
func (target *BuildTarget) GetTestCommand(state *BuildState) string {
return target.getCommand(state, target.TestCommands, target.TestCommand)
}
func (target *BuildTarget) getCommand(state *BuildState, commands map[string]string, singleCommand string) string {
if commands == nil {
return singleCommand
} else if command, present := commands[state.Config.Build.Config]; present {
return command // Has command for current config, good
} else if command, present := commands[state.Config.Build.FallbackConfig]; present {
return command // Has command for default config, fall back to that
}
// Oh dear, target doesn't have any matching config. Panicking is a bit heavy here, instead
// fall back to an arbitrary (but consistent) one.
highestCommand := ""
highestConfig := ""
for config, command := range commands {
if config > highestConfig {
highestConfig = config
highestCommand = command
}
}
log.Warning("%s doesn't have a command for %s (or %s), falling back to %s",
target.Label, state.Config.Build.Config, state.Config.Build.FallbackConfig, highestConfig)
return highestCommand
}
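// Hedged usage sketch (not part of the original source), assuming a build config of
// "opt" with fallback config "dbg":
//
//   target.AddCommand("opt", "compile -O2")
//   target.AddCommand("dbg", "compile -g")
//   target.GetCommand(state) // "compile -O2"; falls back to "compile -g" if only the
//                            // fallback matches, else the highest config name with a warning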
// AllSources returns all the sources of this rule.
func (target *BuildTarget) AllSources() []BuildInput {
ret := target.Sources[:]
if target.NamedSources != nil {
keys := make([]string, 0, len(target.NamedSources))
for k := range target.NamedSources {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
ret = append(ret, target.NamedSources[k]...)
}
}
return ret
}
// AllLocalSources returns all the "local" sources of this rule, i.e. all sources that are
// actually sources in the repo, not other rules or system srcs etc.
func (target *BuildTarget) AllLocalSources() []string {
ret := []string{}
for _, src := range target.AllSources() {
if file, ok := src.(FileLabel); ok {
ret = append(ret, file.Paths(nil)[0])
}
}
return ret
}
// HasSource returns true if this target has the given file as a source (named or not, or data).
func (target *BuildTarget) HasSource(source string) bool {
for _, src := range append(target.AllSources(), target.AllData()...) {
// Check for both the source matching and a prefix match indicating it's a directory with the file within.
if s := src.String(); s == source || strings.HasPrefix(source, s+"/") {
return true
}
}
return false
}
// HasAbsoluteSource returns true if this target has the given file as a source (or data).
// The input source includes the target's package name.
func (target *BuildTarget) HasAbsoluteSource(source string) bool {
return target.HasSource(strings.TrimPrefix(source, target.Label.PackageName+"/"))
}
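// Illustrative sketch (not in the original source): a directory source matches files
// within it via the prefix check above, assuming the target lists a source directory "res".
//
//   target.HasSource("res")          // true: exact match
//   target.HasSource("res/icon.png") // true: "res/" prefix matches
//   target.HasAbsoluteSource(path.Join(target.Label.PackageName, "res/icon.png")) // also true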
// AllData returns all the data files for this rule.
func (target *BuildTarget) AllData() []BuildInput {
ret := target.Data[:]
if target.namedData != nil {
keys := make([]string, 0, len(target.namedData))
for k := range target.namedData {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
ret = append(ret, target.namedData[k]...)
}
}
return ret
}
// AllDataPaths returns the paths for all the data of this target.
func (target *BuildTarget) AllDataPaths(graph *BuildGraph) []string {
ret := make([]string, 0, len(target.Data))
for _, datum := range target.AllData() {
ret = append(ret, target.sourcePaths(graph, datum, BuildInput.Paths)...)
}
return ret
}
// AllTools returns all the tools for this rule in some canonical order.
func (target *BuildTarget) AllTools() []BuildInput {
if target.namedTools == nil {
return target.Tools // Leave them in input order, that's sufficiently consistent.
}
tools := make([]BuildInput, len(target.Tools), len(target.Tools)+len(target.namedTools)*2)
copy(tools, target.Tools)
for _, name := range target.ToolNames() {
tools = append(tools, target.namedTools[name]...)
}
return tools
}
// ToolNames returns an ordered list of tool names.
func (target *BuildTarget) ToolNames() []string {
ret := make([]string, 0, len(target.namedTools))
for name := range target.namedTools {
ret = append(ret, name)
}
sort.Strings(ret)
return ret
}
// NamedTools returns the tools with the given name.
func (target *BuildTarget) NamedTools(name string) []BuildInput {
return target.namedTools[name]
}
// AddDependency adds a dependency to this target. It deduplicates against any existing deps.
func (target *BuildTarget) AddDependency(dep BuildLabel) {
target.AddMaybeExportedDependency(dep, false, false, false)
}
// AddMaybeExportedDependency adds a dependency to this target which may be exported. It deduplicates against any existing deps.
func (target *BuildTarget) AddMaybeExportedDependency(dep BuildLabel, exported, source, internal bool) {
if dep == target.Label {
log.Fatalf("Attempted to add %s as a dependency of itself.\n", dep)
}
info := target.dependencyInfo(dep)
if info == nil {
target.dependencies = append(target.dependencies, depInfo{declared: dep, exported: exported, source: source, internal: internal})
} else {
info.exported = info.exported || exported
info.source = info.source && source
info.internal = info.internal && internal
info.data = false // It's not *only* data any more.
}
}
// IsTool returns true if the given build label is a tool used by this target.
func (target *BuildTarget) IsTool(tool BuildLabel) bool {
for _, t := range target.Tools {
if t == tool {
return true
}
}
for _, tools := range target.namedTools {
for _, t := range tools {
if t == tool {
return true
}
}
}
return false
}
// toolPath returns a path to this target when used as a tool.
func (target *BuildTarget) toolPath(abs bool) string {
outputs := target.Outputs()
ret := make([]string, len(outputs))
for i, o := range outputs {
if abs {
ret[i] = path.Join(RepoRoot, target.OutDir(), o)
} else {
ret[i] = path.Join(target.Label.PackageName, o)
}
}
return strings.Join(ret, " ")
}
// AddOutput adds a new output to the target if it's not already there.
func (target *BuildTarget) AddOutput(output string) {
target.outputs = target.insert(target.outputs, output)
}
// AddOptionalOutput adds a new optional output to the target if it's not already there.
func (target *BuildTarget) AddOptionalOutput(output string) {
target.OptionalOutputs = target.insert(target.OptionalOutputs, output)
}
// AddTestOutput adds a new test output to the target if it's not already there.
func (target *BuildTarget) AddTestOutput(output string) {
target.TestOutputs = target.insert(target.TestOutputs, output)
}
// AddNamedOutput adds a new output to the target under a named group.
// No attempt to deduplicate against unnamed outputs is currently made.
func (target *BuildTarget) AddNamedOutput(name, output string) {
if target.namedOutputs == nil {
target.namedOutputs = map[string][]string{name: target.insert(nil, output)}
return
}
target.namedOutputs[name] = target.insert(target.namedOutputs[name], output)
}
// insert adds a string into a slice if it's not already there. Sorted order is maintained.
func (target *BuildTarget) insert(sl []string, s string) []string {
if s == "" {
panic("Cannot add an empty string as an output of a target")
}
s = strings.TrimPrefix(s, "./")
for i, x := range sl {
if s == x {
// Already present.
return sl
} else if x > s {
// Insert in this location. Make an attempt to be efficient.
sl = append(sl, "")
copy(sl[i+1:], sl[i:])
sl[i] = s
return sl
}
}
return append(sl, s)
}
// AddLicence adds a licence to the target if it's not already there.
func (target *BuildTarget) AddLicence(licence string) {
licence = strings.TrimSpace(licence)
for _, l := range target.Licences {
if l == licence {
return
}
}
target.Licences = append(target.Licences, licence)
}
// AddHash adds a new acceptable hash to the target.
func (target *BuildTarget) AddHash(hash string) {
target.Hashes = append(target.Hashes, hash)
}
// AddRequire adds a new requirement to the target.
func (target *BuildTarget) AddRequire(require string) {
target.Requires = append(target.Requires, require)
// Requirements are also implicit labels
target.AddLabel(require)
}
// OutMode returns the mode to set outputs of a target to.
func (target *BuildTarget) OutMode() os.FileMode {
if target.IsBinary {
return 0555
}
return 0444
}
// TargetBuildMetadataFileName returns the target build metadata file name for this target.
func (target *BuildTarget) TargetBuildMetadataFileName() string {
return ".target_build_metadata_" + target.Label.Name
}
// StampFileName returns the stamp filename for this target.
func (target *BuildTarget) StampFileName() string {
return ".stamp_" + target.Label.Name
}
// NeedCoverage returns true if this target should output coverage during a test
// for a particular invocation.
func (target *BuildTarget) NeedCoverage(state *BuildState) bool {
return state.NeedCoverage && !target.NoTestOutput && !target.HasAnyLabel(state.Config.Test.DisableCoverage)
}
// Parent finds the parent of a build target, or nil if the target is parentless.
// Note that this is a fairly informal relationship; we identify it by labels with the convention of
// a leading _ and trailing hashtag on child rules, rather than storing pointers between them in the graph.
// The parent returned, if any, will be the ultimate ancestor of the target.
func (target *BuildTarget) Parent(graph *BuildGraph) *BuildTarget {
parent := target.Label.Parent()
if parent == target.Label {
return nil
}
return graph.Target(parent)
}
// HasParent returns true if the target has a parent rule that's not itself.
func (target *BuildTarget) HasParent() bool {
return target.Label.HasParent()
}
// ShouldShowProgress returns true if the target should display progress.
// This is provided as a function to satisfy the process package.
func (target *BuildTarget) ShouldShowProgress() bool {
return target.ShowProgress
}
// ProgressDescription returns a description of what the target is doing as it runs.
// This is provided as a function to satisfy the process package.
func (target *BuildTarget) ProgressDescription() string {
if target.State() >= Built && target.IsTest {
return "testing"
}
return target.BuildingDescription
}
// ShouldExitOnError returns true if the subprocess should exit when an error occurs.
func (target *BuildTarget) ShouldExitOnError() bool {
return target.ExitOnError
}
// SetProgress sets the current progress of this target.
func (target *BuildTarget) SetProgress(progress float32) {
target.Progress = progress
}
// BuildCouldModifyTarget will return true when the action of building this target could change the target itself e.g.
// by adding new outputs
func (target *BuildTarget) BuildCouldModifyTarget() bool {
return target.PostBuildFunction != nil || len(target.OutputDirectories) > 0
}
// AddOutputDirectory adds an output directory to the target
func (target *BuildTarget) AddOutputDirectory(dir string) {
target.OutputDirectories = append(target.OutputDirectories, OutputDirectory(dir))
}
// BuildTargets makes a slice of build targets sortable by their labels.
type BuildTargets []*BuildTarget
func (slice BuildTargets) Len() int {
return len(slice)
}
func (slice BuildTargets) Less(i, j int) bool {
return slice[i].Label.Less(slice[j].Label)
}
func (slice BuildTargets) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
| 1 | 9,335 | if we're introducing this now can we make this private? | thought-machine-please | go |
@@ -103,6 +103,17 @@ namespace pwiz.Skyline.Model.Results
if (_totalSteps == 0)
return measured;
+ // Before we do anything else, make sure the raw files are present
+ foreach (var f in fileInfos)
+ {
+ if (!ScanProvider.FileExists(_documentFilePath, f.FilePath))
+ {
+ throw new FileNotFoundException(TextUtil.LineSeparate(Resources.IonMobilityFinder_ProcessMSLevel_Failed_using_results_to_populate_ion_mobility_library_,
+ string.Format(Resources.ScanProvider_GetScans_The_data_file__0__could_not_be_found__either_at_its_original_location_or_in_the_document_or_document_parent_folder_,
+ f.FilePath)));
+ }
+ }
+
using (_msDataFileScanHelper = new MsDataFileScanHelper(SetScans, HandleLoadScanException, true))
{
// | 1 | /*
* Original author: Brian Pratt <bspratt .at. proteinms.net>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2015 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Threading;
using pwiz.Common.Chemistry;
using pwiz.Common.SystemUtil;
using pwiz.ProteowizardWrapper;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.Lib;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.Skyline.Util.Extensions;
namespace pwiz.Skyline.Model.Results
{
/// <summary>
/// Finds ion mobilities by examining loaded results in a document.
/// N.B. does not attempt to find multiple conformers
/// </summary>
public class IonMobilityFinder : IDisposable
{
private MsDataFileScanHelper _msDataFileScanHelper;
private readonly string _documentFilePath;
private readonly SrmDocument _document;
private TransitionGroupDocNode _currentDisplayedTransitionGroupDocNode;
private Dictionary<LibKey, List<IonMobilityIntensityPair>> _ms1IonMobilities;
private Dictionary<LibKey, List<IonMobilityIntensityPair>> _ms2IonMobilities;
private int _totalSteps;
private int _currentStep;
private readonly IProgressMonitor _progressMonitor;
private IProgressStatus _progressStatus;
private Exception _dataFileScanHelperException;
private double _maxHighEnergyDriftOffsetMsec;
private bool _useHighEnergyOffset;
private IonMobilityValue _ms1IonMobilityBest;
private struct IonMobilityIntensityPair
{
public IonMobilityAndCCS IonMobility { get; set; }
public double Intensity { get; set; }
}
/// <summary>
/// Finds ion mobilities by examining loaded results in a document.
/// </summary>
/// <param name="document">The document to be inspected</param>
/// <param name="documentFilePath">Aids in locating the raw files</param>
/// <param name="progressMonitor">Optional progress monitor for this potentially long operation</param>
public IonMobilityFinder(SrmDocument document, string documentFilePath, IProgressMonitor progressMonitor)
{
_document = document;
_documentFilePath = documentFilePath;
_currentDisplayedTransitionGroupDocNode = null;
_progressMonitor = progressMonitor;
}
public bool UseHighEnergyOffset
{
get => _useHighEnergyOffset;
set
{
_useHighEnergyOffset = value;
_maxHighEnergyDriftOffsetMsec =
_useHighEnergyOffset ? 2 : 0; // CONSIDER(bspratt): user definable? or dynamically set by looking at scan to scan drift delta? Or resolving power?
}
}
/// <summary>
/// Looks through the result and finds ion mobility values.
/// Note that this method only returns new values that were found in results.
/// The returned dictionary should be merged with the existing values in
/// order to preserve those existing values.
/// </summary>
public Dictionary<LibKey, IonMobilityAndCCS> FindIonMobilityPeaks()
{
// Overwrite any existing measurements with newly derived ones
var measured = new Dictionary<LibKey, IonMobilityAndCCS>();
if (_document.Settings.MeasuredResults == null)
return measured;
var fileInfos = _document.Settings.MeasuredResults.MSDataFileInfos.ToArray();
_totalSteps = fileInfos.Length * _document.MoleculeTransitionGroupCount;
if (_totalSteps == 0)
return measured;
using (_msDataFileScanHelper = new MsDataFileScanHelper(SetScans, HandleLoadScanException, true))
{
//
// Avoid opening and re-opening raw files - make these the outer loop
//
_ms1IonMobilities = new Dictionary<LibKey, List<IonMobilityIntensityPair>>();
_ms2IonMobilities = new Dictionary<LibKey, List<IonMobilityIntensityPair>>();
var twopercent = (int) Math.Ceiling(_totalSteps*0.02);
_totalSteps += twopercent;
_currentStep = twopercent;
if (_progressMonitor != null)
{
_progressStatus = new ProgressStatus(fileInfos.First().FilePath.GetFileName());
_progressStatus = _progressStatus.UpdatePercentCompleteProgress(_progressMonitor, _currentStep, _totalSteps); // Make that initial lag seem less dismal to the user
}
foreach (var fileInfo in fileInfos)
{
if (!ProcessFile(fileInfo))
return null; // User cancelled
}
// Find ion mobilities based on MS1 data
foreach (var dt in _ms1IonMobilities)
{
// Choose the ion mobility which gave the largest signal
// CONSIDER: average IM and CCS values that fall "near" the IM of largest signal? Or consider them multiple conformers?
var ms1IonMobility = dt.Value.OrderByDescending(p => p.Intensity).First().IonMobility;
// Check for MS2 data to use for high energy offset
List<IonMobilityIntensityPair> listDt;
var ms2IonMobility = _ms2IonMobilities.TryGetValue(dt.Key, out listDt)
? listDt.OrderByDescending(p => p.Intensity).First().IonMobility
: ms1IonMobility;
var highEnergyIonMobilityValueOffset = Math.Round(ms2IonMobility.IonMobility.Mobility.Value - ms1IonMobility.IonMobility.Mobility.Value, 6); // Excessive precision is just distracting noise TODO(bspratt) ask vendors what "excessive" means here
var value = IonMobilityAndCCS.GetIonMobilityAndCCS(ms1IonMobility.IonMobility, ms1IonMobility.CollisionalCrossSectionSqA, highEnergyIonMobilityValueOffset);
if (!measured.ContainsKey(dt.Key))
measured.Add(dt.Key, value);
else
measured[dt.Key] = value;
}
// Check for data for which we have only MS2 to go on
foreach (var im in _ms2IonMobilities)
{
if (!_ms1IonMobilities.ContainsKey(im.Key))
{
// Only MS2 ion mobility values found, use that as a reasonable inference of MS1 ion mobility
var driftTimeIntensityPair = im.Value.OrderByDescending(p => p.Intensity).First();
var value = driftTimeIntensityPair.IonMobility;
// Note collisional cross section
if (_msDataFileScanHelper.ProvidesCollisionalCrossSectionConverter)
{
var mz = im.Key.PrecursorMz ?? GetMzFromDocument(im.Key);
var ccs = _msDataFileScanHelper.CCSFromIonMobility(value.IonMobility,
mz, im.Key.Charge);
if (ccs.HasValue)
{
value = IonMobilityAndCCS.GetIonMobilityAndCCS(value.IonMobility, ccs, value.HighEnergyIonMobilityValueOffset);
}
}
if (!measured.ContainsKey(im.Key))
measured.Add(im.Key, value);
else
measured[im.Key] = value;
}
}
}
return measured;
}
double GetMzFromDocument(LibKey key)
{
foreach (var pair in _document.MoleculePrecursorPairs)
{
var nodePep = pair.NodePep;
var nodeGroup = pair.NodeGroup;
var libKey = nodeGroup.GetLibKey(_document.Settings, nodePep);
if (key.Equals(libKey))
{
return nodeGroup.PrecursorMz;
}
}
return 0.0;
}
// Returns false on cancellation
private bool ProcessFile(ChromFileInfo fileInfo)
{
var results = _document.Settings.MeasuredResults;
if (!results.MSDataFileInfos.Contains(fileInfo))
return true; // Nothing to do
var filePath = fileInfo.FilePath;
if (_progressStatus != null)
{
_progressStatus = _progressStatus.ChangeMessage(filePath.GetFileName());
}
_currentDisplayedTransitionGroupDocNode = null;
var tolerance = (float)_document.Settings.TransitionSettings.Instrument.MzMatchTolerance;
foreach (var pair in _document.MoleculePrecursorPairs)
{
var nodePep = pair.NodePep;
var nodeGroup = pair.NodeGroup;
var libKey = nodeGroup.GetLibKey(_document.Settings, nodePep);
// Across all replicates for this precursor, note the ion mobility at max intensity for this mz
for (var i = 0; i < results.Chromatograms.Count; i++)
{
if (_progressMonitor != null && _progressMonitor.IsCanceled)
return false;
ChromatogramGroupInfo[] chromGroupInfos;
results.TryLoadChromatogram(i, nodePep, nodeGroup, tolerance, out chromGroupInfos);
foreach (var chromInfo in chromGroupInfos.Where(c => Equals(filePath, c.FilePath)))
{
if (!ProcessChromInfo(fileInfo, chromInfo, pair, nodeGroup, tolerance, libKey))
return false; // User cancelled
}
}
}
return true;
}
private bool ProcessChromInfo(ChromFileInfo fileInfo, ChromatogramGroupInfo chromInfo, PeptidePrecursorPair pair,
TransitionGroupDocNode nodeGroup, float tolerance, LibKey libKey)
{
if (chromInfo.NumPeaks == 0) // Due to data polarity mismatch, probably
return true;
Assume.IsTrue(chromInfo.BestPeakIndex != -1);
var filePath = fileInfo.FilePath;
var resultIndex = _document.Settings.MeasuredResults.Chromatograms.IndexOf(c => c.GetFileInfo(filePath) != null);
if (resultIndex == -1)
return true;
var chromFileInfo = _document.Settings.MeasuredResults.Chromatograms[resultIndex].GetFileInfo(filePath);
Assume.IsTrue(Equals(chromFileInfo.FilePath.GetLockMassParameters(), filePath.GetLockMassParameters()));
// Determine apex RT for DT measurement using most intense MS1 peak
var apexRT = GetApexRT(nodeGroup, resultIndex, chromFileInfo, true) ??
GetApexRT(nodeGroup, resultIndex, chromFileInfo, false);
if (!apexRT.HasValue)
{
return true;
}
            Assume.IsTrue(chromInfo.PrecursorMz.CompareTolerant(pair.NodeGroup.PrecursorMz, 1.0E-9f) == 0, @"mismatch in precursor values");
// Only use the transitions currently enabled
var transitionPointSets = chromInfo.TransitionPointSets.Where(
tp => nodeGroup.Transitions.Any(
t => (t.Mz - (tp.ExtractionWidth ?? tolerance)/2) <= tp.ProductMz &&
(t.Mz + (tp.ExtractionWidth ?? tolerance)/2) >= tp.ProductMz))
.ToArray();
for (var msLevel = 1; msLevel <= 2; msLevel++)
{
if (!ProcessMSLevel(fileInfo, msLevel, transitionPointSets, chromInfo, apexRT, nodeGroup, libKey, tolerance))
return false; // User cancelled
}
return true;
}
private static double? GetApexRT(TransitionGroupDocNode nodeGroup, int resultIndex, ChromFileInfo chromFileInfo, bool ms1Trans)
{
double? apexRT = null;
float ms1Max = 0;
var trans = ms1Trans
? nodeGroup.GetMsTransitions(true)
: nodeGroup.GetMsMsTransitions(true);
foreach (var nodeTran in trans)
{
foreach (var peakInfo in nodeTran.GetChromInfos(resultIndex).Where(c =>
ReferenceEquals(c.FileId, chromFileInfo.FileId)))
{
if (peakInfo.Area > ms1Max)
{
apexRT = peakInfo.RetentionTime;
ms1Max = peakInfo.Area;
}
}
}
return apexRT;
}
private bool ProcessMSLevel(ChromFileInfo fileInfo, int msLevel, IEnumerable<ChromatogramInfo> transitionPointSets,
ChromatogramGroupInfo chromInfo, double? apexRT, TransitionGroupDocNode nodeGroup, LibKey libKey, float tolerance)
{
var transitions = new List<TransitionFullScanInfo>();
var chromSource = (msLevel == 1) ? ChromSource.ms1 : ChromSource.fragment;
IList<float> times = null;
foreach (var tranPointSet in transitionPointSets.Where(t => t.Source == chromSource))
{
transitions.Add(new TransitionFullScanInfo
{
//Name = tranPointSet.Header.,
Source = chromSource,
TimeIntensities = tranPointSet.TimeIntensities,
PrecursorMz = chromInfo.PrecursorMz,
ProductMz = tranPointSet.ProductMz,
ExtractionWidth = tranPointSet.ExtractionWidth,
//Id = nodeTran.Id
});
times = tranPointSet.Times;
}
if (!transitions.Any())
{
return true; // Nothing to do at this ms level
}
var filePath = fileInfo.FilePath;
IScanProvider scanProvider = new ScanProvider(_documentFilePath, filePath,
chromSource, times, transitions.ToArray(), _document.Settings.MeasuredResults);
// Across all spectra at the peak retention time, find the one with max total
// intensity for the mz's of interest (ie the isotopic distribution) and note its ion mobility.
var scanIndex = MsDataFileScanHelper.FindScanIndex(times, apexRT.Value);
_msDataFileScanHelper.UpdateScanProvider(scanProvider, 0, scanIndex);
_msDataFileScanHelper.MsDataSpectra = null; // Reset
scanIndex = _msDataFileScanHelper.GetScanIndex();
_msDataFileScanHelper.ScanProvider.SetScanForBackgroundLoad(scanIndex);
lock (this)
{
while (_msDataFileScanHelper.MsDataSpectra == null && _dataFileScanHelperException == null)
{
if (_progressMonitor != null && _progressMonitor.IsCanceled)
return false;
Monitor.Wait(this, 500); // Let background loader do its thing
}
}
if (_dataFileScanHelperException != null)
{
throw new IOException(TextUtil.LineSeparate(Resources.IonMobilityFinder_ProcessMSLevel_Failed_using_results_to_populate_ion_mobility_library_, _dataFileScanHelperException.Message), _dataFileScanHelperException);
}
if (_progressMonitor != null && !ReferenceEquals(nodeGroup, _currentDisplayedTransitionGroupDocNode))
{
// Do this after scan load so first group after file switch doesn't seem laggy
_progressStatus = _progressStatus.ChangeMessage(TextUtil.LineSeparate(filePath.GetFileName(), nodeGroup.ToString())).
UpdatePercentCompleteProgress(_progressMonitor, _currentStep++, _totalSteps);
_currentDisplayedTransitionGroupDocNode = nodeGroup;
}
EvaluateBestIonMobilityValue(msLevel, libKey, tolerance, transitions);
return true;
}
private void EvaluateBestIonMobilityValue(int msLevel, LibKey libKey, float tolerance, List<TransitionFullScanInfo> transitions)
{
IonMobilityValue ionMobilityValue = IonMobilityValue.EMPTY;
double maxIntensity = 0;
// Avoid picking MS2 ion mobility values wildly different from MS1 values
if ((msLevel == 2) && _ms1IonMobilities.ContainsKey(libKey))
{
_ms1IonMobilityBest =
_ms1IonMobilities[libKey].OrderByDescending(p => p.Intensity)
.FirstOrDefault()
.IonMobility.IonMobility;
}
else
{
_ms1IonMobilityBest = IonMobilityValue.EMPTY;
}
var totalIntensitiesPerIM = new Dictionary<double, double>();
double ionMobilityAtMaxIntensity = 0;
var isThreeArrayFormat = false;
foreach (var scan in _msDataFileScanHelper.MsDataSpectra.Where(scan => scan != null))
{
isThreeArrayFormat = scan.IonMobilities != null;
if (!isThreeArrayFormat)
{
if (!scan.IonMobility.HasValue || !scan.Mzs.Any())
continue;
if (IsExtremeMs2Value(scan.IonMobility.Mobility.Value))
continue;
// Get the total intensity for all transitions of current msLevel
double totalIntensity = 0;
foreach (var t in transitions)
{
var mzHigh = FindRangeMz(tolerance, t, scan, out var first);
for (var i = first; i < scan.Mzs.Length; i++)
{
if (scan.Mzs[i] > mzHigh)
break;
totalIntensity += scan.Intensities[i];
}
}
if (maxIntensity < totalIntensity)
{
ionMobilityValue = scan.IonMobility;
maxIntensity = totalIntensity;
}
}
else // 3-array IMS format
{
// Get the total intensity for all transitions of current msLevel
foreach (var t in transitions)
{
var mzHigh = FindRangeMz(tolerance, t, scan, out var first);
for (var i = first; i < scan.Mzs.Length; i++)
{
if (scan.Mzs[i] > mzHigh)
break;
var im = scan.IonMobilities[i];
if (IsExtremeMs2Value(im))
continue;
var intensityThisMzAndIM = scan.Intensities[i];
if (!totalIntensitiesPerIM.TryGetValue(im, out var totalIntensityThisIM))
{
totalIntensityThisIM = intensityThisMzAndIM;
totalIntensitiesPerIM.Add(im, totalIntensityThisIM);
}
else
{
totalIntensityThisIM += intensityThisMzAndIM;
totalIntensitiesPerIM[im] = totalIntensityThisIM;
}
if (maxIntensity < totalIntensityThisIM)
{
maxIntensity = totalIntensityThisIM;
ionMobilityAtMaxIntensity = im;
}
}
}
}
}
if (isThreeArrayFormat)
{
ionMobilityValue = IonMobilityValue.GetIonMobilityValue(ionMobilityAtMaxIntensity, _msDataFileScanHelper.ScanProvider.IonMobilityUnits);
}
if (ionMobilityValue.HasValue)
{
var dict = (msLevel == 1) ? _ms1IonMobilities : _ms2IonMobilities;
var ccs = msLevel == 1 && _msDataFileScanHelper.ProvidesCollisionalCrossSectionConverter ? _msDataFileScanHelper.CCSFromIonMobility(ionMobilityValue, transitions.First().PrecursorMz, libKey.Charge) : null;
var result = new IonMobilityIntensityPair
{
IonMobility = IonMobilityAndCCS.GetIonMobilityAndCCS(ionMobilityValue, ccs, 0),
Intensity = maxIntensity
};
List<IonMobilityIntensityPair> listPairs;
if (!dict.TryGetValue(libKey, out listPairs))
{
listPairs = new List<IonMobilityIntensityPair>();
dict.Add(libKey, listPairs);
}
listPairs.Add(result);
}
}
private static SignedMz FindRangeMz(float tolerance, TransitionFullScanInfo t, MsDataSpectrum scan, out int first)
{
Assume.IsTrue(t.ProductMz.IsNegative == scan.NegativeCharge); // It would be strange if associated scan did not have same polarity
var mzPeak = t.ProductMz;
var halfwin = (t.ExtractionWidth ?? tolerance) / 2;
var mzLow = mzPeak - halfwin;
var mzHigh = mzPeak + halfwin;
first = Array.BinarySearch(scan.Mzs, mzLow);
if (first < 0)
first = ~first;
return mzHigh;
}
private bool IsExtremeMs2Value(double im)
{
return _ms1IonMobilityBest.HasValue &&
(im < _ms1IonMobilityBest.Mobility - _maxHighEnergyDriftOffsetMsec ||
im > _ms1IonMobilityBest.Mobility + _maxHighEnergyDriftOffsetMsec);
}
private void HandleLoadScanException(Exception ex)
{
lock (this)
{
_dataFileScanHelperException = ex;
if (_msDataFileScanHelper != null)
_msDataFileScanHelper.MsDataSpectra = null;
Monitor.PulseAll(this);
}
}
private void SetScans(MsDataSpectrum[] scans)
{
lock (this)
{
_msDataFileScanHelper.MsDataSpectra = scans;
Monitor.PulseAll(this);
}
}
public void Dispose()
{
if (_msDataFileScanHelper != null)
{
_msDataFileScanHelper.Dispose();
_msDataFileScanHelper = null;
}
}
}
}
| 1 | 14,714 | Something is wrong with the indentation here. | ProteoWizard-pwiz | .cs |
@@ -18,12 +18,13 @@ package importtestsuites
import (
"context"
"fmt"
+ "github.com/GoogleCloudPlatform/compute-image-tools/gce_image_import_export_tests/compute"
"log"
"regexp"
+ "strings"
"sync"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/path"
- "github.com/GoogleCloudPlatform/compute-image-tools/gce_image_import_export_tests/compute"
"github.com/GoogleCloudPlatform/compute-image-tools/gce_image_import_export_tests/test_suites"
"github.com/GoogleCloudPlatform/compute-image-tools/go/e2e_test_utils/junitxml"
"github.com/GoogleCloudPlatform/compute-image-tools/go/e2e_test_utils/test_config" | 1 | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package importtestsuites contains e2e tests for image import cli tools
package importtestsuites
import (
"context"
"fmt"
"log"
"regexp"
"sync"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/path"
"github.com/GoogleCloudPlatform/compute-image-tools/gce_image_import_export_tests/compute"
"github.com/GoogleCloudPlatform/compute-image-tools/gce_image_import_export_tests/test_suites"
"github.com/GoogleCloudPlatform/compute-image-tools/go/e2e_test_utils/junitxml"
"github.com/GoogleCloudPlatform/compute-image-tools/go/e2e_test_utils/test_config"
)
const (
testSuiteName = "ImageImportTests"
)
// TestSuite is image import test suite.
func TestSuite(
ctx context.Context, tswg *sync.WaitGroup, testSuites chan *junitxml.TestSuite,
logger *log.Logger, testSuiteRegex, testCaseRegex *regexp.Regexp,
testProjectConfig *testconfig.Project) {
imageImportDataDiskTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[ImageImport] %v", "Import data disk"))
imageImportOSTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[ImageImport] %v", "Import OS"))
imageImportOSFromImageTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[ImageImport] %v", "Import OS from image"))
testsMap := map[*junitxml.TestCase]func(
context.Context, *junitxml.TestCase, *log.Logger, *testconfig.Project){
imageImportDataDiskTestCase: runImageImportDataDiskTest,
imageImportOSTestCase: runImageImportOSTest,
imageImportOSFromImageTestCase: runImageImportOSFromImageTest,
}
testsuiteutils.TestSuite(ctx, tswg, testSuites, logger, testSuiteRegex, testCaseRegex,
testProjectConfig, testSuiteName, testsMap)
}
func runImageImportDataDiskTest(
ctx context.Context, testCase *junitxml.TestCase,
logger *log.Logger, testProjectConfig *testconfig.Project) {
suffix := pathutils.RandString(5)
imageName := "e2e-test-image-import-data-disk-" + suffix
cmd := "gce_vm_image_import"
args := []string{"-client_id=e2e", fmt.Sprintf("-project=%v", testProjectConfig.TestProjectID),
fmt.Sprintf("-image_name=%s", imageName), "-data_disk", fmt.Sprintf("-source_file=gs://%v-test-image/image-file-10g-vmdk", testProjectConfig.TestProjectID)}
testsuiteutils.RunCliTool(logger, testCase, cmd, args)
verifyImportedImage(ctx, testCase, testProjectConfig, imageName, logger)
}
func runImageImportOSTest(
ctx context.Context, testCase *junitxml.TestCase,
logger *log.Logger, testProjectConfig *testconfig.Project) {
suffix := pathutils.RandString(5)
imageName := "e2e-test-image-import-os-" + suffix
cmd := "gce_vm_image_import"
args := []string{"-client_id=e2e", fmt.Sprintf("-project=%v", testProjectConfig.TestProjectID),
fmt.Sprintf("-image_name=%v", imageName), "-os=debian-9", fmt.Sprintf("-source_file=gs://%v-test-image/image-file-10g-vmdk", testProjectConfig.TestProjectID)}
testsuiteutils.RunCliTool(logger, testCase, cmd, args)
verifyImportedImage(ctx, testCase, testProjectConfig, imageName, logger)
}
func runImageImportOSFromImageTest(
ctx context.Context, testCase *junitxml.TestCase,
logger *log.Logger, testProjectConfig *testconfig.Project) {
suffix := pathutils.RandString(5)
imageName := "e2e-test-image-import-os-from-image-" + suffix
cmd := "gce_vm_image_import"
args := []string{"-client_id=e2e", fmt.Sprintf("-project=%v", testProjectConfig.TestProjectID),
fmt.Sprintf("-image_name=%v", imageName), "-os=debian-9", "-source_image=e2e-test-image-10g"}
testsuiteutils.RunCliTool(logger, testCase, cmd, args)
verifyImportedImage(ctx, testCase, testProjectConfig, imageName, logger)
}
func verifyImportedImage(ctx context.Context, testCase *junitxml.TestCase,
testProjectConfig *testconfig.Project, imageName string, logger *log.Logger) {
logger.Printf("Verifying imported image...")
image, err := compute.CreateImageObject(ctx, testProjectConfig.TestProjectID, imageName)
if err != nil {
testCase.WriteFailure("Error creating compute api client: %v", err)
logger.Printf("Error creating compute api client: %v", err)
return
}
if err := image.Exists(); err != nil {
testCase.WriteFailure("Image '%v' doesn't exist after import: %v", imageName, err)
logger.Printf("Image '%v' doesn't exist after import: %v", imageName, err)
return
}
logger.Printf("Image '%v' exists! Import success.", imageName)
if err := image.Cleanup(); err != nil {
logger.Printf("Image '%v' failed to clean up.", imageName)
} else {
logger.Printf("Image '%v' cleaned up.", imageName)
}
}
| 1 | 8,936 | This should go to the external imports group below | GoogleCloudPlatform-compute-image-tools | go |
@@ -36,6 +36,7 @@ func NewInstaller(dc dynamic.Interface, config map[string]string, paths ...strin
}
for i, p := range paths {
+ log.Println("processing yaml folder", p)
paths[i] = ParseTemplates(p, config)
}
path := strings.Join(paths, ",") | 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"text/template"
yaml "github.com/jcrossley3/manifestival/pkg/manifestival"
"k8s.io/client-go/dynamic"
)
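// NewInstaller creates an Installer whose manifest is built from the given YAML paths after template
// processing with the supplied config values; when no paths are given it falls back to the default ko install path.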
func NewInstaller(dc dynamic.Interface, config map[string]string, paths ...string) *Installer {
if len(paths) == 0 || (len(paths) == 1 && paths[0] == "") {
// default to ko path:
paths[0] = "/var/run/ko/install"
}
for i, p := range paths {
paths[i] = ParseTemplates(p, config)
}
path := strings.Join(paths, ",")
manifest, err := yaml.NewYamlManifest(path, true, dc)
if err != nil {
panic(err)
}
return &Installer{dc: dc, manifest: manifest}
}
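// ParseTemplates renders every .yaml file under path through text/template using the provided config values,
// writes the rendered files into a temporary directory, and returns that directory's path.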
func ParseTemplates(path string, config map[string]string) string {
dir, err := ioutil.TempDir("", "processed_yaml")
if err != nil {
panic(err)
}
err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
if strings.HasSuffix(info.Name(), "yaml") {
t, err := template.ParseFiles(path)
if err != nil {
return err
}
tmpfile, err := ioutil.TempFile(dir, strings.Replace(info.Name(), ".yaml", "-*.yaml", 1))
if err != nil {
log.Fatal(err)
}
err = t.Execute(tmpfile, config)
if err != nil {
log.Print("execute: ", err)
return err
}
_ = tmpfile.Close()
}
return nil
})
log.Print("new files in ", dir)
if err != nil {
panic(err)
}
return dir
}
type Installer struct {
dc dynamic.Interface
manifest yaml.Manifest
}
func (r *Installer) Do(verb string) error {
switch strings.ToLower(verb) {
case "create", "setup", "install", "apply", "start":
return r.manifest.ApplyAll()
case "delete", "teardown", "uninstall", "unapply", "stop":
return r.manifest.DeleteAll()
default:
return fmt.Errorf("unknown verb: %s", verb)
}
}
| 1 | 8,704 | Intended to be checked in? | google-knative-gcp | go |
@@ -307,7 +307,10 @@ public class MicroserviceRegisterTask extends AbstractRegisterTask {
// Currently nothing to do but print a warning
LOGGER.warn("There are schemas only existing in service center: {}, which means there are interfaces changed. "
- + "It's recommended to increment microservice version before deploying.", scSchemaMap.keySet());
+ + "It's recommended to increment microservice version before deploying.",
+ scSchemaMap.keySet());
+ LOGGER.warn("ATTENTION: The schemas in new version are less than the old version, "
+ + "which may cause compatibility problems.");
}
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.serviceregistry.task;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import javax.ws.rs.core.Response.Status;
import org.apache.servicecomb.foundation.common.base.ServiceCombConstants;
import org.apache.servicecomb.serviceregistry.RegistryUtils;
import org.apache.servicecomb.serviceregistry.api.registry.Microservice;
import org.apache.servicecomb.serviceregistry.api.response.GetSchemaResponse;
import org.apache.servicecomb.serviceregistry.client.ServiceRegistryClient;
import org.apache.servicecomb.serviceregistry.client.http.Holder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.StringUtils;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
public class MicroserviceRegisterTask extends AbstractRegisterTask {
private static final Logger LOGGER = LoggerFactory.getLogger(MicroserviceRegisterTask.class);
private boolean schemaIdSetMatch;
public MicroserviceRegisterTask(EventBus eventBus, ServiceRegistryClient srClient, Microservice microservice) {
super(eventBus, srClient, microservice);
this.taskStatus = TaskStatus.READY;
}
public boolean isSchemaIdSetMatch() {
return schemaIdSetMatch;
}
@Subscribe
public void onMicroserviceInstanceHeartbeatTask(MicroserviceInstanceHeartbeatTask task) {
if (task.getHeartbeatResult() != HeartbeatResult.SUCCESS && isSameMicroservice(task.getMicroservice())) {
LOGGER.info("read MicroserviceInstanceHeartbeatTask status is {}", task.taskStatus);
this.taskStatus = TaskStatus.READY;
this.registered = false;
}
}
@Subscribe
public void onInstanceRegistryFailed(MicroserviceInstanceRegisterTask task) {
if (task.taskStatus != TaskStatus.FINISHED) {
LOGGER.info("read MicroserviceInstanceRegisterTask status is {}", task.taskStatus);
this.taskStatus = TaskStatus.READY;
this.registered = false;
}
}
@Override
protected boolean doRegister() {
LOGGER.info("running microservice register task.");
String serviceId = srClient.getMicroserviceId(microservice.getAppId(),
microservice.getServiceName(),
microservice.getVersion(),
microservice.getEnvironment());
if (!StringUtils.isEmpty(serviceId)) {
// This microservice has been registered, so we just use the serviceId gotten from service center
microservice.setServiceId(serviceId);
LOGGER.info(
"Microservice exists in service center, no need to register. id=[{}] appId=[{}], name=[{}], version=[{}], env=[{}]",
serviceId,
microservice.getAppId(),
microservice.getServiceName(),
microservice.getVersion(),
microservice.getEnvironment());
if (!checkSchemaIdSet()) {
return false;
}
} else {
serviceId = srClient.registerMicroservice(microservice);
if (StringUtils.isEmpty(serviceId)) {
LOGGER.error(
"Registry microservice failed. appId=[{}], name=[{}], version=[{}], env=[{}]",
microservice.getAppId(),
microservice.getServiceName(),
microservice.getVersion(),
microservice.getEnvironment());
return false;
}
// In re-register microservice case, the old instanceId should not be cached
microservice.getInstance().setInstanceId(null);
LOGGER.info(
"Registry Microservice successfully. id=[{}] appId=[{}], name=[{}], version=[{}], schemaIds={}, env=[{}]",
serviceId,
microservice.getAppId(),
microservice.getServiceName(),
microservice.getVersion(),
microservice.getSchemas(),
microservice.getEnvironment());
}
microservice.setServiceId(serviceId);
microservice.getInstance().setServiceId(microservice.getServiceId());
return registerSchemas();
}
private boolean checkSchemaIdSet() {
Microservice existMicroservice = srClient.getMicroservice(microservice.getServiceId());
if (existMicroservice == null) {
LOGGER.error("Error to get microservice from service center when check schema set");
return false;
}
Set<String> existSchemas = new HashSet<>(existMicroservice.getSchemas());
Set<String> localSchemas = new HashSet<>(microservice.getSchemas());
schemaIdSetMatch = existSchemas.equals(localSchemas);
if (!schemaIdSetMatch) {
LOGGER.warn(
"SchemaIds is different between local and service center. "
+ "serviceId=[{}] appId=[{}], name=[{}], version=[{}], env=[{}], local schemaIds={}, service center schemaIds={}",
microservice.getServiceId(),
microservice.getAppId(),
microservice.getServiceName(),
microservice.getVersion(),
microservice.getEnvironment(),
localSchemas,
existSchemas);
return true;
}
LOGGER.info(
"SchemaIds are equals to service center. serviceId=[{}], appId=[{}], name=[{}], version=[{}], env=[{}], schemaIds={}",
microservice.getServiceId(),
microservice.getAppId(),
microservice.getServiceName(),
microservice.getVersion(),
microservice.getEnvironment(),
localSchemas);
return true;
}
private boolean registerSchemas() {
Holder<List<GetSchemaResponse>> scSchemaHolder = srClient.getSchemas(microservice.getServiceId());
if (Status.OK.getStatusCode() != scSchemaHolder.getStatusCode()) {
LOGGER.error("failed to get schemas from service center, statusCode = [{}]", scSchemaHolder.getStatusCode());
return false;
}
Map<String, GetSchemaResponse> scSchemaMap = convertScSchemaMap(scSchemaHolder);
// CHECK: local > sc, local != sc
for (Entry<String, String> localSchemaEntry : microservice.getSchemaMap().entrySet()) {
if (!registerSchema(scSchemaMap, localSchemaEntry)) {
return false;
}
}
// CHECK: local < sc
checkRemainingSchema(scSchemaMap);
schemaIdSetMatch = true;
return true;
}
/**
* Check whether a local schema is equal to a sc schema.
   * @return true if the local schema is equal to a sc schema, or is registered to sc successfully;
   * false if the schema has to be registered to sc but the registration fails.
   * @throws IllegalStateException The environment is not modifiable, and the local schema is different from the sc schema
   * or does not exist in sc.
*/
private boolean registerSchema(Map<String, GetSchemaResponse> scSchemaMap,
Entry<String, String> localSchemaEntry) {
GetSchemaResponse scSchema = scSchemaMap.get(localSchemaEntry.getKey());
boolean onlineSchemaExists = scSchema != null;
LOGGER.info("schemaId [{}] exists [{}], summary exists [{}]", localSchemaEntry.getKey(), onlineSchemaExists,
scSchema != null && scSchema.getSummary() != null);
if (!onlineSchemaExists) {
// local > sc
return registerNewSchema(localSchemaEntry);
}
scSchemaMap.remove(localSchemaEntry.getKey());
// local != sc
return compareAndReRegisterSchema(localSchemaEntry, scSchema);
}
/**
* Try to register a new schema to service center, or throw exception if cannot register.
* @param localSchemaEntry local schema to be registered.
* @return whether local schema is registered successfully.
* @throws IllegalStateException The environment is unmodifiable.
*/
private boolean registerNewSchema(Entry<String, String> localSchemaEntry) {
    // The ids of schemas are contained in the microservice registry request, which means that once a microservice
    // is registered in the service center, the schemas that it contains are determined.
    // If we get a microservice but cannot find the given schemaId in its schemaId list, this means that
    // the schemas of this microservice have been changed, and we should decide whether to register this new
    // schema according to its environment configuration.
if (onlineSchemaIsModifiable()) {
return registerSingleSchema(localSchemaEntry.getKey(), localSchemaEntry.getValue());
}
throw new IllegalStateException(
"There is a schema only existing in local microservice: [" + localSchemaEntry.getKey()
+ "], which means there are interfaces changed. "
+ "You need to increment microservice version before deploying, "
+ "or you can configure service_description.environment="
+ ServiceCombConstants.DEVELOPMENT_SERVICECOMB_ENV
+ " to work in development environment and ignore this error");
}
/**
* Compare schema summary and determine whether to re-register schema or throw exception.
* @param localSchemaEntry local schema
* @param scSchema schema in service center
* @return true if the two copies of schema are the same, or local schema is re-registered successfully,
* false if the local schema is re-registered to service center but failed.
* @throws IllegalStateException The two copies of schema are different and the environment is not modifiable.
*/
private boolean compareAndReRegisterSchema(Entry<String, String> localSchemaEntry, GetSchemaResponse scSchema) {
String scSchemaSummary = getScSchemaSummary(scSchema);
if (null == scSchemaSummary) {
// cannot get scSchemaSummary, which means there is no schema content in sc, register schema directly
return registerSingleSchema(localSchemaEntry.getKey(), localSchemaEntry.getValue());
}
String localSchemaSummary = RegistryUtils.calcSchemaSummary(localSchemaEntry.getValue());
if (!localSchemaSummary.equals(scSchemaSummary)) {
if (onlineSchemaIsModifiable()) {
LOGGER.info(
"schema[{}]'s content is changed and the current environment is [{}], so re-register it!",
localSchemaEntry.getKey(), ServiceCombConstants.DEVELOPMENT_SERVICECOMB_ENV);
return registerSingleSchema(localSchemaEntry.getKey(), localSchemaEntry.getValue());
}
// env is not development, throw an exception and break the init procedure
throw new IllegalStateException(
"The schema(id=[" + localSchemaEntry.getKey()
+ "]) content held by this instance and the service center is different. "
+ "You need to increment microservice version before deploying. "
+ "Or you can configure service_description.environment="
+ ServiceCombConstants.DEVELOPMENT_SERVICECOMB_ENV
+ " to work in development environment and ignore this error");
}
// summaries are the same
return true;
}
/**
* Try to get or calculate scSchema summary.
* @return summary of scSchema,
* or null if there is no schema content in service center
*/
private String getScSchemaSummary(GetSchemaResponse scSchema) {
String scSchemaSummary = scSchema.getSummary();
if (null != scSchemaSummary) {
return scSchemaSummary;
}
    // if there is no online summary, query online schema content directly and calculate summary
String onlineSchemaContent = srClient.getSchema(microservice.getServiceId(), scSchema.getSchemaId());
if (null != onlineSchemaContent) {
scSchemaSummary = RegistryUtils.calcSchemaSummary(onlineSchemaContent);
}
return scSchemaSummary;
}
/**
   * Check whether there are schemas remaining in the service center that do not exist in the local microservice.
* @throws IllegalStateException There are schemas only existing in service center, and the environment is unmodifiable.
*/
private void checkRemainingSchema(Map<String, GetSchemaResponse> scSchemaMap) {
if (!scSchemaMap.isEmpty()) {
// there are some schemas only exist in service center
if (!onlineSchemaIsModifiable()) {
// env is not development, throw an exception and break the init procedure
throw new IllegalStateException("There are schemas only existing in service center: " + scSchemaMap.keySet()
+ ", which means there are interfaces changed. "
+ "You need to increment microservice version before deploying, "
+ "or if service_description.environment="
+ ServiceCombConstants.DEVELOPMENT_SERVICECOMB_ENV
+ ", you can delete microservice information in service center and restart this instance.");
}
// Currently nothing to do but print a warning
LOGGER.warn("There are schemas only existing in service center: {}, which means there are interfaces changed. "
+ "It's recommended to increment microservice version before deploying.", scSchemaMap.keySet());
}
}
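  // Schema changes are only tolerated when the microservice is running in the development environment.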
private boolean onlineSchemaIsModifiable() {
return ServiceCombConstants.DEVELOPMENT_SERVICECOMB_ENV.equalsIgnoreCase(microservice.getEnvironment());
}
/**
* Register a schema directly.
* @return true if register success, otherwise false
*/
private boolean registerSingleSchema(String schemaId, String content) {
return srClient.registerSchema(microservice.getServiceId(), schemaId, content);
}
private Map<String, GetSchemaResponse> convertScSchemaMap(Holder<List<GetSchemaResponse>> scSchemaHolder) {
Map<String, GetSchemaResponse> scSchemaMap = new HashMap<>();
List<GetSchemaResponse> scSchemaList = scSchemaHolder.getValue();
if (null == scSchemaList) {
return scSchemaMap;
}
for (GetSchemaResponse scSchema : scSchemaList) {
scSchemaMap.put(scSchema.getSchemaId(), scSchema);
}
return scSchemaMap;
}
}
| 1 | 9,861 | I think this warning message is the same as above one. Anyway, it's fine to keep it. | apache-servicecomb-java-chassis | java |
@@ -46,7 +46,7 @@ namespace Nethermind.Logging
if (NLog.LogManager.Configuration?.AllTargets.SingleOrDefault(t => t.Name == "file") is FileTarget target)
{
- target.FileName = !Path.IsPathFullyQualified(fileName) ? Path.Combine("logs", fileName) : fileName;
+ target.FileName = !Path.IsPathFullyQualified(fileName) ? Path.Combine(logsDir, fileName) : fileName;
}
/* NOTE: minor perf gain - not planning to switch logging levels while app is running */ | 1 | /*
* Copyright (c) 2018 Demerzel Solutions Limited
* This file is part of the Nethermind library.
*
* The Nethermind library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The Nethermind library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using System.IO;
using System.Linq;
using NLog.Targets;
namespace Nethermind.Logging
{
public class NLogLogger : ILogger
{
public bool IsError { get; }
public bool IsWarn { get; }
public bool IsInfo { get; }
public bool IsDebug { get; }
public bool IsTrace { get; }
internal readonly NLog.Logger Logger;
public NLogLogger(Type type, string fileName, string logDirectory = null, string loggerName = null)
{
loggerName = string.IsNullOrEmpty(loggerName) ? type.FullName.Replace("Nethermind.", string.Empty) : loggerName;
Logger = NLog.LogManager.GetLogger(loggerName);
var logsDir = string.IsNullOrEmpty(logDirectory) ? Path.Combine(PathUtils.GetExecutingDirectory(), "logs") : logDirectory;
if (!Directory.Exists(logsDir))
{
Directory.CreateDirectory(logsDir);
}
if (NLog.LogManager.Configuration?.AllTargets.SingleOrDefault(t => t.Name == "file") is FileTarget target)
{
target.FileName = !Path.IsPathFullyQualified(fileName) ? Path.Combine("logs", fileName) : fileName;
}
/* NOTE: minor perf gain - not planning to switch logging levels while app is running */
// TODO: review the behaviour on log levels switching which we have just added recently...
IsInfo = Logger.IsInfoEnabled;
IsWarn = Logger.IsWarnEnabled;
IsDebug = Logger.IsDebugEnabled;
IsTrace = Logger.IsTraceEnabled;
IsError = Logger.IsErrorEnabled || Logger.IsFatalEnabled;
}
public NLogLogger(string fileName, string logDirectory = null, string loggerName = null)
{
loggerName = string.IsNullOrEmpty(loggerName) ? StackTraceUsageUtils.GetClassFullName().Replace("Nethermind.", string.Empty) : loggerName;
Logger = NLog.LogManager.GetLogger(loggerName);
var logsDir = string.IsNullOrEmpty(logDirectory) ? Path.Combine(PathUtils.GetExecutingDirectory(), "logs") : logDirectory;
if (!Directory.Exists(logsDir))
{
Directory.CreateDirectory(logsDir);
}
if (NLog.LogManager.Configuration?.AllTargets.SingleOrDefault(t => t.Name == "file") is FileTarget target)
{
target.FileName = !Path.IsPathFullyQualified(fileName) ? Path.Combine("logs", fileName) : fileName;
}
/* NOTE: minor perf gain - not planning to switch logging levels while app is running */
IsInfo = Logger.IsInfoEnabled;
IsWarn = Logger.IsWarnEnabled;
IsDebug = Logger.IsDebugEnabled;
IsTrace = Logger.IsTraceEnabled;
IsError = Logger.IsErrorEnabled || Logger.IsFatalEnabled;
}
private string Level
{
get
{
if (IsTrace) return "Trace";
if (IsDebug) return "Debug";
if (IsInfo) return "Info";
if (IsWarn) return "Warn";
if (IsError) return "Error";
return "None";
}
}
private void Log(string text)
{
Logger.Info(text);
}
public void Info(string text)
{
Logger.Info(text);
}
public void Warn(string text)
{
Logger.Warn(text);
}
public void Debug(string text)
{
Logger.Debug(text);
}
public void Trace(string text)
{
Logger.Trace(text);
}
public void Error(string text, Exception ex = null)
{
Logger.Error(ex, text);
}
}
} | 1 | 22,672 | This is done few times, replace with some well named method | NethermindEth-nethermind | .cs |
@@ -101,14 +101,14 @@ public class CodeGeneratorTool {
private static int generate(
String descriptorSet,
- String[] apiConfigs,
+ String[] configs,
String[] generatorConfigs,
String packageConfig,
String outputDirectory,
String[] enabledArtifacts) {
ToolOptions options = ToolOptions.create();
options.set(ToolOptions.DESCRIPTOR_SET, descriptorSet);
- options.set(ToolOptions.CONFIG_FILES, Lists.newArrayList(apiConfigs));
+ options.set(ToolOptions.CONFIG_FILES, Lists.newArrayList(configs));
options.set(CodeGeneratorApi.OUTPUT_FILE, outputDirectory);
options.set(CodeGeneratorApi.GENERATOR_CONFIG_FILES, Lists.newArrayList(generatorConfigs));
options.set(CodeGeneratorApi.PACKAGE_CONFIG_FILE, packageConfig); | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen;
import com.google.api.tools.framework.tools.ToolOptions;
import com.google.common.collect.Lists;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
// Example usage: (assuming environment variable BASE is the base directory of the project
// containing the YAMLs, descriptor set, and output)
//
// CodeGeneratorTool --descriptor_set=$BASE/src/main/generated/_descriptors/bigtable.desc \
// --service_yaml=$BASE/src/main/configs/bigtabletableadmin.yaml \
// --gapic_yaml=$BASE/src/main/configs/bigtable_table_gapic.yaml \
// --output=$BASE
public class CodeGeneratorTool {
public static void main(String[] args) throws Exception {
Options options = new Options();
options.addOption("h", "help", false, "show usage");
options.addOption(
Option.builder()
.longOpt("descriptor_set")
.desc("The descriptor set representing the compiled input protos.")
.hasArg()
.argName("DESCRIPTOR-SET")
.required(true)
.build());
options.addOption(
Option.builder()
.longOpt("service_yaml")
.desc("The service YAML configuration file or files.")
.hasArg()
.argName("SERVICE-YAML")
.required(true)
.build());
options.addOption(
Option.builder()
.longOpt("gapic_yaml")
.desc("The GAPIC YAML configuration file or files.")
.hasArg()
.argName("GAPIC-YAML")
.required(true)
.build());
options.addOption(
Option.builder()
.longOpt("package_yaml")
.desc("The package metadata YAML configuration file.")
.hasArg()
.argName("PACKAGE-YAML")
.build());
options.addOption(
Option.builder("o")
.longOpt("output")
.desc("The directory in which to output the generated client library.")
.hasArg()
.argName("OUTPUT-DIRECTORY")
.build());
options.addOption(
Option.builder()
.longOpt("enabled_artifacts")
.desc(
"Optional. Artifacts enabled for the generator. "
+ "Currently supports 'surface' and 'test'.")
.hasArg()
.argName("ENABLED_ARTIFACTS")
.required(false)
.build());
CommandLine cl = (new DefaultParser()).parse(options, args);
if (cl.hasOption("help")) {
      HelpFormatter formatter = new HelpFormatter();
      formatter.printHelp("CodeGeneratorTool", options);
}
int exitCode =
generate(
cl.getOptionValue("descriptor_set"),
cl.getOptionValues("service_yaml"),
cl.getOptionValues("gapic_yaml"),
cl.getOptionValue("package_yaml"),
cl.getOptionValue("output", ""),
cl.getOptionValues("enabled_artifacts"));
System.exit(exitCode);
}
private static int generate(
String descriptorSet,
String[] apiConfigs,
String[] generatorConfigs,
String packageConfig,
String outputDirectory,
String[] enabledArtifacts) {
ToolOptions options = ToolOptions.create();
options.set(ToolOptions.DESCRIPTOR_SET, descriptorSet);
options.set(ToolOptions.CONFIG_FILES, Lists.newArrayList(apiConfigs));
options.set(CodeGeneratorApi.OUTPUT_FILE, outputDirectory);
options.set(CodeGeneratorApi.GENERATOR_CONFIG_FILES, Lists.newArrayList(generatorConfigs));
options.set(CodeGeneratorApi.PACKAGE_CONFIG_FILE, packageConfig);
if (enabledArtifacts != null) {
options.set(CodeGeneratorApi.ENABLED_ARTIFACTS, Lists.newArrayList(enabledArtifacts));
}
CodeGeneratorApi codeGen = new CodeGeneratorApi(options);
return codeGen.run();
}
}
| 1 | 22,014 | `configs` isn't very descriptive, especially when there are other config-ish things like `generatorConfigs`. | googleapis-gapic-generator | java |
@@ -192,7 +192,12 @@ class TestGatlingExecutor(BZTestCase):
}]},
{"url": "/",
"think-time": 2,
- "follow-redirects": True}]
+ "follow-redirects": True},
+ {"url": "/reserve.php",
+ "method": "POST",
+ "body": u"Body Content 2",
+ }
+ ]
}
})
self.obj.prepare() | 1 | import logging
import os
import shutil
import time
from bzt import ToolError, TaurusConfigError
from bzt.modules.aggregator import DataPoint
from bzt.modules.gatling import GatlingExecutor, DataLogReader
from bzt.modules.provisioning import Local
from bzt.six import u
from bzt.utils import EXE_SUFFIX, get_full_path
from tests import BZTestCase, __dir__, RESOURCES_DIR, BUILD_DIR, close_reader_file
from tests.mocks import EngineEmul
def get_gatling():
path = os.path.abspath(RESOURCES_DIR + "gatling/gatling" + EXE_SUFFIX)
obj = GatlingExecutor()
obj.engine = EngineEmul()
obj.env = obj.engine.env
obj.settings.merge({"path": path})
return obj
class TestGatlingExecutor(BZTestCase):
def setUp(self):
super(TestGatlingExecutor, self).setUp()
self.obj = get_gatling()
def tearDown(self):
if self.obj.stdout_file:
self.obj.stdout_file.close()
if self.obj.stderr_file:
self.obj.stderr_file.close()
close_reader_file(self.obj.reader)
super(TestGatlingExecutor, self).tearDown()
def test_external_jar_wrong_launcher(self):
modified_launcher = self.obj.engine.create_artifact('wrong-gatling', EXE_SUFFIX)
origin_launcher = get_full_path(self.obj.settings['path'])
with open(origin_launcher) as orig_file:
with open(modified_launcher, 'w') as mod_file:
for line in orig_file.readlines():
if 'COMPILATION_CLASSPATH' not in line:
mod_file.writelines([line])
os.chmod(modified_launcher, 0o755)
self.obj.settings.merge({"path": modified_launcher})
self.obj.execution.merge({
'files': [
'tests/resources/grinder/fake_grinder.jar',
'tests/resources/selenium/junit/jar'],
'scenario': 'tests/resources/gatling/bs'})
self.assertRaises(ToolError, self.obj.prepare)
def test_additional_classpath(self):
jars = ("gatling", "simulations.jar"), ("gatling", "deps.jar"), ("grinder", "fake_grinder.jar")
jars = list(os.path.join(RESOURCES_DIR, *jar) for jar in jars)
self.obj.execution.merge({
"files": [jars[0]],
"scenario": {
"script": RESOURCES_DIR + "gatling/BasicSimulation.scala",
"additional-classpath": [jars[1]]}})
self.obj.settings.merge({"additional-classpath": [jars[2]]})
self.obj.prepare()
for jar in jars:
for var in ("JAVA_CLASSPATH", "COMPILATION_CLASSPATH"):
self.assertIn(jar, self.obj.env.get(var))
def test_external_jar_right_launcher(self):
self.obj.execution.merge({
'files': [
'tests/resources/grinder/fake_grinder.jar',
'tests/resources/selenium/junit/jar'],
'scenario': {
"script": RESOURCES_DIR + "gatling/BasicSimulation.scala",
"simulation": "mytest.BasicSimulation"}})
self.obj.prepare()
self.obj.startup()
self.obj.shutdown()
modified_launcher = self.obj.launcher
with open(modified_launcher) as modified:
modified_lines = modified.readlines()
for jar in ('fake_grinder.jar', 'another_dummy.jar'):
for var in ("JAVA_CLASSPATH", "COMPILATION_CLASSPATH"):
self.assertIn(jar, self.obj.env.get(var))
for line in modified_lines:
self.assertFalse(line.startswith('set COMPILATION_CLASSPATH=""'))
self.assertTrue(not line.startswith('COMPILATION_CLASSPATH=') or
line.endswith('":${COMPILATION_CLASSPATH}"\n'))
with open(self.obj.stdout_file.name) as stdout:
out_lines = stdout.readlines()
out_lines = [out_line.rstrip() for out_line in out_lines]
self.assertEqual(out_lines[-4], get_full_path(self.obj.settings['path'], step_up=2)) # $GATLING_HOME
self.assertIn('fake_grinder.jar', out_lines[-3]) # $COMPILATION_CLASSPATH
self.assertIn('another_dummy.jar', out_lines[-3]) # $COMPILATION_CLASSPATH
self.assertEqual(out_lines[-2], 'TRUE') # $NO_PAUSE
def test_install_Gatling(self):
path = os.path.abspath(BUILD_DIR + "gatling-taurus/bin/gatling" + EXE_SUFFIX)
shutil.rmtree(os.path.dirname(os.path.dirname(path)), ignore_errors=True)
download_link = "file:///" + RESOURCES_DIR + "gatling/gatling-dist-{version}.zip"
gatling_version = '2.3.0'
self.assertFalse(os.path.exists(path))
self.obj.settings.merge({
"path": path,
"download-link": download_link,
"version": gatling_version})
self.obj.execution.merge({
"scenario": {
"script": RESOURCES_DIR + "gatling/BasicSimulation.scala",
"simulation": "mytest.BasicSimulation"}})
self.obj.prepare()
self.assertTrue(os.path.exists(path))
def test_gatling_widget(self):
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "gatling/BasicSimulation.scala"}})
self.obj.prepare()
self.obj.get_widget()
self.assertEqual(self.obj.widget.widgets[0].text, "Gatling: BasicSimulation.scala")
def test_resource_files_collection_remote2(self): # script = <dir>
script_path = RESOURCES_DIR + "gatling/bs"
self.obj.execution.merge({"scenario": {"script": script_path}})
res_files = self.obj.resource_files()
self.assertPathsEqual(res_files, [script_path])
def test_resource_files_collection_local(self):
script = "LocalBasicSimulation.scala"
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "gatling/" + script}})
self.obj.prepare()
artifacts = os.listdir(self.obj.engine.artifacts_dir)
self.assertNotIn(script, artifacts)
def test_env_type(self):
script = "LocalBasicSimulation.scala"
self.obj.execution.merge({
"concurrency": 2,
"hold-for": 1000,
"throughput": 100,
"scenario": {"script": RESOURCES_DIR + "gatling/" + script}})
self.obj.prepare()
self.obj.engine.artifacts_dir = u(self.obj.engine.artifacts_dir)
self.obj.startup()
self.obj.shutdown()
with open(self.obj.stdout_file.name) as fds:
lines = fds.readlines()
self.assertIn('throughput', lines[-1])
def test_warning_for_throughput_without_duration(self):
script = "LocalBasicSimulation.scala"
self.obj.execution.merge({
"concurrency": 2,
"throughput": 100,
"scenario": {"script": RESOURCES_DIR + "gatling/" + script}})
self.obj.prepare()
self.obj.engine.artifacts_dir = u(self.obj.engine.artifacts_dir)
self.obj.startup()
self.obj.shutdown()
with open(self.obj.stdout_file.name) as fds:
lines = fds.readlines()
self.assertNotIn('throughput', lines[-1])
def test_requests_1(self):
self.obj.execution.merge({
"concurrency": 10,
"iterations": 5,
"scenario": {
"think-time": 1,
"follow-redirects": False,
"default-address": "blazedemo.com",
"headers": {"H1": "V1"},
"requests": [{"url": "/reserve.php",
"headers": {"H2": "V2"},
"method": "POST",
"body": "Body Content",
"assert": [{
"contains": ["bootstrap.min"],
"not": True
}]},
{"url": "/",
"think-time": 2,
"follow-redirects": True}]
}
})
self.obj.prepare()
scala_file = self.obj.engine.artifacts_dir + '/' + self.obj.get_scenario().get('simulation') + '.scala'
self.assertFilesEqual(RESOURCES_DIR + "gatling/generated1.scala", scala_file,
self.obj.get_scenario().get('simulation'), "SIMNAME")
def test_requests_def_addr_is_none(self):
self.obj.execution.merge({
"concurrency": 10,
"hold-for": 110,
"throughput": 33,
"ramp-up": 30,
"scenario": {
'keepalive': False,
'timeout': '100ms',
'requests': ['http://blazedemo.com', 'google.com']
}
})
self.obj.prepare()
def test_requests_def_addr_is_empty(self):
self.obj.execution.merge({
"concurrency": 10,
"hold-for": 110,
"throughput": 33,
"ramp-up": 30,
"scenario": {
'default-address': '',
'keepalive': False,
'timeout': '100ms',
'requests': ['http://blazedemo.com', 'google.com']
}
})
self.obj.prepare()
def test_requests_3(self):
self.obj.execution.merge({
"iterations": 55,
"scenario": {
"requests": [{'url': 'http://site.com/reserve.php',
'assert': [{
'contains': [200],
'subject': 'http-code',
'not': False
}]}]
}
})
self.obj.prepare()
scala_file = self.obj.engine.artifacts_dir + '/' + self.obj.get_scenario().get('simulation') + '.scala'
self.assertFilesEqual(RESOURCES_DIR + "gatling/generated3.scala", scala_file,
self.obj.get_scenario().get('simulation'), "SIMNAME")
def test_requests_4(self):
self.obj.execution.merge({
"iterations": 55,
"scenario": {
"default-address": "",
"requests": [{'url': 'site.com/reserve.php',
'assert': [{
'subject': 'body',
'contains': 'boot(.*)strap.min',
'regexp': True,
'not': False
}]}]
}
})
self.obj.prepare()
scala_file = self.obj.engine.artifacts_dir + '/' + self.obj.get_scenario().get('simulation') + '.scala'
self.assertFilesEqual(RESOURCES_DIR + "gatling/generated4.scala", scala_file,
self.obj.get_scenario().get('simulation'), "SIMNAME")
def test_requests_5(self):
self.obj.execution.merge({
"iterations": 55,
"scenario": {
"default-address": "blazedemo.com",
"requests": [{'url': '/reserve.php',
'assert': [{
'subject': 'body',
'regexp': True,
'not': False
}]}]
}
})
self.assertRaises(TaurusConfigError, self.obj.prepare)
def test_fail_on_zero_results(self):
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "gatling/bs/BasicSimulation.scala"}})
self.obj.prepare()
self.obj.engine.prepared = [self.obj]
self.obj.engine.started = [self.obj]
prov = Local()
prov.engine = self.obj.engine
prov.executors = [self.obj]
self.obj.engine.provisioning = prov
self.obj.reader.buffer = ['some info']
self.obj.engine.provisioning.post_process()
def test_no_simulation(self):
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "gatling/bs/BasicSimulation.scala"}})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
def test_full_Gatling(self):
self.obj.execution.merge({
"scenario": {
"script": RESOURCES_DIR + "gatling/bs/BasicSimulation.scala",
"simulation": "fake"
}
})
self.obj.prepare()
self.obj.settings.merge({"path": RESOURCES_DIR + "gatling/gatling" + EXE_SUFFIX})
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
def test_interactive_request(self):
self.obj.engine.existing_artifact(RESOURCES_DIR + "gatling/SimpleSimulation.scala")
self.obj.execution.merge({
"scenario": {
"script": self.obj.engine.artifacts_dir + "/SimpleSimulation.scala",
"simulation": "SimpleSimulation"}})
self.obj.prepare()
self.obj.settings.merge({"path": RESOURCES_DIR + "gatling/gatling" + EXE_SUFFIX})
counter1 = 0
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
counter1 += 1
self.obj.shutdown()
self.obj.post_process()
self.tearDown() # Carthage must be destroyed...
self.setUp()
self.obj.engine.existing_artifact(RESOURCES_DIR + "gatling/SimpleSimulation.scala")
self.obj.engine.existing_artifact(RESOURCES_DIR + "gatling/generated1.scala")
self.obj.execution.merge({
"scenario": {
"script": self.obj.engine.artifacts_dir + "/SimpleSimulation.scala",
"simulation": "fake"}})
self.obj.prepare()
self.obj.settings.merge({"path": RESOURCES_DIR + "gatling/gatling" + EXE_SUFFIX})
counter2 = 0
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
counter2 += 1
if counter2 > counter1 * 5:
self.fail('It seems gatling made interactive request')
self.obj.shutdown()
self.obj.post_process()
except TaurusConfigError:
return
self.fail('ValueError not found')
def test_script_jar(self):
self.obj.execution.merge({"scenario": {"script": RESOURCES_DIR + "gatling/simulations.jar",
"simulation": "tests.gatling.BasicSimulation"}})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
for var in ("JAVA_CLASSPATH", "COMPILATION_CLASSPATH"):
self.assertIn("simulations.jar", self.obj.env.get(var))
def test_files_find_file(self):
curdir = get_full_path(os.curdir)
try:
os.chdir(__dir__() + "/../")
self.obj.engine.file_search_paths.append(RESOURCES_DIR + "gatling/")
self.obj.engine.config.merge({
"execution": {
"scenario": {
"script": "simulations.jar",
"simulation": "tests.gatling.BasicSimulation"
},
"files": ["deps.jar"]
}
})
self.obj.execution.merge(self.obj.engine.config["execution"])
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
for jar in ("simulations.jar", "deps.jar"):
for var in ("JAVA_CLASSPATH", "COMPILATION_CLASSPATH"):
self.assertIn(jar, self.obj.env.get(var))
finally:
os.chdir(curdir)
def test_data_sources(self):
self.obj.execution.merge({
"scenario": {
"data-sources": [{
"path": RESOURCES_DIR + "test1.csv",
"delimiter": ","
}],
"requests": ["http://blazedemo.com/?tag=${col1}"],
}
})
self.obj.prepare()
scala_file = self.obj.engine.artifacts_dir + '/' + self.obj.get_scenario().get('simulation') + '.scala'
self.assertFilesEqual(RESOURCES_DIR + "gatling/generated_data_sources.scala", scala_file,
self.obj.get_scenario().get('simulation'), "SIMNAME")
self.assertTrue(os.path.exists(os.path.join(self.obj.engine.artifacts_dir, 'test1.csv')))
def test_resource_files_data_sources(self):
csv_path = RESOURCES_DIR + "test1.csv"
jar_file = "path_to_my_jar"
self.obj.execution.merge({
"scenario": {
"data-sources": [csv_path],
"requests": ["http://blazedemo.com/"],
}
})
self.obj.settings.merge({'additional-classpath': [jar_file]})
res_files = self.obj.resource_files()
self.assertEqual(res_files, [csv_path, jar_file])
def test_diagnostics(self):
self.obj.execution.merge({
"scenario": {
"script": RESOURCES_DIR + "gatling/simulations.jar",
"simulation": "tests.gatling.BasicSimulation"}})
self.obj.prepare()
try:
self.obj.startup()
while not self.obj.check():
time.sleep(self.obj.engine.check_interval)
finally:
self.obj.shutdown()
self.obj.post_process()
self.assertIsNotNone(self.obj.get_error_diagnostics())
def test_properties_migration(self):
self.obj.execution.merge({
"scenario": {
"keepalive": True,
"requests": ["http://blazedemo.com/"]}
})
self.obj.execute = lambda *args, **kwargs: None
self.obj.prepare()
self.obj.startup()
self.assertIn("gatling.http.ahc.allowPoolingConnections=true", self.obj.env.get("JAVA_OPTS"))
self.assertIn("gatling.http.ahc.keepAlive=true", self.obj.env.get("JAVA_OPTS"))
def test_properties_2levels(self):
self.obj.settings.merge({
"properties": {
"settlevel": "settval",
"override": 1,
},
})
self.obj.execution.merge({
"scenario": {
"properties": {
"scenlevel": "scenval",
"override": 2,
},
"requests": ["http://blazedemo.com/"]}
})
self.obj.execute = lambda *args, **kwargs: None
self.obj.prepare()
self.obj.startup()
self.assertIn("-Dscenlevel=scenval", self.obj.env.get("JAVA_OPTS"))
self.assertIn("-Dsettlevel=settval", self.obj.env.get("JAVA_OPTS"))
self.assertIn("-Doverride=2", self.obj.env.get("JAVA_OPTS"))
class TestDataLogReader(BZTestCase):
def test_read(self):
log_path = RESOURCES_DIR + "gatling/"
obj = DataLogReader(log_path, logging.getLogger(''), 'gatling-0')
list_of_values = list(obj.datapoints(True))
self.assertEqual(len(list_of_values), 23)
self.assertEqual(obj.guessed_gatling_version, "2.1")
self.assertIn('request_1', list_of_values[-1][DataPoint.CUMULATIVE].keys())
def test_read_asserts(self):
log_path = RESOURCES_DIR + "gatling/"
obj = DataLogReader(log_path, logging.getLogger(''), 'gatling-1')
list_of_values = list(obj.datapoints(True))
self.assertEqual(len(list_of_values), 3)
self.assertEqual(obj.guessed_gatling_version, "2.2+")
self.assertIn('ping request', list_of_values[-1][DataPoint.CUMULATIVE].keys())
def test_read_220_format(self):
log_path = RESOURCES_DIR + "gatling/"
obj = DataLogReader(log_path, logging.getLogger(''), 'gatling-220')
list_of_values = list(obj.datapoints(True))
self.assertEqual(len(list_of_values), 4)
self.assertEqual(obj.guessed_gatling_version, "2.2+")
self.assertIn('/', list_of_values[-1][DataPoint.CUMULATIVE].keys())
def test_read_labels_problematic(self):
log_path = RESOURCES_DIR + "gatling/"
obj = DataLogReader(log_path, logging.getLogger(''), 'gatling-2') # problematic one
list_of_values = list(obj.datapoints(True))
self.assertEqual(len(list_of_values), 5)
self.assertEqual(obj.guessed_gatling_version, "2.2+")
self.assertIn('User-Login,Auth-POST', list_of_values[-1][DataPoint.CUMULATIVE].keys())
def test_read_labels_regular(self):
log_path = RESOURCES_DIR + "gatling/"
obj = DataLogReader(log_path, logging.getLogger(''), 'gatling-3') # regular one
list_of_values = list(obj.datapoints(True))
self.assertEqual(len(list_of_values), 10)
self.assertEqual(obj.guessed_gatling_version, "2.2+")
self.assertIn('http://blazedemo.com/', list_of_values[-1][DataPoint.CUMULATIVE].keys())
| 1 | 15,077 | In my point we have to check conversion with specific (non-ASCII) characters in unicode string. | Blazemeter-taurus | py |
@@ -1,6 +1,7 @@
tests = [
- ("python", "EmbedLib.py", {}),
+ # ("python", "EmbedLib.py", {}),
("python", "UnitTestEmbed.py", {}),
+ ("python", "UnitTestExcludedVolume.py", {}),
("python", "UnitTestPharmacophore.py", {}),
]
| 1 | tests = [
("python", "EmbedLib.py", {}),
("python", "UnitTestEmbed.py", {}),
("python", "UnitTestPharmacophore.py", {}),
]
longTests = []
if __name__ == '__main__':
import sys
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
| 1 | 16,062 | Given that file still contains doctests (and should still contain doctests), they should be run. Please turn this back on. | rdkit-rdkit | cpp |
@@ -70,6 +70,7 @@ public class SignUtils {
private static final String ATTR_CONTENTS = "contents";
private static final String ATTR_CERT_DNS_DOMAIN = "certDnsDomain";
private static final String ATTR_AUDIT_ENABLED = "auditEnabled";
+ private static final String ATTR_SELF_SERVE = "selfserve";
private static Struct asStruct(DomainPolicies domainPolicies) {
// all of our fields are in canonical order based | 1 | /*
* Copyright 2016 Yahoo Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.common.utils;
import java.util.List;
import com.yahoo.athenz.zms.Assertion;
import com.yahoo.athenz.zms.DomainData;
import com.yahoo.athenz.zms.DomainPolicies;
import com.yahoo.athenz.zms.Policy;
import com.yahoo.athenz.zms.PublicKeyEntry;
import com.yahoo.athenz.zms.Role;
import com.yahoo.athenz.zms.RoleMember;
import com.yahoo.athenz.zms.ServiceIdentity;
import com.yahoo.athenz.zms.SignedPolicies;
import com.yahoo.athenz.zts.PolicyData;
import com.yahoo.athenz.zts.SignedPolicyData;
import com.yahoo.rdl.Array;
import com.yahoo.rdl.Struct;
public class SignUtils {
private static final String ATTR_ENABLED = "enabled";
private static final String ATTR_MODIFIED = "modified";
private static final String ATTR_POLICIES = "policies";
private static final String ATTR_DOMAIN = "domain";
private static final String ATTR_EXPIRES = "expires";
private static final String ATTR_POLICY_DATA = "policyData";
private static final String ATTR_ZMS_SIGNATURE = "zmsSignature";
private static final String ATTR_ZMS_KEY_ID = "zmsKeyId";
private static final String ATTR_MEMBERS = "members";
private static final String ATTR_ROLE_MEMBERS = "roleMembers";
private static final String ATTR_MEMBER_NAME = "memberName";
private static final String ATTR_EXPIRATION = "expiration";
private static final String ATTR_NAME = "name";
private static final String ATTR_ROLE = "role";
private static final String ATTR_SERVICES = "services";
private static final String ATTR_ID = "id";
private static final String ATTR_PUBLIC_KEYS = "publicKeys";
private static final String ATTR_ACCOUNT = "account";
private static final String ATTR_YPMID = "ypmId";
private static final String ATTR_EFFECT = "effect";
private static final String ATTR_ACTION = "action";
private static final String ATTR_RESOURCE = "resource";
private static final String ATTR_ASSERTIONS = "assertions";
private static final String ATTR_EXECUTABLE = "executable";
private static final String ATTR_DESCRIPTION = "descrition";
private static final String ATTR_TRUST = "trust";
private static final String ATTR_GROUP = "group";
private static final String ATTR_PROVIDER_ENDPOINT = "providerEndpoint";
private static final String ATTR_USER = "user";
private static final String ATTR_HOSTS = "hosts";
private static final String ATTR_KEY = "key";
private static final String ATTR_ROLES = "roles";
private static final String ATTR_SIGNATURE = "signature";
private static final String ATTR_KEYID = "keyId";
private static final String ATTR_CONTENTS = "contents";
private static final String ATTR_CERT_DNS_DOMAIN = "certDnsDomain";
private static final String ATTR_AUDIT_ENABLED = "auditEnabled";
private static Struct asStruct(DomainPolicies domainPolicies) {
// all of our fields are in canonical order based
// on their attribute name
Struct struct = new Struct();
appendObject(struct, ATTR_DOMAIN, domainPolicies.getDomain());
Array policiesArray = new Array();
for (Policy policy : domainPolicies.getPolicies()) {
policiesArray.add(asStruct(policy));
}
appendArray(struct, ATTR_POLICIES, policiesArray);
return struct;
}
private static Struct asStruct(Policy policy) {
// all of our fields are in canonical order based
// on their attribute name
Struct struct = new Struct();
List<Assertion> assertions = policy.getAssertions();
if (assertions != null && !assertions.isEmpty()) {
Array assertionsArray = new Array();
for (Assertion assertion : assertions) {
Struct structAssertion = new Struct();
appendObject(structAssertion, ATTR_ACTION, assertion.getAction());
appendObject(structAssertion, ATTR_EFFECT, assertion.getEffect());
appendObject(structAssertion, ATTR_RESOURCE, assertion.getResource());
appendObject(structAssertion, ATTR_ROLE, assertion.getRole());
assertionsArray.add(structAssertion);
}
appendArray(struct, ATTR_ASSERTIONS, assertionsArray);
}
appendObject(struct, ATTR_MODIFIED, policy.getModified());
appendObject(struct, ATTR_NAME, policy.getName());
return struct;
}
private static Struct asStruct(com.yahoo.athenz.zts.Policy policy) {
// all of our fields are in canonical order based
// on their attribute name
Struct struct = new Struct();
List<com.yahoo.athenz.zts.Assertion> assertions = policy.getAssertions();
if (assertions != null && !assertions.isEmpty()) {
Array assertionsArray = new Array();
for (com.yahoo.athenz.zts.Assertion assertion : assertions) {
Struct structAssertion = new Struct();
appendObject(structAssertion, ATTR_ACTION, assertion.getAction());
appendObject(structAssertion, ATTR_EFFECT, assertion.getEffect());
appendObject(structAssertion, ATTR_RESOURCE, assertion.getResource());
appendObject(structAssertion, ATTR_ROLE, assertion.getRole());
assertionsArray.add(structAssertion);
}
appendArray(struct, ATTR_ASSERTIONS, assertionsArray);
}
appendObject(struct, ATTR_MODIFIED, policy.getModified());
appendObject(struct, ATTR_NAME, policy.getName());
return struct;
}
private static Struct asStruct(Role role) {
// all of our fields are in canonical order based
// on their attribute name
Struct struct = new Struct();
appendObject(struct, ATTR_AUDIT_ENABLED, role.getAuditEnabled());
appendList(struct, ATTR_MEMBERS, role.getMembers());
appendObject(struct, ATTR_MODIFIED, role.getModified());
appendObject(struct, ATTR_NAME, role.getName());
List<RoleMember> roleMembers = role.getRoleMembers();
if (roleMembers != null) {
Array roleMembersArray = new Array();
for (RoleMember roleMember : roleMembers) {
Struct structRoleMember = new Struct();
appendObject(structRoleMember, ATTR_EXPIRATION, roleMember.getExpiration());
appendObject(structRoleMember, ATTR_MEMBER_NAME, roleMember.getMemberName());
roleMembersArray.add(structRoleMember);
}
appendArray(struct, ATTR_ROLE_MEMBERS, roleMembersArray);
}
appendObject(struct, ATTR_TRUST, role.getTrust());
return struct;
}
private static Struct asStruct(ServiceIdentity service) {
// all of our fields are in canonical order based
// on their attribute name
Struct struct = new Struct();
appendObject(struct, ATTR_DESCRIPTION, service.getDescription());
appendObject(struct, ATTR_EXECUTABLE, service.getExecutable());
appendObject(struct, ATTR_GROUP, service.getGroup());
appendList(struct, ATTR_HOSTS, service.getHosts());
appendObject(struct, ATTR_MODIFIED, service.getModified());
appendObject(struct, ATTR_NAME, service.getName());
appendObject(struct, ATTR_PROVIDER_ENDPOINT, service.getProviderEndpoint());
List<PublicKeyEntry> publicKeys = service.getPublicKeys();
Array publicKeysArray = new Array();
if (publicKeys != null) {
for (PublicKeyEntry publicKey : publicKeys) {
Struct structPublicKey = new Struct();
appendObject(structPublicKey, ATTR_ID, publicKey.getId());
appendObject(structPublicKey, ATTR_KEY, publicKey.getKey());
publicKeysArray.add(structPublicKey);
}
}
appendArray(struct, ATTR_PUBLIC_KEYS, publicKeysArray);
appendObject(struct, ATTR_USER, service.getUser());
return struct;
}
private static void appendList(Struct struct, String name, List<String> list) {
if (list == null) {
return;
}
Array items = new Array();
items.addAll(list);
appendArray(struct, name, items);
}
private static void appendObject(Struct struct, String name, Object value) {
if (value == null) {
return;
}
if (value instanceof Struct) {
struct.append(name, value);
} else if (value instanceof String) {
struct.append(name, value);
} else if (value instanceof Integer) {
struct.append(name, value);
} else if (value instanceof Boolean) {
struct.append(name, value);
} else {
struct.append(name, value.toString());
}
}
private static void appendArray(Struct struct, String name, Array array) {
struct.append(name, array);
}
private static Object asStruct(PolicyData policyData) {
// all of our fields are in canonical order based
// on their attribute name
Struct struct = new Struct();
appendObject(struct, ATTR_DOMAIN, policyData.getDomain());
List<com.yahoo.athenz.zts.Policy> policies = policyData.getPolicies();
Array policiesArray = new Array();
if (policies != null) {
for (com.yahoo.athenz.zts.Policy policy : policies) {
policiesArray.add(asStruct(policy));
}
}
appendArray(struct, ATTR_POLICIES, policiesArray);
return struct;
}
private static Object asStruct(SignedPolicyData signedPolicyData) {
// all of our fields are in canonical order based
// on their attribute name
Struct struct = new Struct();
appendObject(struct, ATTR_EXPIRES, signedPolicyData.getExpires());
appendObject(struct, ATTR_MODIFIED, signedPolicyData.getModified());
appendObject(struct, ATTR_POLICY_DATA, asStruct(signedPolicyData.getPolicyData()));
appendObject(struct, ATTR_ZMS_KEY_ID, signedPolicyData.getZmsKeyId());
appendObject(struct, ATTR_ZMS_SIGNATURE, signedPolicyData.getZmsSignature());
return struct;
}
private static Struct asStruct(DomainData domainData) {
// all of our fields are in canonical order based
// on their attribute name
Struct struct = new Struct();
appendObject(struct, ATTR_ACCOUNT, domainData.getAccount());
appendObject(struct, ATTR_AUDIT_ENABLED, domainData.getAuditEnabled());
appendObject(struct, ATTR_CERT_DNS_DOMAIN, domainData.getCertDnsDomain());
appendObject(struct, ATTR_ENABLED, domainData.getEnabled());
appendObject(struct, ATTR_MODIFIED, domainData.getModified());
appendObject(struct, ATTR_NAME, domainData.getName());
SignedPolicies signedPolicies = domainData.getPolicies();
if (signedPolicies != null) {
Struct structSignedPolicies = new Struct();
appendObject(structSignedPolicies, ATTR_CONTENTS, asStruct(signedPolicies.getContents()));
appendObject(structSignedPolicies, ATTR_KEYID, signedPolicies.getKeyId());
appendObject(struct, ATTR_POLICIES, structSignedPolicies);
appendObject(structSignedPolicies, ATTR_SIGNATURE, signedPolicies.getSignature());
}
Array structRoles = new Array();
if (domainData.getRoles() != null) {
for (Role role : domainData.getRoles()) {
structRoles.add(asStruct(role));
}
}
appendArray(struct, ATTR_ROLES, structRoles);
Array structServices = new Array();
if (domainData.getServices() != null) {
for (ServiceIdentity service : domainData.getServices()) {
structServices.add(asStruct(service));
}
}
appendArray(struct, ATTR_SERVICES, structServices);
appendObject(struct, ATTR_YPMID, domainData.getYpmId());
return struct;
}
private static void appendSeparator(StringBuilder strBuffer) {
// if we have more than a single character
// (which is our initial {/[ character)
// in our buffer then we need to separate
// the item with a comma
if (strBuffer.length() != 1) {
strBuffer.append(',');
}
}
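    // Recursively serializes Structs and Arrays into a canonical string with struct keys emitted in sorted
    // order, so that signatures computed over the output are reproducible.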
static String asCanonicalString(Object obj) {
StringBuilder strBuffer = new StringBuilder();
if (obj instanceof Struct) {
Struct struct = (Struct) obj;
strBuffer.append('{');
for (String name : struct.sortedNames()) {
appendSeparator(strBuffer);
strBuffer.append('"');
strBuffer.append(name);
strBuffer.append("\":");
strBuffer.append(asCanonicalString(struct.get(name)));
}
strBuffer.append('}');
} else if (obj instanceof Array) {
strBuffer.append('[');
for (Object item : (Array) obj) {
appendSeparator(strBuffer);
strBuffer.append(asCanonicalString(item));
}
strBuffer.append(']');
} else if (obj instanceof String) {
strBuffer.append('"');
strBuffer.append(obj);
strBuffer.append('"');
} else if (obj instanceof Integer) {
strBuffer.append(obj);
} else if (obj instanceof Long) {
strBuffer.append(obj);
} else if (obj instanceof Boolean) {
strBuffer.append(obj);
} else {
strBuffer.append(obj.toString());
}
return strBuffer.toString();
}
public static String asCanonicalString(PolicyData policyData) {
return asCanonicalString(asStruct(policyData));
}
public static String asCanonicalString(DomainData domainData) {
return asCanonicalString(asStruct(domainData));
}
public static String asCanonicalString(DomainPolicies domainPolicies) {
return asCanonicalString(asStruct(domainPolicies));
}
public static String asCanonicalString(SignedPolicyData signedPolicyData) {
return asCanonicalString(asStruct(signedPolicyData));
}
}
| 1 | 4,849 | we should keep the camel case format - selfServe | AthenZ-athenz | java |
@@ -17,14 +17,15 @@ package patch
import (
"time"
+ "github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/compute"
"github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/utils"
- "google.golang.org/api/compute/v1"
+ api "google.golang.org/api/compute/v1"
)
type patchTestSetup struct {
testName string
image string
- startup *compute.MetadataItems
+ metadata []*api.MetadataItems
assertTimeout time.Duration
machineType string
} | 1 | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package patch
import (
"time"
"github.com/GoogleCloudPlatform/compute-image-tools/osconfig_tests/utils"
"google.golang.org/api/compute/v1"
)
type patchTestSetup struct {
testName string
image string
startup *compute.MetadataItems
assertTimeout time.Duration
machineType string
}
var (
windowsRecordBoot = `
while ($true) {
$uri = 'http://metadata.google.internal/computeMetadata/v1/instance/guest-attributes/osconfig_tests/boot_count'
$old = Invoke-RestMethod -Method GET -Uri $uri -Headers @{"Metadata-Flavor" = "Google"}
$new = $old+1
try {
Invoke-RestMethod -Method PUT -Uri $uri -Headers @{"Metadata-Flavor" = "Google"} -Body $new -ErrorAction Stop
}
catch {
Write-Output $_.Exception.Message
Start-Sleep 1
continue
}
break
}
`
windowsSetWsus = `
$wu_server = 'wsus-server.c.compute-image-osconfig-agent.internal'
$windows_update_path = 'HKLM:\SOFTWARE\Policies\Microsoft\Windows\WindowsUpdate'
$windows_update_au_path = "$windows_update_path\AU"
if (-not (Test-Path $windows_update_path)) {
New-Item -Path $windows_update_path -Value ""
New-Item -Path $windows_update_au_path -Value ""
}
if (-not (Get-ItemProperty -Path $windows_update_path -Name WUServer)) {
Set-ItemProperty -Path $windows_update_path -Name WUServer -Value "http://${wu_server}:8530"
Set-ItemProperty -Path $windows_update_path -Name WUStatusServer -Value "http://${wu_server}:8530"
Set-ItemProperty -Path $windows_update_au_path -Name UseWUServer -Value 1
Restart-Service wuauserv
}
`
windowsStartup = windowsRecordBoot + windowsSetWsus + utils.InstallOSConfigGooGet
linuxRecordBoot = `
uri=http://metadata.google.internal/computeMetadata/v1/instance/guest-attributes/osconfig_tests/boot_count
old=$(curl $uri -H "Metadata-Flavor: Google" -f)
new=$(($old + 1))
curl -X PUT --data "${new}" $uri -H "Metadata-Flavor: Google"
`
aptStartup = linuxRecordBoot + utils.InstallOSConfigDeb
el6Startup = linuxRecordBoot + utils.InstallOSConfigYumEL6
el7Startup = linuxRecordBoot + "yum install -y yum-utils\n" + utils.InstallOSConfigYumEL7
windowsSetup = &patchTestSetup{
assertTimeout: 60 * time.Minute,
startup: &compute.MetadataItems{
Key: "windows-startup-script-ps1",
Value: &windowsStartup,
},
machineType: "n1-standard-4",
}
aptSetup = &patchTestSetup{
assertTimeout: 5 * time.Minute,
startup: &compute.MetadataItems{
Key: "startup-script",
Value: &aptStartup,
},
machineType: "n1-standard-2",
}
el6Setup = &patchTestSetup{
assertTimeout: 5 * time.Minute,
startup: &compute.MetadataItems{
Key: "startup-script",
Value: &el6Startup,
},
machineType: "n1-standard-2",
}
el7Setup = &patchTestSetup{
assertTimeout: 5 * time.Minute,
startup: &compute.MetadataItems{
Key: "startup-script",
Value: &el7Startup,
},
machineType: "n1-standard-2",
}
el8Setup = &patchTestSetup{
assertTimeout: 5 * time.Minute,
startup: &compute.MetadataItems{
Key: "startup-script",
Value: &el7Startup,
},
machineType: "n1-standard-2",
}
)
func imageTestSetup(mapping map[*patchTestSetup]map[string]string) (setup []*patchTestSetup) {
for s, m := range mapping {
for name, image := range m {
new := patchTestSetup(*s)
new.testName = name
new.image = image
setup = append(setup, &new)
}
}
return
}
func headImageTestSetup() []*patchTestSetup {
// This maps a specific patchTestSetup to test setup names and associated images.
mapping := map[*patchTestSetup]map[string]string{
windowsSetup: utils.HeadWindowsImages,
el6Setup: utils.HeadEL6Images,
el7Setup: utils.HeadEL7Images,
el8Setup: utils.HeadEL8Images,
aptSetup: utils.HeadAptImages,
}
return imageTestSetup(mapping)
}
func oldImageTestSetup() []*patchTestSetup {
// This maps a specific patchTestSetup to test setup names and associated images.
mapping := map[*patchTestSetup]map[string]string{
windowsSetup: utils.OldWindowsImages,
el6Setup: utils.OldEL6Images,
el7Setup: utils.OldEL7Images,
aptSetup: utils.OldAptImages,
}
return imageTestSetup(mapping)
}
func aptHeadImageTestSetup() []*patchTestSetup {
// This maps a specific patchTestSetup to test setup names and associated images.
mapping := map[*patchTestSetup]map[string]string{
aptSetup: utils.HeadAptImages,
}
return imageTestSetup(mapping)
}
func yumHeadImageTestSetup() []*patchTestSetup {
// This maps a specific patchTestSetup to test setup names and associated images.
mapping := map[*patchTestSetup]map[string]string{
el6Setup: utils.HeadEL6Images,
el7Setup: utils.HeadEL7Images,
el8Setup: utils.HeadEL8Images,
}
return imageTestSetup(mapping)
}
| 1 | 8,940 | consider the alias `compute` in case this ever references other apis. | GoogleCloudPlatform-compute-image-tools | go |
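A minimal sketch of the aliasing the comment above points at — the alias choice is illustrative, not taken from the PR. Giving the Compute API import a service-specific name keeps it unambiguous if other `google.golang.org/api/...` clients are added later; the local `osconfig_tests/compute` helper import (omitted here) would then need its own alias to avoid a name clash.

```go
package patch

import (
	"time"

	// Service-specific alias instead of the generic `api` (illustrative name).
	compute "google.golang.org/api/compute/v1"
)

type patchTestSetup struct {
	testName      string
	image         string
	metadata      []*compute.MetadataItems // reads as "Compute API metadata items"
	assertTimeout time.Duration
	machineType   string
}
```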
@@ -392,3 +392,7 @@ func (m *manager) GetFreezerState() (configs.FreezerState, error) {
}
return freezer.(*FreezerGroup).GetState(dir)
}
+
+func (m *manager) Exists() bool {
+ return cgroups.PathExists(m.paths["devices"])
+} | 1 | // +build linux
package fs
import (
"fmt"
"os"
"path/filepath"
"sync"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
var (
subsystemsLegacy = subsystemSet{
&CpusetGroup{},
&DevicesGroup{},
&MemoryGroup{},
&CpuGroup{},
&CpuacctGroup{},
&PidsGroup{},
&BlkioGroup{},
&HugetlbGroup{},
&NetClsGroup{},
&NetPrioGroup{},
&PerfEventGroup{},
&FreezerGroup{},
&NameGroup{GroupName: "name=systemd", Join: true},
}
HugePageSizes, _ = cgroups.GetHugePageSize()
)
var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist")
type subsystemSet []subsystem
func (s subsystemSet) Get(name string) (subsystem, error) {
for _, ss := range s {
if ss.Name() == name {
return ss, nil
}
}
return nil, errSubsystemDoesNotExist
}
type subsystem interface {
// Name returns the name of the subsystem.
Name() string
// Returns the stats, as 'stats', corresponding to the cgroup under 'path'.
GetStats(path string, stats *cgroups.Stats) error
// Removes the cgroup represented by 'cgroupData'.
Remove(*cgroupData) error
// Creates and joins the cgroup represented by 'cgroupData'.
Apply(*cgroupData) error
// Set the cgroup represented by cgroup.
Set(path string, cgroup *configs.Cgroup) error
}
type manager struct {
mu sync.Mutex
cgroups *configs.Cgroup
rootless bool // ignore permission-related errors
paths map[string]string
}
func NewManager(cg *configs.Cgroup, paths map[string]string, rootless bool) cgroups.Manager {
return &manager{
cgroups: cg,
paths: paths,
rootless: rootless,
}
}
// The absolute path to the root of the cgroup hierarchies.
var cgroupRootLock sync.Mutex
var cgroupRoot string
// Gets the cgroupRoot.
func getCgroupRoot() (string, error) {
cgroupRootLock.Lock()
defer cgroupRootLock.Unlock()
if cgroupRoot != "" {
return cgroupRoot, nil
}
root, err := cgroups.FindCgroupMountpointDir()
if err != nil {
return "", err
}
if _, err := os.Stat(root); err != nil {
return "", err
}
cgroupRoot = root
return cgroupRoot, nil
}
type cgroupData struct {
root string
innerPath string
config *configs.Cgroup
pid int
}
// isIgnorableError returns whether err is a permission error (in the loose
// sense of the word). This includes EROFS (which for an unprivileged user is
// basically a permission error) and EACCES (for similar reasons) as well as
// the normal EPERM.
func isIgnorableError(rootless bool, err error) bool {
// We do not ignore errors if we are root.
if !rootless {
return false
}
// TODO: rm errors.Cause once we switch to %w everywhere
err = errors.Cause(err)
// Is it an ordinary EPERM?
if errors.Is(err, os.ErrPermission) {
return true
}
// Handle some specific syscall errors.
var errno unix.Errno
if errors.As(err, &errno) {
return errno == unix.EROFS || errno == unix.EPERM || errno == unix.EACCES
}
return false
}
func (m *manager) getSubsystems() subsystemSet {
return subsystemsLegacy
}
func (m *manager) Apply(pid int) (err error) {
if m.cgroups == nil {
return nil
}
m.mu.Lock()
defer m.mu.Unlock()
var c = m.cgroups
d, err := getCgroupData(m.cgroups, pid)
if err != nil {
return err
}
m.paths = make(map[string]string)
if c.Paths != nil {
for name, path := range c.Paths {
_, err := d.path(name)
if err != nil {
if cgroups.IsNotFound(err) {
continue
}
return err
}
m.paths[name] = path
}
return cgroups.EnterPid(m.paths, pid)
}
for _, sys := range m.getSubsystems() {
p, err := d.path(sys.Name())
if err != nil {
// The non-presence of the devices subsystem is
// considered fatal for security reasons.
if cgroups.IsNotFound(err) && sys.Name() != "devices" {
continue
}
return err
}
m.paths[sys.Name()] = p
if err := sys.Apply(d); err != nil {
// In the case of rootless (including euid=0 in userns), where an
// explicit cgroup path hasn't been set, we don't bail on error in
// case of permission problems. Cases where limits have been set
// (and we couldn't create our own cgroup) are handled by Set.
if isIgnorableError(m.rootless, err) && m.cgroups.Path == "" {
delete(m.paths, sys.Name())
continue
}
return err
}
}
return nil
}
func (m *manager) Destroy() error {
if m.cgroups == nil || m.cgroups.Paths != nil {
return nil
}
m.mu.Lock()
defer m.mu.Unlock()
if err := cgroups.RemovePaths(m.paths); err != nil {
return err
}
m.paths = make(map[string]string)
return nil
}
func (m *manager) Path(subsys string) string {
m.mu.Lock()
defer m.mu.Unlock()
return m.paths[subsys]
}
func (m *manager) GetStats() (*cgroups.Stats, error) {
m.mu.Lock()
defer m.mu.Unlock()
stats := cgroups.NewStats()
for name, path := range m.paths {
sys, err := m.getSubsystems().Get(name)
if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
continue
}
if err := sys.GetStats(path, stats); err != nil {
return nil, err
}
}
return stats, nil
}
func (m *manager) Set(container *configs.Config) error {
if container.Cgroups == nil {
return nil
}
// If Paths are set, then we are just joining cgroups paths
// and there is no need to set any values.
if m.cgroups != nil && m.cgroups.Paths != nil {
return nil
}
m.mu.Lock()
defer m.mu.Unlock()
for _, sys := range m.getSubsystems() {
path := m.paths[sys.Name()]
if err := sys.Set(path, container.Cgroups); err != nil {
if m.rootless && sys.Name() == "devices" {
continue
}
// When m.Rootless is true, errors from the device subsystem are ignored because it is really not expected to work.
// However, errors from other subsystems are not ignored.
// see @test "runc create (rootless + limits + no cgrouppath + no permission) fails with informative error"
if path == "" {
// We never created a path for this cgroup, so we cannot set
// limits for it (though we have already tried at this point).
return fmt.Errorf("cannot set %s limit: container could not join or create cgroup", sys.Name())
}
return err
}
}
return nil
}
// Freeze toggles the container's freezer cgroup depending on the state
// provided
func (m *manager) Freeze(state configs.FreezerState) (Err error) {
path := m.GetPaths()["freezer"]
if m.cgroups == nil || path == "" {
return errors.New("cannot toggle freezer: cgroups not configured for container")
}
prevState := m.cgroups.Resources.Freezer
m.cgroups.Resources.Freezer = state
defer func() {
if Err != nil {
m.cgroups.Resources.Freezer = prevState
}
}()
freezer, err := m.getSubsystems().Get("freezer")
if err != nil {
return err
}
if err := freezer.Set(path, m.cgroups); err != nil {
return err
}
return nil
}
func (m *manager) GetPids() ([]int, error) {
return cgroups.GetPids(m.Path("devices"))
}
func (m *manager) GetAllPids() ([]int, error) {
return cgroups.GetAllPids(m.Path("devices"))
}
func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
root, err := getCgroupRoot()
if err != nil {
return nil, err
}
if (c.Name != "" || c.Parent != "") && c.Path != "" {
return nil, errors.New("cgroup: either Path or Name and Parent should be used")
}
// XXX: Do not remove this code. Path safety is important! -- cyphar
cgPath := libcontainerUtils.CleanPath(c.Path)
cgParent := libcontainerUtils.CleanPath(c.Parent)
cgName := libcontainerUtils.CleanPath(c.Name)
innerPath := cgPath
if innerPath == "" {
innerPath = filepath.Join(cgParent, cgName)
}
return &cgroupData{
root: root,
innerPath: innerPath,
config: c,
pid: pid,
}, nil
}
func (raw *cgroupData) path(subsystem string) (string, error) {
mnt, err := cgroups.FindCgroupMountpoint(raw.root, subsystem)
// If we didn't mount the subsystem, there is no point we make the path.
if err != nil {
return "", err
}
// If the cgroup name/path is absolute do not look relative to the cgroup of the init process.
if filepath.IsAbs(raw.innerPath) {
// Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
return filepath.Join(raw.root, filepath.Base(mnt), raw.innerPath), nil
}
// Use GetOwnCgroupPath instead of GetInitCgroupPath, because the creating
	// process could be in a container and share a pid namespace with the host, and
// /proc/1/cgroup could point to whole other world of cgroups.
parentPath, err := cgroups.GetOwnCgroupPath(subsystem)
if err != nil {
return "", err
}
return filepath.Join(parentPath, raw.innerPath), nil
}
func (raw *cgroupData) join(subsystem string) (string, error) {
path, err := raw.path(subsystem)
if err != nil {
return "", err
}
if err := os.MkdirAll(path, 0755); err != nil {
return "", err
}
if err := cgroups.WriteCgroupProc(path, raw.pid); err != nil {
return "", err
}
return path, nil
}
func removePath(p string, err error) error {
if err != nil {
return err
}
if p != "" {
return os.RemoveAll(p)
}
return nil
}
func (m *manager) GetPaths() map[string]string {
m.mu.Lock()
defer m.mu.Unlock()
return m.paths
}
func (m *manager) GetCgroups() (*configs.Cgroup, error) {
return m.cgroups, nil
}
func (m *manager) GetFreezerState() (configs.FreezerState, error) {
paths := m.GetPaths()
dir := paths["freezer"]
freezer, err := m.getSubsystems().Get("freezer")
// If the container doesn't have the freezer cgroup, say it's undefined.
if err != nil || dir == "" {
return configs.Undefined, nil
}
return freezer.(*FreezerGroup).GetState(dir)
}
| 1 | 19,359 | It probably doesn't matter in practice, but we're not supposed to access a map without holding a lock. This is why I have suggested using `m.Path("devices")` earlier -- it takes a lock before accessing m.paths. Alternatively, you can leave this code as is but add taking a lock (same as `Path()`). | opencontainers-runc | go |
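A sketch of the two options the comment above describes, written against the file quoted above (both rely on fields and helpers already defined there; only one variant would actually be kept):

```go
// Option A — what the comment suggests: go through the existing accessor,
// which takes m.mu before reading m.paths.
func (m *manager) Exists() bool {
	return cgroups.PathExists(m.Path("devices"))
}

// Option B — keep the direct map lookup but take the same lock Path() takes.
// (Different name here only so both variants can coexist in this sketch.)
func (m *manager) existsLocked() bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	return cgroups.PathExists(m.paths["devices"])
}
```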
@@ -33,6 +33,8 @@ let baseModuleStore = Modules.createModuleStore( 'tagmanager', {
'internalAMPContainerID',
'useSnippet',
'ownerID',
+ 'gaAMPPropertyID',
+ 'gaPropertyID',
],
submitChanges,
validateCanSubmitChanges, | 1 | /**
* `modules/tagmanager` base data store
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import Modules from 'googlesitekit-modules';
import { STORE_NAME } from './constants';
import { submitChanges, validateCanSubmitChanges } from './settings';
let baseModuleStore = Modules.createModuleStore( 'tagmanager', {
storeName: STORE_NAME,
settingSlugs: [
'accountID',
'ampContainerID',
'containerID',
'internalContainerID',
'internalAMPContainerID',
'useSnippet',
'ownerID',
],
submitChanges,
validateCanSubmitChanges,
} );
// Rename generated pieces to adhere to our convention.
baseModuleStore = ( ( { actions, selectors, ...store } ) => {
// eslint-disable-next-line sitekit/camelcase-acronyms
const { setAmpContainerID, ...restActions } = actions;
// eslint-disable-next-line sitekit/camelcase-acronyms
const { getAmpContainerID, ...restSelectors } = selectors;
return {
...store,
actions: {
...restActions,
// eslint-disable-next-line sitekit/camelcase-acronyms
setAMPContainerID: setAmpContainerID,
},
selectors: {
...restSelectors,
// eslint-disable-next-line sitekit/camelcase-acronyms
getAMPContainerID: getAmpContainerID,
},
};
} )( baseModuleStore );
export default baseModuleStore;
| 1 | 35,758 | Since `ga` is an acronym here, we should rename the generated actions and selectors to adhere to our naming conventions as it will no longer be capitalized properly. See below for how this is already done for the amp container ID settings. We should add `GA` to our list of checked acronyms as well in `packages/eslint-plugin/rules/acronym-case.js`. | google-site-kit-wp | js |
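A hedged sketch of what the rename could look like for the new `gaPropertyID` slug, following the AMP container ID pattern already in the file above. The generated names (`setGaPropertyID` / `getGaPropertyID`) are assumed by analogy with how `ampContainerID` is generated and may differ; `gaAMPPropertyID` would get the same treatment.

```js
// Rename generated pieces for the `gaPropertyID` setting so the GA acronym stays capitalized.
baseModuleStore = ( ( { actions, selectors, ...store } ) => {
	// eslint-disable-next-line sitekit/camelcase-acronyms
	const { setGaPropertyID, ...restActions } = actions;
	// eslint-disable-next-line sitekit/camelcase-acronyms
	const { getGaPropertyID, ...restSelectors } = selectors;

	return {
		...store,
		actions: {
			...restActions,
			// eslint-disable-next-line sitekit/camelcase-acronyms
			setGAPropertyID: setGaPropertyID,
		},
		selectors: {
			...restSelectors,
			// eslint-disable-next-line sitekit/camelcase-acronyms
			getGAPropertyID: getGaPropertyID,
		},
	};
} )( baseModuleStore );
```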
@@ -88,12 +88,11 @@ class TransloaditAssembly extends Emitter {
socket.on('assembly_upload_finished', (file) => {
this.emit('upload', file)
- this._fetchStatus({ diff: false })
+ this.status.uploads.push(file)
})
socket.on('assembly_uploading_finished', () => {
this.emit('executing')
- this._fetchStatus({ diff: false })
})
socket.on('assembly_upload_meta_data_extracted', () => { | 1 | const io = requireSocketIo
const Emitter = require('component-emitter')
const parseUrl = require('./parseUrl')
// Lazy load socket.io to avoid a console error
// in IE 10 when the Transloadit plugin is not used.
// (The console.error call comes from `buffer`. I
// think we actually don't use that part of socket.io
// at all…)
let socketIo
function requireSocketIo () {
if (!socketIo) {
socketIo = require('socket.io-client')
}
return socketIo
}
const ASSEMBLY_UPLOADING = 'ASSEMBLY_UPLOADING'
const ASSEMBLY_EXECUTING = 'ASSEMBLY_EXECUTING'
const ASSEMBLY_COMPLETED = 'ASSEMBLY_COMPLETED'
const statusOrder = [
ASSEMBLY_UPLOADING,
ASSEMBLY_EXECUTING,
ASSEMBLY_COMPLETED
]
/**
* Check that an assembly status is equal to or larger than some desired status.
* It checks for things that are larger so that a comparison like this works,
* when the old assembly status is UPLOADING but the new is FINISHED:
*
* !isStatus(oldStatus, ASSEMBLY_EXECUTING) && isStatus(newState, ASSEMBLY_EXECUTING)
*
* …so that we can emit the 'executing' event even if the execution step was so
* fast that we missed it.
*/
function isStatus (status, test) {
return statusOrder.indexOf(status) >= statusOrder.indexOf(test)
}
class TransloaditAssembly extends Emitter {
constructor (assembly) {
super()
// The current assembly status.
this.status = assembly
// The socket.io connection.
this.socket = null
// The interval timer for full status updates.
this.pollInterval = null
// Whether this assembly has been closed (finished or errored)
this.closed = false
}
connect () {
this._connectSocket()
this._beginPolling()
}
_onFinished () {
this.emit('finished')
this.close()
}
_connectSocket () {
const parsed = parseUrl(this.status.websocket_url)
const socket = io().connect(parsed.origin, {
transports: ['websocket'],
path: parsed.pathname
})
socket.on('connect', () => {
socket.emit('assembly_connect', {
id: this.status.assembly_id
})
this.emit('connect')
})
socket.on('error', () => {
socket.disconnect()
this.socket = null
})
socket.on('assembly_finished', () => {
this._onFinished()
})
socket.on('assembly_upload_finished', (file) => {
this.emit('upload', file)
this._fetchStatus({ diff: false })
})
socket.on('assembly_uploading_finished', () => {
this.emit('executing')
this._fetchStatus({ diff: false })
})
socket.on('assembly_upload_meta_data_extracted', () => {
this.emit('metadata')
this._fetchStatus({ diff: false })
})
socket.on('assembly_result_finished', (stepName, result) => {
this.emit('result', stepName, result)
this._fetchStatus({ diff: false })
})
socket.on('assembly_error', (err) => {
this._onError(err)
this._fetchStatus({ diff: false })
})
this.socket = socket
}
_onError (err) {
this.emit('error', Object.assign(new Error(err.message), err))
}
/**
* Begin polling for assembly status changes. This sends a request to the
* assembly status endpoint every so often, if the socket is not connected.
* If the socket connection fails or takes a long time, we won't miss any
* events.
*/
_beginPolling () {
this.pollInterval = setInterval(() => {
if (!this.socket || !this.socket.connected) {
this._fetchStatus()
}
}, 2000)
}
/**
* Reload assembly status. Useful if the socket doesn't work.
*
* Pass `diff: false` to avoid emitting diff events, instead only emitting
* 'status'.
*/
_fetchStatus ({ diff = true } = {}) {
return fetch(this.status.assembly_ssl_url)
.then((response) => response.json())
.then((status) => {
// Avoid updating if we closed during this request's lifetime.
if (this.closed) return
this.emit('status', status)
if (diff) {
this.updateStatus(status)
} else {
this.status = status
}
})
}
update () {
return this._fetchStatus({ diff: true })
}
/**
* Update this assembly's status with a full new object. Events will be
* emitted for status changes, new files, and new results.
*
* @param {Object} next The new assembly status object.
*/
updateStatus (next) {
this._diffStatus(this.status, next)
this.status = next
}
/**
* Diff two assembly statuses, and emit the events necessary to go from `prev`
* to `next`.
*
* @param {Object} prev The previous assembly status.
* @param {Object} next The new assembly status.
*/
_diffStatus (prev, next) {
const prevStatus = prev.ok
const nextStatus = next.ok
if (next.error && !prev.error) {
return this._onError(next)
}
// Desired emit order:
// - executing
// - (n × upload)
// - metadata
// - (m × result)
// - finished
// The below checks run in this order, that way even if we jump from
// UPLOADING straight to FINISHED all the events are emitted as expected.
const nowExecuting =
isStatus(nextStatus, ASSEMBLY_EXECUTING) &&
!isStatus(prevStatus, ASSEMBLY_EXECUTING)
if (nowExecuting) {
// Without WebSockets, this is our only way to tell if uploading finished.
// Hence, we emit this just before the 'upload's and before the 'metadata'
// event for the most intuitive ordering, corresponding to the _usual_
// ordering (if not guaranteed) that you'd get on the WebSocket.
this.emit('executing')
}
// Find new uploaded files.
Object.keys(next.uploads)
.filter((upload) => (
!prev.uploads.hasOwnProperty(upload)
))
.map((upload) => next.uploads[upload])
.forEach((upload) => {
this.emit('upload', upload)
})
if (nowExecuting) {
this.emit('metadata')
}
// Find new results.
Object.keys(next.results).forEach((stepName) => {
const nextResults = next.results[stepName]
const prevResults = prev.results[stepName]
nextResults
.filter((n) => !prevResults || !prevResults.some((p) => p.id === n.id))
.forEach((result) => {
this.emit('result', stepName, result)
})
})
if (isStatus(nextStatus, ASSEMBLY_COMPLETED) &&
!isStatus(prevStatus, ASSEMBLY_COMPLETED)) {
this.emit('finished')
}
}
/**
* Stop updating this assembly.
*/
close () {
this.closed = true
if (this.socket) {
this.socket.disconnect()
this.socket = null
}
clearInterval(this.pollInterval)
}
}
module.exports = TransloaditAssembly
| 1 | 12,203 | uploading_finished and upload_meta_data_extracted can fire very quickly after another, and there is not much difference in the Assembly status that's useful to us. I kept only the Assembly fetch after metadata is extracted, which ensures that we'll have all the correct `uploads.*.meta` properties on the client side. | transloadit-uppy | js |
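A condensed sketch of the handler arrangement described above, drawn from the diff and the file it modifies rather than quoted verbatim: only the metadata event triggers a full status re-fetch, the upload event records the file locally, and the uploading-finished event just emits.

```js
socket.on('assembly_upload_finished', (file) => {
  this.emit('upload', file)
  // keep the local status in sync without another round trip
  this.status.uploads.push(file)
})

socket.on('assembly_uploading_finished', () => {
  this.emit('executing')
})

socket.on('assembly_upload_meta_data_extracted', () => {
  this.emit('metadata')
  // a single fetch here is enough to pick up the uploads' `meta` properties
  this._fetchStatus({ diff: false })
})
```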
@@ -53,7 +53,10 @@ func (m *manager) getControllers() error {
file := filepath.Join(m.dirPath, "cgroup.controllers")
data, err := ioutil.ReadFile(file)
- if err != nil && !m.rootless {
+ if err != nil {
+ if m.rootless && m.config.Path == "" {
+ return nil
+ }
return err
}
fields := strings.Fields(string(data)) | 1 | // +build linux
package fs2
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/pkg/errors"
)
type manager struct {
config *configs.Cgroup
// dirPath is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope"
dirPath string
// controllers is content of "cgroup.controllers" file.
// excludes pseudo-controllers ("devices" and "freezer").
controllers map[string]struct{}
rootless bool
}
// NewManager creates a manager for cgroup v2 unified hierarchy.
// dirPath is like "/sys/fs/cgroup/user.slice/user-1001.slice/session-1.scope".
// If dirPath is empty, it is automatically set using config.
func NewManager(config *configs.Cgroup, dirPath string, rootless bool) (cgroups.Manager, error) {
if config == nil {
config = &configs.Cgroup{}
}
if dirPath == "" {
var err error
dirPath, err = defaultDirPath(config)
if err != nil {
return nil, err
}
}
m := &manager{
config: config,
dirPath: dirPath,
rootless: rootless,
}
return m, nil
}
func (m *manager) getControllers() error {
if m.controllers != nil {
return nil
}
file := filepath.Join(m.dirPath, "cgroup.controllers")
data, err := ioutil.ReadFile(file)
if err != nil && !m.rootless {
return err
}
fields := strings.Fields(string(data))
m.controllers = make(map[string]struct{}, len(fields))
for _, c := range fields {
m.controllers[c] = struct{}{}
}
return nil
}
func (m *manager) Apply(pid int) error {
if err := CreateCgroupPath(m.dirPath, m.config); err != nil {
return err
}
if err := cgroups.WriteCgroupProc(m.dirPath, pid); err != nil && !m.rootless {
return err
}
return nil
}
func (m *manager) GetPids() ([]int, error) {
return cgroups.GetPids(m.dirPath)
}
func (m *manager) GetAllPids() ([]int, error) {
return cgroups.GetAllPids(m.dirPath)
}
func (m *manager) GetStats() (*cgroups.Stats, error) {
var (
errs []error
)
st := cgroups.NewStats()
if err := m.getControllers(); err != nil {
return st, err
}
// pids (since kernel 4.5)
if _, ok := m.controllers["pids"]; ok {
if err := statPids(m.dirPath, st); err != nil {
errs = append(errs, err)
}
} else {
if err := statPidsWithoutController(m.dirPath, st); err != nil {
errs = append(errs, err)
}
}
// memory (since kernel 4.5)
if _, ok := m.controllers["memory"]; ok {
if err := statMemory(m.dirPath, st); err != nil {
errs = append(errs, err)
}
}
// io (since kernel 4.5)
if _, ok := m.controllers["io"]; ok {
if err := statIo(m.dirPath, st); err != nil {
errs = append(errs, err)
}
}
// cpu (since kernel 4.15)
if _, ok := m.controllers["cpu"]; ok {
if err := statCpu(m.dirPath, st); err != nil {
errs = append(errs, err)
}
}
// hugetlb (since kernel 5.6)
if _, ok := m.controllers["hugetlb"]; ok {
if err := statHugeTlb(m.dirPath, st); err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 && !m.rootless {
return st, errors.Errorf("error while statting cgroup v2: %+v", errs)
}
return st, nil
}
func (m *manager) Freeze(state configs.FreezerState) error {
if err := setFreezer(m.dirPath, state); err != nil {
return err
}
m.config.Resources.Freezer = state
return nil
}
func (m *manager) Destroy() error {
if err := os.Remove(m.dirPath); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// GetPaths is for compatibility purpose and should be removed in future
func (m *manager) GetPaths() map[string]string {
_ = m.getControllers()
paths := map[string]string{
// pseudo-controller for compatibility
"devices": m.dirPath,
"freezer": m.dirPath,
}
for c := range m.controllers {
paths[c] = m.dirPath
}
return paths
}
func (m *manager) GetUnifiedPath() (string, error) {
return m.dirPath, nil
}
func (m *manager) Set(container *configs.Config) error {
if container == nil || container.Cgroups == nil {
return nil
}
if err := m.getControllers(); err != nil {
return err
}
var errs []error
// pids (since kernel 4.5)
if _, ok := m.controllers["pids"]; ok {
if err := setPids(m.dirPath, container.Cgroups); err != nil {
errs = append(errs, err)
}
}
// memory (since kernel 4.5)
if _, ok := m.controllers["memory"]; ok {
if err := setMemory(m.dirPath, container.Cgroups); err != nil {
errs = append(errs, err)
}
}
// io (since kernel 4.5)
if _, ok := m.controllers["io"]; ok {
if err := setIo(m.dirPath, container.Cgroups); err != nil {
errs = append(errs, err)
}
}
// cpu (since kernel 4.15)
if _, ok := m.controllers["cpu"]; ok {
if err := setCpu(m.dirPath, container.Cgroups); err != nil {
errs = append(errs, err)
}
}
// devices (since kernel 4.15, pseudo-controller)
if err := setDevices(m.dirPath, container.Cgroups); err != nil {
errs = append(errs, err)
}
// cpuset (since kernel 5.0)
if _, ok := m.controllers["cpuset"]; ok {
if err := setCpuset(m.dirPath, container.Cgroups); err != nil {
errs = append(errs, err)
}
}
// hugetlb (since kernel 5.6)
if _, ok := m.controllers["hugetlb"]; ok {
if err := setHugeTlb(m.dirPath, container.Cgroups); err != nil {
errs = append(errs, err)
}
}
// freezer (since kernel 5.2, pseudo-controller)
if err := setFreezer(m.dirPath, container.Cgroups.Freezer); err != nil {
errs = append(errs, err)
}
if len(errs) > 0 && !m.rootless {
return errors.Errorf("error while setting cgroup v2: %+v", errs)
}
m.config = container.Cgroups
return nil
}
func (m *manager) GetCgroups() (*configs.Cgroup, error) {
return m.config, nil
}
| 1 | 19,372 | Does it make sense to keep trying to read the file every time the function is called, or should we maybe use `sync.Once()` here? | opencontainers-runc | go |
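A sketch of the `sync.Once` variant the question above points at, assuming two new fields on `manager` (`controllersOnce sync.Once`, `controllersErr error`) and an extra `"sync"` import; note it also stops retrying the read on later calls, which is part of the trade-off being asked about.

```go
func (m *manager) getControllers() error {
	m.controllersOnce.Do(func() {
		data, err := ioutil.ReadFile(filepath.Join(m.dirPath, "cgroup.controllers"))
		if err != nil {
			// Tolerated for rootless without an explicit cgroup path, as in the diff above.
			if m.rootless && m.config.Path == "" {
				return
			}
			m.controllersErr = err
			return
		}
		fields := strings.Fields(string(data))
		m.controllers = make(map[string]struct{}, len(fields))
		for _, c := range fields {
			m.controllers[c] = struct{}{}
		}
	})
	return m.controllersErr
}
```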
@@ -59,6 +59,7 @@ type Bee struct {
type Options struct {
DataDir string
+ DbCapacity uint64
Password string
APIAddr string
DebugAPIAddr string | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package node
import (
"context"
"fmt"
"io"
"log"
"net"
"net/http"
"path/filepath"
"sync"
"sync/atomic"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/debugapi"
"github.com/ethersphere/bee/pkg/hive"
"github.com/ethersphere/bee/pkg/kademlia"
"github.com/ethersphere/bee/pkg/keystore"
filekeystore "github.com/ethersphere/bee/pkg/keystore/file"
memkeystore "github.com/ethersphere/bee/pkg/keystore/mem"
"github.com/ethersphere/bee/pkg/localstore"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/metrics"
"github.com/ethersphere/bee/pkg/netstore"
"github.com/ethersphere/bee/pkg/p2p/libp2p"
"github.com/ethersphere/bee/pkg/pingpong"
"github.com/ethersphere/bee/pkg/pusher"
"github.com/ethersphere/bee/pkg/pushsync"
"github.com/ethersphere/bee/pkg/retrieval"
"github.com/ethersphere/bee/pkg/statestore/leveldb"
mockinmem "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/ethersphere/bee/pkg/validator"
ma "github.com/multiformats/go-multiaddr"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type Bee struct {
p2pService io.Closer
p2pCancel context.CancelFunc
apiServer *http.Server
debugAPIServer *http.Server
errorLogWriter *io.PipeWriter
tracerCloser io.Closer
stateStoreCloser io.Closer
localstoreCloser io.Closer
topologyCloser io.Closer
pusherCloser io.Closer
}
type Options struct {
DataDir string
Password string
APIAddr string
DebugAPIAddr string
Addr string
DisableWS bool
DisableQUIC bool
NetworkID uint64
Bootnodes []string
Logger logging.Logger
TracingEnabled bool
TracingEndpoint string
TracingServiceName string
}
func NewBee(o Options) (*Bee, error) {
logger := o.Logger
tracer, tracerCloser, err := tracing.NewTracer(&tracing.Options{
Enabled: o.TracingEnabled,
Endpoint: o.TracingEndpoint,
ServiceName: o.TracingServiceName,
})
if err != nil {
return nil, fmt.Errorf("tracer: %w", err)
}
p2pCtx, p2pCancel := context.WithCancel(context.Background())
b := &Bee{
p2pCancel: p2pCancel,
errorLogWriter: logger.WriterLevel(logrus.ErrorLevel),
tracerCloser: tracerCloser,
}
var keyStore keystore.Service
if o.DataDir == "" {
keyStore = memkeystore.New()
logger.Warning("data directory not provided, keys are not persisted")
} else {
keyStore = filekeystore.New(filepath.Join(o.DataDir, "keys"))
}
swarmPrivateKey, created, err := keyStore.Key("swarm", o.Password)
if err != nil {
return nil, fmt.Errorf("swarm key: %w", err)
}
address := crypto.NewOverlayAddress(swarmPrivateKey.PublicKey, o.NetworkID)
if created {
logger.Info("new swarm key created")
}
logger.Infof("address: %s", address)
// Construct P2P service.
libp2pPrivateKey, created, err := keyStore.Key("libp2p", o.Password)
if err != nil {
return nil, fmt.Errorf("libp2p key: %w", err)
}
if created {
logger.Infof("new libp2p key created")
}
var stateStore storage.StateStorer
if o.DataDir == "" {
stateStore = mockinmem.NewStateStore()
logger.Warning("using in-mem state store. no node state will be persisted")
} else {
stateStore, err = leveldb.NewStateStore(filepath.Join(o.DataDir, "statestore"))
if err != nil {
return nil, fmt.Errorf("statestore: %w", err)
}
}
b.stateStoreCloser = stateStore
addressbook := addressbook.New(stateStore)
signer := crypto.NewDefaultSigner(swarmPrivateKey)
p2ps, err := libp2p.New(p2pCtx, signer, o.NetworkID, address, o.Addr, libp2p.Options{
PrivateKey: libp2pPrivateKey,
DisableWS: o.DisableWS,
DisableQUIC: o.DisableQUIC,
Addressbook: addressbook,
Logger: logger,
Tracer: tracer,
})
if err != nil {
return nil, fmt.Errorf("p2p service: %w", err)
}
b.p2pService = p2ps
// Construct protocols.
pingPong := pingpong.New(pingpong.Options{
Streamer: p2ps,
Logger: logger,
Tracer: tracer,
})
if err = p2ps.AddProtocol(pingPong.Protocol()); err != nil {
return nil, fmt.Errorf("pingpong service: %w", err)
}
hive := hive.New(hive.Options{
Streamer: p2ps,
AddressBook: addressbook,
NetworkID: o.NetworkID,
Logger: logger,
})
if err = p2ps.AddProtocol(hive.Protocol()); err != nil {
return nil, fmt.Errorf("hive service: %w", err)
}
topologyDriver := kademlia.New(kademlia.Options{Base: address, Discovery: hive, AddressBook: addressbook, P2P: p2ps, Logger: logger})
b.topologyCloser = topologyDriver
hive.SetPeerAddedHandler(topologyDriver.AddPeer)
p2ps.SetNotifier(topologyDriver)
addrs, err := p2ps.Addresses()
if err != nil {
return nil, fmt.Errorf("get server addresses: %w", err)
}
for _, addr := range addrs {
logger.Infof("p2p address: %s", addr)
}
var (
storer storage.Storer
path = ""
)
if o.DataDir != "" {
path = filepath.Join(o.DataDir, "localstore")
}
storer, err = localstore.New(path, address.Bytes(), nil, logger)
if err != nil {
return nil, fmt.Errorf("localstore: %w", err)
}
b.localstoreCloser = storer
retrieve := retrieval.New(retrieval.Options{
Streamer: p2ps,
ChunkPeerer: topologyDriver,
Storer: storer,
Logger: logger,
})
if err = p2ps.AddProtocol(retrieve.Protocol()); err != nil {
return nil, fmt.Errorf("retrieval service: %w", err)
}
ns := netstore.New(storer, retrieve, validator.NewContentAddressValidator())
pushSyncProtocol := pushsync.New(pushsync.Options{
Streamer: p2ps,
Storer: storer,
ClosestPeerer: topologyDriver,
Logger: logger,
})
if err = p2ps.AddProtocol(pushSyncProtocol.Protocol()); err != nil {
return nil, fmt.Errorf("pushsync service: %w", err)
}
pushSyncPusher := pusher.New(pusher.Options{
Storer: storer,
PeerSuggester: topologyDriver,
PushSyncer: pushSyncProtocol,
Logger: logger,
})
b.pusherCloser = pushSyncPusher
var apiService api.Service
if o.APIAddr != "" {
// API server
apiService = api.New(api.Options{
Pingpong: pingPong,
Storer: ns,
Logger: logger,
Tracer: tracer,
})
apiListener, err := net.Listen("tcp", o.APIAddr)
if err != nil {
return nil, fmt.Errorf("api listener: %w", err)
}
apiServer := &http.Server{
Handler: apiService,
ErrorLog: log.New(b.errorLogWriter, "", 0),
}
go func() {
logger.Infof("api address: %s", apiListener.Addr())
if err := apiServer.Serve(apiListener); err != nil && err != http.ErrServerClosed {
logger.Debugf("api server: %v", err)
logger.Error("unable to serve api")
}
}()
b.apiServer = apiServer
}
if o.DebugAPIAddr != "" {
// Debug API server
debugAPIService := debugapi.New(debugapi.Options{
Overlay: address,
P2P: p2ps,
Logger: logger,
Addressbook: addressbook,
TopologyDriver: topologyDriver,
Storer: storer,
})
// register metrics from components
debugAPIService.MustRegisterMetrics(p2ps.Metrics()...)
debugAPIService.MustRegisterMetrics(pingPong.Metrics()...)
if apiService != nil {
debugAPIService.MustRegisterMetrics(apiService.Metrics()...)
}
if l, ok := logger.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(l.Metrics()...)
}
debugAPIListener, err := net.Listen("tcp", o.DebugAPIAddr)
if err != nil {
return nil, fmt.Errorf("debug api listener: %w", err)
}
debugAPIServer := &http.Server{
Handler: debugAPIService,
ErrorLog: log.New(b.errorLogWriter, "", 0),
}
go func() {
logger.Infof("debug api address: %s", debugAPIListener.Addr())
if err := debugAPIServer.Serve(debugAPIListener); err != nil && err != http.ErrServerClosed {
logger.Debugf("debug api server: %v", err)
logger.Error("unable to serve debug api")
}
}()
b.debugAPIServer = debugAPIServer
}
overlays, err := addressbook.Overlays()
if err != nil {
return nil, fmt.Errorf("addressbook overlays: %w", err)
}
var count int32
var wg sync.WaitGroup
jobsC := make(chan struct{}, 16)
for _, o := range overlays {
jobsC <- struct{}{}
wg.Add(1)
go func(overlay swarm.Address) {
defer func() {
<-jobsC
}()
defer wg.Done()
if err := topologyDriver.AddPeer(p2pCtx, overlay); err != nil {
logger.Debugf("topology add peer fail %s: %v", overlay, err)
logger.Errorf("topology add peer %s", overlay)
return
}
atomic.AddInt32(&count, 1)
}(o)
}
wg.Wait()
	// Connect bootnodes if no nodes from the addressbook were successfully added to topology
if count == 0 {
for _, a := range o.Bootnodes {
wg.Add(1)
go func(a string) {
defer wg.Done()
addr, err := ma.NewMultiaddr(a)
if err != nil {
logger.Debugf("multiaddress fail %s: %v", a, err)
logger.Errorf("connect to bootnode %s", a)
return
}
bzzAddr, err := p2ps.Connect(p2pCtx, addr)
if err != nil {
logger.Debugf("connect fail %s: %v", a, err)
logger.Errorf("connect to bootnode %s", a)
return
}
err = addressbook.Put(bzzAddr.Overlay, *bzzAddr)
if err != nil {
_ = p2ps.Disconnect(bzzAddr.Overlay)
logger.Debugf("addressbook error persisting %s %s: %v", a, bzzAddr.Overlay, err)
logger.Errorf("connect to bootnode %s", a)
return
}
if err := topologyDriver.Connected(p2pCtx, bzzAddr.Overlay); err != nil {
_ = p2ps.Disconnect(bzzAddr.Overlay)
logger.Debugf("topology connected fail %s %s: %v", a, bzzAddr.Overlay, err)
logger.Errorf("connect to bootnode %s", a)
return
}
}(a)
}
wg.Wait()
}
return b, nil
}
func (b *Bee) Shutdown(ctx context.Context) error {
var eg errgroup.Group
if b.apiServer != nil {
eg.Go(func() error {
if err := b.apiServer.Shutdown(ctx); err != nil {
return fmt.Errorf("api server: %w", err)
}
return nil
})
}
if b.debugAPIServer != nil {
eg.Go(func() error {
if err := b.debugAPIServer.Shutdown(ctx); err != nil {
return fmt.Errorf("debug api server: %w", err)
}
return nil
})
}
if err := eg.Wait(); err != nil {
return err
}
if err := b.pusherCloser.Close(); err != nil {
return fmt.Errorf("pusher: %w", err)
}
b.p2pCancel()
if err := b.p2pService.Close(); err != nil {
return fmt.Errorf("p2p server: %w", err)
}
if err := b.tracerCloser.Close(); err != nil {
return fmt.Errorf("tracer: %w", err)
}
if err := b.stateStoreCloser.Close(); err != nil {
return fmt.Errorf("statestore: %w", err)
}
if err := b.localstoreCloser.Close(); err != nil {
return fmt.Errorf("localstore: %w", err)
}
if err := b.topologyCloser.Close(); err != nil {
return fmt.Errorf("topology driver: %w", err)
}
return b.errorLogWriter.Close()
}
| 1 | 10,304 | `DbCapacity` -> `DBCapacity` | ethersphere-bee | go |
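The rename asked for above, shown in place (other fields elided); it matches how the struct already capitalizes initialisms such as `APIAddr` and `NetworkID`.

```go
type Options struct {
	DataDir    string
	DBCapacity uint64
	Password   string
	APIAddr    string
	// ...
}
```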
@@ -56,8 +56,10 @@ namespace Nethermind.Network.Test
[TestCase(0, "0xa3f5ab08", 1_561_651L, "Unsynced")]
[TestCase(1_561_650L, "0xa3f5ab08", 1_561_651L, "Last Constantinople block")]
[TestCase(1_561_651L, "0xc25efa5c", 4_460_644L, "First Istanbul block")]
- [TestCase(4_460_644L, "0x757a1c47", 0L, "First Berlin block")]
- [TestCase(6_000_000L, "0x757a1c47", 0L, "Future Berlin block")]
+ [TestCase(4_460_644L, "0x757a1c47", 5_062_605, "First Berlin block")]
+ [TestCase(4_600_000L, "0x757a1c47", 5_062_605, "Future Berlin block")]
+ [TestCase(5_062_605L, "0xB8C6299D", 0L, "First London block")]
+ [TestCase(5_979_794L, "0xB8C6299D", 0L, "Future London block")]
public void Fork_id_and_hash_as_expected_on_goerli(long head, string forkHashHex, long next, string description)
{
Test(head, KnownHashes.GoerliGenesis, forkHashHex, next, description, GoerliSpecProvider.Instance, "goerli.json"); | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System.IO;
using FluentAssertions;
using Nethermind.Core.Crypto;
using Nethermind.Core.Extensions;
using Nethermind.Core.Specs;
using Nethermind.Serialization.Json;
using Nethermind.Specs;
using Nethermind.Specs.ChainSpecStyle;
using NUnit.Framework;
namespace Nethermind.Network.Test
{
[Parallelizable(ParallelScope.Self)]
[TestFixture]
public class ForkInfoTests
{
[TestCase(0, "0xfc64ec04", 1_150_000, "Unsynced")]
[TestCase(1_149_999, "0xfc64ec04", 1_150_000, "Last Frontier block")]
[TestCase(1_150_000, "0x97c2c34c", 1_920_000, "First Homestead block")]
[TestCase(1_919_999, "0x97c2c34c", 1_920_000, "Last Homestead block")]
[TestCase(1_920_000, "0x91d1f948", 2_463_000, "First DAO block")]
[TestCase(2_462_999, "0x91d1f948", 2_463_000, "Last DAO block")]
[TestCase(2_463_000, "0x7a64da13", 2_675_000, "First Tangerine block")]
[TestCase(2_674_999, "0x7a64da13", 2_675_000, "Last Tangerine block")]
[TestCase(2_675_000, "0x3edd5b10", 4_370_000, "First Spurious block")]
[TestCase(4_369_999, "0x3edd5b10", 4_370_000, "Last Spurious block")]
[TestCase(4_370_000, "0xa00bc324", 7_280_000, "First Byzantium block")]
[TestCase(7_279_999, "0xa00bc324", 7_280_000, "Last Byzantium block")]
[TestCase(7_280_000, "0x668db0af", 9_069_000, "First Constantinople block")]
[TestCase(9_068_999, "0x668db0af", 9_069_000, "Last Constantinople block")]
[TestCase(9_069_000, "0x879d6e30", 9_200_000, "First Istanbul block")]
[TestCase(9_199_999, "0x879d6e30", 9_200_000, "Last Istanbul block")]
[TestCase(9_200_000, "0xe029e991", 12_244_000, "Last Muir Glacier")]
[TestCase(12_244_000, "0x0eb440f6", 0, "First Berlin")]
public void Fork_id_and_hash_as_expected(long head, string forkHashHex, long next, string description)
{
Test(head, KnownHashes.MainnetGenesis, forkHashHex, next, description, MainnetSpecProvider.Instance, "foundation.json");
}
[TestCase(0, "0xa3f5ab08", 1_561_651L, "Unsynced")]
[TestCase(1_561_650L, "0xa3f5ab08", 1_561_651L, "Last Constantinople block")]
[TestCase(1_561_651L, "0xc25efa5c", 4_460_644L, "First Istanbul block")]
[TestCase(4_460_644L, "0x757a1c47", 0L, "First Berlin block")]
[TestCase(6_000_000L, "0x757a1c47", 0L, "Future Berlin block")]
public void Fork_id_and_hash_as_expected_on_goerli(long head, string forkHashHex, long next, string description)
{
Test(head, KnownHashes.GoerliGenesis, forkHashHex, next, description, GoerliSpecProvider.Instance, "goerli.json");
}
[TestCase(0, "0x3b8e0691", 1, "Unsynced, last Frontier block")]
[TestCase(1, "0x60949295", 2, "First and last Homestead block")]
[TestCase(2, "0x8bde40dd", 3, "First and last Tangerine block")]
[TestCase(3, "0xcb3a64bb", 1035301, "First Spurious block")]
[TestCase(1_035_300L, "0xcb3a64bb", 1_035_301L, "Last Spurious block")]
[TestCase(1_035_301L, "0x8d748b57", 3_660_663L, "First Byzantium block")]
[TestCase(3_660_662L, "0x8d748b57", 3_660_663L, "Last Byzantium block")]
[TestCase(3_660_663L, "0xe49cab14", 4_321_234L, "First Constantinople block")]
[TestCase(4_321_233L, "0xe49cab14", 4_321_234L, "Last Constantinople block")]
[TestCase(4_321_234L, "0xafec6b27", 5_435_345L, "First Petersburg block")]
[TestCase(5_435_344L, "0xafec6b27", 5_435_345L, "Last Petersburg block")]
[TestCase(5_435_345L, "0xcbdb8838", 8_290_928L, "First Istanbul block")]
[TestCase(8_290_928L, "0x6910c8bd", 0L, "First Berlin block")]
[TestCase(9_000_000L, "0x6910c8bd", 0L, "Future Berlin block")]
public void Fork_id_and_hash_as_expected_on_rinkeby(long head, string forkHashHex, long next, string description)
{
Test(head, KnownHashes.RinkebyGenesis, forkHashHex, next, description, RinkebySpecProvider.Instance, "rinkeby.json");
}
[TestCase(0, "0x30c7ddbc", 10, " Unsynced, last Frontier, Homestead and first Tangerine block")]
[TestCase(9, "0x30c7ddbc", 10, "Last Tangerine block")]
[TestCase(10, "0x63760190", 1_700_000L, "First Spurious block")]
[TestCase(1_699_999L, "0x63760190", 1_700_000L, "Last Spurious block")]
[TestCase(1_700_000L, "0x3ea159c7", 4_230_000L, "First Byzantium block")]
[TestCase(4_229_999L, "0x3ea159c7", 4_230_000L, "Last Byzantium block")]
[TestCase(4_230_000L, "0x97b544f3", 4_939_394L, "First Constantinople block")]
[TestCase(4_939_393L, "0x97b544f3", 4_939_394L, "Last Constantinople block")]
[TestCase(4_939_394L, "0xd6e2149b", 6_485_846L, "First Petersburg block")]
[TestCase(6_485_845L, "0xd6e2149b", 6_485_846L, "Last Petersburg block")]
[TestCase(6_485_846L, "0x4bc66396", 7_117_117L, "First Istanbul block")]
[TestCase(7_117_117L, "0x6727ef90", 9_812_189L, "First Muir Glacier block")]
[TestCase(9_812_189L, "0xa157d377", 0L, "First Berlin block")]
[TestCase(9_900_000L, "0xa157d377", 0L, "Future Berlin block")]
public void Fork_id_and_hash_as_expected_on_ropsten(long head, string forkHashHex, long next, string description)
{
Test(head, KnownHashes.RopstenGenesis, forkHashHex, next, description, RopstenSpecProvider.Instance, "ropsten.json");
}
private static void Test(long head, Keccak genesisHash, string forkHashHex, long next, string description, ISpecProvider specProvider, string chainSpec)
{
Test(head, genesisHash, forkHashHex, next, description, specProvider);
ChainSpecLoader loader = new ChainSpecLoader(new EthereumJsonSerializer());
ChainSpec spec = loader.Load(File.ReadAllText("../../../../Chains/" + chainSpec));
ChainSpecBasedSpecProvider provider = new ChainSpecBasedSpecProvider(spec);
Test(head, genesisHash, forkHashHex, next, description, provider);
}
private static void Test(long head, Keccak genesisHash, string forkHashHex, long next, string description, ISpecProvider specProvider)
{
byte[] expectedForkHash = Bytes.FromHexString(forkHashHex);
byte[] forkHash = ForkInfo.CalculateForkHash(specProvider, head, genesisHash);
forkHash.Should().BeEquivalentTo(expectedForkHash, description);
ForkId forkId = ForkInfo.CalculateForkId(specProvider, head, genesisHash);
forkId.Next.Should().Be(next);
forkId.ForkHash.Should().BeEquivalentTo(expectedForkHash);
}
}
}
| 1 | 25,403 | For Berlin I confirmed all the fork hashes with the Geth team. Would you do the same with Martin? He responded quickly the last time. | NethermindEth-nethermind | .cs |
@@ -63,6 +63,19 @@ func Wireguard() error {
return sh.RunV(cmdParts[0], cmdParts[1:]...)
}
+// SOCKS5 builds and starts SOCKS5 service with terms accepted
+func SOCKS5() error {
+ if err := sh.RunV("bin/build"); err != nil {
+ return err
+ }
+ cmd := "build/myst/myst service --agreed-terms-and-conditions socks5"
+ if runtime.GOOS == "darwin" {
+ cmd = "sudo " + cmd
+ }
+ cmdParts := strings.Split(cmd, " ")
+ return sh.RunV(cmdParts[0], cmdParts[1:]...)
+}
+
// CLI builds and runs myst CLI
func CLI() error {
if err := sh.RunV("bin/build"); err != nil { | 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package dev
import (
"runtime"
"strings"
"github.com/magefile/mage/sh"
)
// Daemon builds and runs myst daemon
func Daemon() error {
if err := sh.RunV("bin/build"); err != nil {
return err
}
cmd := "build/myst/myst daemon"
if runtime.GOOS == "darwin" {
cmd = "sudo " + cmd
}
cmdParts := strings.Split(cmd, " ")
return sh.RunV(cmdParts[0], cmdParts[1:]...)
}
// Openvpn builds and starts openvpn service with terms accepted
func Openvpn() error {
if err := sh.RunV("bin/build"); err != nil {
return err
}
cmd := "build/myst/myst service --agreed-terms-and-conditions openvpn"
if runtime.GOOS == "darwin" {
cmd = "sudo " + cmd
}
cmdParts := strings.Split(cmd, " ")
return sh.RunV(cmdParts[0], cmdParts[1:]...)
}
// Wireguard builds and starts wireguard service with terms accepted
func Wireguard() error {
if err := sh.RunV("bin/build"); err != nil {
return err
}
cmd := "build/myst/myst service --agreed-terms-and-conditions wireguard"
if runtime.GOOS == "darwin" {
cmd = "sudo " + cmd
}
cmdParts := strings.Split(cmd, " ")
return sh.RunV(cmdParts[0], cmdParts[1:]...)
}
// CLI builds and runs myst CLI
func CLI() error {
if err := sh.RunV("bin/build"); err != nil {
return err
}
return sh.RunV("build/myst/myst", "cli")
}
| 1 | 15,535 | Is `sudo` required for `darwin` only? Don't we need it for `linux` too? | mysteriumnetwork-node | go |
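One possible answer to the question above, as an illustrative helper (the name and the policy are assumptions, not from the PR): decide based on privileges rather than on darwin alone, so Linux gets `sudo` too when needed. It assumes the `os` and `runtime` imports.

```go
// maybeSudo prepends sudo on Unix-like systems when not already running as root.
func maybeSudo(cmd string) string {
	if runtime.GOOS != "windows" && os.Geteuid() != 0 {
		return "sudo " + cmd
	}
	return cmd
}
```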
@@ -11,11 +11,17 @@ import (
"text/tabwriter"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer"
+ "github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/cloudformation"
+ "github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/resourcegroups"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/session"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/store"
sess "github.com/aws/aws-sdk-go/aws/session"
)
+const (
+ cloudformationResourceType = "AWS::CloudFormation::Stack"
+)
+
type EnvDescription struct {
Environment *archer.Environment `json:"environment"`
Applications []*archer.Application `json:"applications"` | 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package describe
import (
"bytes"
"encoding/json"
"fmt"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/color"
"text/tabwriter"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/session"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/store"
sess "github.com/aws/aws-sdk-go/aws/session"
)
type EnvDescription struct {
Environment *archer.Environment `json:"environment"`
Applications []*archer.Application `json:"applications"`
Tags map[string]string `json:"tags,omitempty"`
}
// EnvDescriber retrieves information about an environment.
type EnvDescriber struct {
env *archer.Environment
apps []*archer.Application
store storeSvc
sessProvider *sess.Session
}
// NewEnvDescriber instantiates an environment describer.
func NewEnvDescriber(projectName string, envName string) (*EnvDescriber, error) {
svc, err := store.New()
if err != nil {
return nil, fmt.Errorf("connect to store: %w", err)
}
env, err := svc.GetEnvironment(projectName, envName)
if err != nil {
return nil, err
}
apps, err := svc.ListApplications(projectName)
if err != nil {
return nil, err
}
sess, err := session.NewProvider().FromRole(env.ManagerRoleARN, env.Region)
if err != nil {
return nil, fmt.Errorf("assuming role for environment %s: %w", env.ManagerRoleARN, err)
}
return &EnvDescriber{
env: env,
store: svc,
apps: apps,
sessProvider: sess,
}, nil
}
// Describe returns info about a project's environment.
func (e *EnvDescriber) Describe() (*EnvDescription, error) {
var tags map[string]string
return &EnvDescription{
Environment: e.env,
Applications: e.apps,
Tags: tags,
}, nil
}
// JSONString returns the stringified EnvDescription struct with json format.
func (e *EnvDescription) JSONString() (string, error) {
b, err := json.Marshal(e)
if err != nil {
return "", fmt.Errorf("marshal applications: %w", err)
}
return fmt.Sprintf("%s\n", b), nil
}
// HumanString returns the stringified EnvDescription struct with human readable format.
func (e *EnvDescription) HumanString() string {
var b bytes.Buffer
writer := tabwriter.NewWriter(&b, minCellWidth, tabWidth, cellPaddingWidth, paddingChar, noAdditionalFormatting)
fmt.Fprintf(writer, color.Bold.Sprint("About\n\n"))
writer.Flush()
fmt.Fprintf(writer, " %s\t%s\n", "Name", e.Environment.Name)
fmt.Fprintf(writer, " %s\t%t\n", "Production", e.Environment.Prod)
fmt.Fprintf(writer, " %s\t%s\n", "Region", e.Environment.Region)
fmt.Fprintf(writer, " %s\t%s\n", "Account ID", e.Environment.AccountID)
fmt.Fprintf(writer, color.Bold.Sprint("\nApplications\n\n"))
writer.Flush()
fmt.Fprintf(writer, " %s\t%s\n", "Name", "Type")
for _, app := range e.Applications {
fmt.Fprintf(writer, " %s\t%s\n", app.Name, app.Type)
}
writer.Flush()
return b.String()
}
| 1 | 12,956 | Can you add a description to this exported struct? | aws-copilot-cli | go |
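A possible doc comment for the struct in question, following the Go convention of starting with the type name (the wording is only a suggestion):

```go
// EnvDescription contains the serializable description of an environment:
// the environment itself, the applications deployed to it, and any tags.
type EnvDescription struct {
	Environment  *archer.Environment   `json:"environment"`
	Applications []*archer.Application `json:"applications"`
	Tags         map[string]string     `json:"tags,omitempty"`
}
```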
@@ -68,13 +68,14 @@ export function renderComponent(component, opts, mountAll, isChild) {
state = component.state,
context = component.context,
previousProps = component.prevProps || props,
- previousState = component.prevState || state,
+ previousState = extend({}, component.prevState || state),
previousContext = component.prevContext || context,
isUpdate = component.base,
nextBase = component.nextBase,
initialBase = isUpdate || nextBase,
initialChildComponent = component._component,
skip = false,
+ snapshot = previousContext,
rendered, inst, cbase;
if (component.constructor.getDerivedStateFromProps) { | 1 | import { SYNC_RENDER, NO_RENDER, FORCE_RENDER, ASYNC_RENDER, ATTR_KEY } from '../constants';
import options from '../options';
import { extend } from '../util';
import { enqueueRender } from '../render-queue';
import { getNodeProps } from './index';
import { diff, mounts, diffLevel, flushMounts, recollectNodeTree, removeChildren } from './diff';
import { createComponent, collectComponent } from './component-recycler';
import { removeNode } from '../dom/index';
/**
* Set a component's `props` (generally derived from JSX attributes).
* @param {Object} props
* @param {number} [opts] Render mode, see constants.js for available options.
*/
export function setComponentProps(component, props, opts, context, mountAll) {
if (component._disable) return;
component._disable = true;
if ((component.__ref = props.ref)) delete props.ref;
if ((component.__key = props.key)) delete props.key;
if (typeof component.constructor.getDerivedStateFromProps === 'undefined') {
if (!component.base || mountAll) {
if (component.componentWillMount) component.componentWillMount();
}
else if (component.componentWillReceiveProps) {
component.componentWillReceiveProps(props, context);
}
}
if (context && context!==component.context) {
if (!component.prevContext) component.prevContext = component.context;
component.context = context;
}
if (!component.prevProps) component.prevProps = component.props;
component.props = props;
component._disable = false;
if (opts!==NO_RENDER) {
if (opts===SYNC_RENDER || options.syncComponentUpdates!==false || !component.base) {
renderComponent(component, SYNC_RENDER, mountAll);
}
else {
enqueueRender(component);
}
}
if (component.__ref) component.__ref(component);
}
/**
* Render a Component, triggering necessary lifecycle events and taking
* High-Order Components into account.
* @param {Component} component
* @param {number} [opts] render mode, see constants.js for available options.
* @param {boolean} [mountAll=false]
* @param {boolean} [isChild=false]
* @private
*/
export function renderComponent(component, opts, mountAll, isChild) {
if (component._disable) return;
let props = component.props,
state = component.state,
context = component.context,
previousProps = component.prevProps || props,
previousState = component.prevState || state,
previousContext = component.prevContext || context,
isUpdate = component.base,
nextBase = component.nextBase,
initialBase = isUpdate || nextBase,
initialChildComponent = component._component,
skip = false,
rendered, inst, cbase;
if (component.constructor.getDerivedStateFromProps) {
state = component.state = extend(state, component.constructor.getDerivedStateFromProps(props, state));
}
// if updating
if (isUpdate) {
component.props = previousProps;
component.state = previousState;
component.context = previousContext;
if (opts!==FORCE_RENDER
&& component.shouldComponentUpdate
&& component.shouldComponentUpdate(props, state, context) === false) {
skip = true;
}
else if (component.componentWillUpdate) {
component.componentWillUpdate(props, state, context);
}
component.props = props;
component.state = state;
component.context = context;
}
component.prevProps = component.prevState = component.prevContext = component.nextBase = null;
component._dirty = false;
if (!skip) {
rendered = component.render(props, state, context);
// context to pass to the child, can be updated via (grand-)parent component
if (component.getChildContext) {
context = extend(extend({}, context), component.getChildContext());
}
let childComponent = rendered && rendered.nodeName,
toUnmount, base;
if (typeof childComponent==='function') {
// set up high order component link
let childProps = getNodeProps(rendered);
inst = initialChildComponent;
if (inst && inst.constructor===childComponent && childProps.key==inst.__key) {
setComponentProps(inst, childProps, SYNC_RENDER, context, false);
}
else {
toUnmount = inst;
component._component = inst = createComponent(childComponent, childProps, context);
inst.nextBase = inst.nextBase || nextBase;
inst._parentComponent = component;
setComponentProps(inst, childProps, NO_RENDER, context, false);
renderComponent(inst, SYNC_RENDER, mountAll, true);
}
base = inst.base;
}
else {
cbase = initialBase;
// destroy high order component link
toUnmount = initialChildComponent;
if (toUnmount) {
cbase = component._component = null;
}
if (initialBase || opts===SYNC_RENDER) {
if (cbase) cbase._component = null;
base = diff(cbase, rendered, context, mountAll || !isUpdate, initialBase && initialBase.parentNode, true);
}
}
if (initialBase && base!==initialBase && inst!==initialChildComponent) {
let baseParent = initialBase.parentNode;
if (baseParent && base!==baseParent) {
baseParent.replaceChild(base, initialBase);
if (!toUnmount) {
initialBase._component = null;
recollectNodeTree(initialBase, false);
}
}
}
if (toUnmount) {
unmountComponent(toUnmount);
}
component.base = base;
if (base && !isChild) {
let componentRef = component,
t = component;
while ((t=t._parentComponent)) {
(componentRef = t).base = base;
}
base._component = componentRef;
base._componentConstructor = componentRef.constructor;
}
}
if (!isUpdate || mountAll) {
mounts.unshift(component);
}
else if (!skip) {
// Ensure that pending componentDidMount() hooks of child components
// are called before the componentDidUpdate() hook in the parent.
// Note: disabled as it causes duplicate hooks, see https://github.com/developit/preact/issues/750
// flushMounts();
if (component.componentDidUpdate) {
component.componentDidUpdate(previousProps, previousState, previousContext);
}
if (options.afterUpdate) options.afterUpdate(component);
}
if (component._renderCallbacks!=null) {
while (component._renderCallbacks.length) component._renderCallbacks.pop().call(component);
}
if (!diffLevel && !isChild) flushMounts();
}
/**
* Apply the Component referenced by a VNode to the DOM.
* @param {Element} dom The DOM node to mutate
* @param {VNode} vnode A Component-referencing VNode
* @returns {Element} The created/mutated element
* @private
*/
export function buildComponentFromVNode(dom, vnode, context, mountAll) {
let c = dom && dom._component,
originalComponent = c,
oldDom = dom,
isDirectOwner = c && dom._componentConstructor===vnode.nodeName,
isOwner = isDirectOwner,
props = getNodeProps(vnode);
while (c && !isOwner && (c=c._parentComponent)) {
isOwner = c.constructor===vnode.nodeName;
}
if (c && isOwner && (!mountAll || c._component)) {
setComponentProps(c, props, ASYNC_RENDER, context, mountAll);
dom = c.base;
}
else {
if (originalComponent && !isDirectOwner) {
unmountComponent(originalComponent);
dom = oldDom = null;
}
c = createComponent(vnode.nodeName, props, context);
if (dom && !c.nextBase) {
c.nextBase = dom;
// passing dom/oldDom as nextBase will recycle it if unused, so bypass recycling on L229:
oldDom = null;
}
setComponentProps(c, props, SYNC_RENDER, context, mountAll);
dom = c.base;
if (oldDom && dom!==oldDom) {
oldDom._component = null;
recollectNodeTree(oldDom, false);
}
}
return dom;
}
/**
* Remove a component from the DOM and recycle it.
* @param {Component} component The Component instance to unmount
* @private
*/
export function unmountComponent(component) {
if (options.beforeUnmount) options.beforeUnmount(component);
let base = component.base;
component._disable = true;
if (component.componentWillUnmount) component.componentWillUnmount();
component.base = null;
// recursively tear down & recollect high-order component children:
let inner = component._component;
if (inner) {
unmountComponent(inner);
}
else if (base) {
if (base[ATTR_KEY] && base[ATTR_KEY].ref) base[ATTR_KEY].ref(null);
component.nextBase = base;
removeNode(base);
collectComponent(component);
removeChildren(base);
}
if (component.__ref) component.__ref(null);
}
| 1 | 11,952 | Although this is needed for `getSnapshotBeforeUpdate` this also fixes a bug with `componentDidUpdate`. During rendering, the `state` variable is mutated. This has the consequence, that `previousState` would never hold the previous state, but the most current one. | preactjs-preact | js |
@@ -15,6 +15,7 @@ package ec2
import (
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
+ "github.com/aws/aws-sdk-go/service/elb/elbiface"
)
// Service holds a collection of interfaces. | 1 | // Copyright © 2018 The Kubernetes Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ec2
import (
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)
// Service holds a collection of interfaces.
// The interfaces are broken down like this to group functions together.
// One alternative is to have a large list of functions from the ec2 client.
type Service struct {
EC2 ec2iface.EC2API
}
// NewService returns a new service given the ec2 api client.
func NewService(i ec2iface.EC2API) *Service {
return &Service{
EC2: i,
}
}
| 1 | 6,262 | Should this maybe directly start with ELBv2 aka NLBs or ALBs? | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -59,6 +59,11 @@ describe( 'Tag Manager module setup', () => {
request.respond( {
status: 200,
} );
+ } else if ( request.url().match( 'google-site-kit/v1/core/site/data/notifications' ) ) {
+ request.respond( {
+ status: 200,
+ body: JSON.stringify( [] ),
+ } );
} else if ( request.url().match( 'modules/tagmanager/data/live-container-version' ) ) {
// Return a live container version without GA.
request.respond( { status: 200, body: JSON.stringify( liveContainerVersionFixture ) } ); | 1 | /**
* TagManager module setup tests.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import {
activatePlugin,
deactivatePlugin,
visitAdminPage,
createURL,
} from '@wordpress/e2e-test-utils';
/**
* Internal dependencies
*/
import {
deactivateUtilityPlugins,
pageWait,
resetSiteKit,
setAMPMode,
setupSiteKit,
useRequestInterception,
} from '../../../utils';
import liveContainerVersionFixture from '../../../../../assets/js/modules/tagmanager/datastore/__fixtures__/live-container-version.json';
async function proceedToTagManagerSetup() {
await visitAdminPage( 'admin.php', 'page=googlesitekit-settings' );
await page.waitForSelector( '.mdc-tab-bar' );
await expect( page ).toClick( '.mdc-tab', { text: /connect more services/i } );
await page.waitForSelector( '.googlesitekit-settings-connect-module--tagmanager' );
await Promise.all( [
page.waitForSelector( '.googlesitekit-setup-module__action .mdc-button' ),
expect( page ).toClick( '.googlesitekit-cta-link', { text: /set up tag manager/i } ),
] );
}
describe( 'Tag Manager module setup', () => {
beforeAll( async () => {
await page.setRequestInterception( true );
useRequestInterception( ( request ) => {
if ( request.url().match( 'google-site-kit/v1/data/' ) ) {
request.respond( {
status: 200,
} );
} else if ( request.url().match( 'modules/tagmanager/data/live-container-version' ) ) {
// Return a live container version without GA.
request.respond( { status: 200, body: JSON.stringify( liveContainerVersionFixture ) } );
} else if ( request.url().match( /^https:\/\/www\.googletagmanager\.com\/(gtm\.js|amp\.json)/ ) ) {
request.respond( { status: 200 } );
} else {
request.continue();
}
} );
} );
beforeEach( async () => {
await setupSiteKit();
} );
afterEach( async () => {
await deactivateUtilityPlugins();
await resetSiteKit();
} );
describe( 'Setup without AMP active', () => {
beforeAll( async () => {
await deactivatePlugin( 'amp' );
} );
it( 'displays account creation form when user has no Tag Manager account', async () => {
await activatePlugin( 'e2e-tests-module-setup-tagmanager-api-mock-no-account' );
await proceedToTagManagerSetup();
// Intercept the call to window.open and call our API to simulate a created account.
await page.evaluate( () => {
window.open = () => {
window._e2eApiFetch( {
path: 'google-site-kit/v1/e2e/setup/tagmanager/account-created',
method: 'post',
} );
};
} );
// Clicking Create Account button will switch API mock plugins on the server to the one that has accounts.
await Promise.all( [
page.waitForResponse( ( res ) => res.url().match( 'google-site-kit/v1/e2e/setup/tagmanager/account-created' ) ),
expect( page ).toClick( '.mdc-button', { text: /Create an account/i } ),
] );
await Promise.all( [
page.waitForResponse( ( req ) => req.url().match( 'tagmanager/data/accounts' ) ),
expect( page ).toClick( '.googlesitekit-cta-link', { text: /Re-fetch My Account/i } ),
] );
await page.waitForSelector( '.googlesitekit-setup-module__inputs' );
// Ensure account and container selections are cleared.
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-account .mdc-select__selected-text', { text: '' } );
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-container .mdc-select__selected-text', { text: '' } );
// Choose an account.
await expect( page ).toClick( '.googlesitekit-tagmanager__select-account' );
await expect( page ).toClick( '.mdc-menu-surface--open .mdc-list-item', { text: /test account a/i } );
// Ensure "Set up a new container" option is present in container select.
await expect( page ).toClick( '.googlesitekit-tagmanager__select-container' );
await expect( page ).toMatchElement( '.mdc-menu-surface--open .mdc-list-item', { text: /set up a new container/i } );
await expect( page ).toClick( '.mdc-menu-surface--open .mdc-list-item', { text: /test container x/i } );
await pageWait( 1000 );
await expect( page ).toClick( 'button', { text: /confirm \& continue/i } );
await page.waitForSelector( '.googlesitekit-publisher-win--win-success' );
await expect( page ).toMatchElement( '.googlesitekit-publisher-win__title', { text: /Congrats on completing the setup for Tag Manager!/i } );
// Ensure expected tag is placed.
await Promise.all( [
page.goto( createURL( '/' ) ),
page.waitForNavigation(),
] );
await expect( page ).toMatchElement( 'script[src^="https://www.googletagmanager.com/gtm.js?id=GTM-ABCXYZ"]' );
} );
it( 'displays available accounts and containers for the chosen account', async () => {
await activatePlugin( 'e2e-tests-module-setup-tagmanager-api-mock' );
await proceedToTagManagerSetup();
// Ensure only web container select is shown.
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-container--web' );
await expect( page ).not.toMatchElement( '.googlesitekit-tagmanager__select-container--amp' );
// Ensure account and container are selected by default.
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-account .mdc-select__selected-text', { text: /test account a/i } );
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-container .mdc-select__selected-text', { text: /test container x/i } );
// Ensure choosing a different account loads the proper values.
await expect( page ).toClick( '.googlesitekit-tagmanager__select-account' );
await Promise.all( [
page.waitForResponse( ( res ) => res.url().match( 'modules/tagmanager/data' ) ),
expect( page ).toClick( '.mdc-menu-surface--open .mdc-list-item', { text: /test account b/i } ),
] );
// Ensure account is selected.
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-account .mdc-select__selected-text', { text: /test account b/i } );
// Select a container.
await expect( page ).toClick( '.googlesitekit-tagmanager__select-container' );
// Ensure no AMP containers are shown as options.
// expect(...).not.toMatchElement with textContent matching does not work as expected.
await expect(
await page.$$eval( '.mdc-menu-surface--open .mdc-list-item', ( nodes ) => !! nodes.find( ( e ) => e.textContent.match( /test amp container/i ) ) )
).toStrictEqual( false );
await expect( page ).toClick( '.mdc-menu-surface--open .mdc-list-item', { text: /test container y/i } );
await pageWait( 1000 );
await expect( page ).toClick( 'button', { text: /confirm \& continue/i } );
await page.waitForSelector( '.googlesitekit-publisher-win--win-success' );
await expect( page ).toMatchElement( '.googlesitekit-publisher-win__title', { text: /Congrats on completing the setup for Tag Manager!/i } );
// Ensure expected tag is placed.
await Promise.all( [
page.goto( createURL( '/' ) ),
page.waitForNavigation(),
] );
await expect( page ).toMatchElement( 'script[src^="https://www.googletagmanager.com/gtm.js?id=GTM-BCDWXY"]' );
} );
it( 'displays instructions for account creation when "Set up a new account" option is selected', async () => {
await activatePlugin( 'e2e-tests-module-setup-tagmanager-api-mock' );
await proceedToTagManagerSetup();
// Ensure "setup a new account" is an available choice.
await expect( page ).toClick( '.googlesitekit-tagmanager__select-account' );
await expect( page ).toMatchElement( '.mdc-menu-surface--open .mdc-list-item', { text: /set up a new account/i } );
// Choose set up a new account.
await expect( page ).toClick( '.mdc-menu-surface--open .mdc-list-item', { text: /set up a new account/i } );
// Ensure instructions are present.
await expect( page ).toMatchElement( '.googlesitekit-setup-module p', { text: /to create a new account/i } );
// Ensure buttons are present.
await expect( page ).toMatchElement( '.googlesitekit-setup-module .mdc-button', { text: /create an account/i } );
await expect( page ).toMatchElement( '.googlesitekit-setup-module .googlesitekit-cta-link', { text: /re-fetch my account/i } );
} );
} );
describe( 'Setup with AMP active', () => {
beforeAll( async () => {
await activatePlugin( 'amp' );
} );
beforeEach( async () => {
await activatePlugin( 'e2e-tests-module-setup-tagmanager-api-mock' );
await proceedToTagManagerSetup();
} );
afterAll( async () => {
await deactivatePlugin( 'amp' );
} );
describe( 'with Primary AMP', () => {
beforeAll( async () => {
await setAMPMode( 'primary' );
} );
it( 'renders only the AMP container select menu', async () => {
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-container--amp' );
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-container--amp .mdc-floating-label', { text: 'Container' } );
await expect( page ).not.toMatchElement( '.googlesitekit-tagmanager__select-container--web' );
} );
it( 'validates Homepage AMP for logged-in users', async () => {
await expect( '/' ).toHaveValidAMPForUser();
} );
it( 'validates Homepage AMP for non-logged-in users', async () => {
await expect( '/' ).toHaveValidAMPForVisitor();
} );
} );
describe( 'with Secondary AMP', () => {
beforeAll( async () => {
await setAMPMode( 'secondary' );
} );
it( 'renders both the AMP and web container select menus', async () => {
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-container--web' );
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-container--web .mdc-floating-label', { text: 'Web Container' } );
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-container--amp' );
await expect( page ).toMatchElement( '.googlesitekit-tagmanager__select-container--amp .mdc-floating-label', { text: 'AMP Container' } );
} );
it( 'validates homepage AMP for logged-in users', async () => {
await expect( page ).toClick( 'button:not(:disabled)', { text: /confirm \& continue/i } );
await page.waitForSelector( '.googlesitekit-publisher-win--win-success' );
await expect( page ).toMatchElement( '.googlesitekit-publisher-win__title', { text: /Congrats on completing the setup for Tag Manager!/i } );
await page.goto( createURL( '/', 'amp' ), { waitUntil: 'load' } );
await expect( page ).toHaveValidAMPForUser();
} );
it( 'validates homepage AMP for non-logged-in users', async () => {
await expect( page ).toClick( 'button:not(:disabled)', { text: /confirm \& continue/i } );
await page.waitForSelector( '.googlesitekit-publisher-win--win-success' );
await expect( page ).toMatchElement( '.googlesitekit-publisher-win__title', { text: /Congrats on completing the setup for Tag Manager!/i } );
await page.goto( createURL( '/', 'amp' ), { waitUntil: 'load' } );
await expect( page ).toHaveValidAMPForVisitor();
} );
} );
} );
} );
| 1 | 36,079 | This was the request that was often causing the test to fail as an unexpected API failure (or at least one of them ). | google-site-kit-wp | js |
@@ -0,0 +1,13 @@
+class NetworkError extends Error {
+ constructor (error = null, xhr = null) {
+ super(error.message)
+
+ this.isNetworkError = true
+ this.originalRequest = xhr
+
+ const message = error.message + '. This looks like a network error, the endpoint might be blocked by an ISP or a firewall'
+ this.message = message
+ }
+}
+
+module.exports = NetworkError | 1 | 1 | 12,845 | I understand why `xhr = null`, but I think adding `error = null` implies that this method should work even if we don't pass the `error` argument. Should we remove it? | transloadit-uppy | js |
|
@@ -73,7 +73,8 @@ public abstract class GapicMethodConfig extends MethodConfig {
Method method,
ProtoMethodModel methodModel,
RetryCodesConfig retryCodesConfig,
- ImmutableSet<String> retryParamsConfigNames) {
+ ImmutableSet<String> retryParamsConfigNames,
+ GrpcGapicRetryMapping retryMapping) {
GrpcStreamingConfig grpcStreaming = null;
if (isGrpcStreamingMethod(methodModel)) { | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import static com.google.api.codegen.configgen.transformer.RetryTransformer.DEFAULT_MAX_RETRY_DELAY;
import com.google.api.codegen.BatchingConfigProto;
import com.google.api.codegen.MethodConfigProto;
import com.google.api.codegen.PageStreamingConfigProto;
import com.google.api.codegen.ReleaseLevel;
import com.google.api.codegen.ResourceNameTreatment;
import com.google.api.codegen.SurfaceTreatmentProto;
import com.google.api.codegen.VisibilityProto;
import com.google.api.codegen.common.TargetLanguage;
import com.google.api.codegen.configgen.ProtoMethodTransformer;
import com.google.api.codegen.transformer.RetryDefinitionsTransformer;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.codegen.util.ProtoParser;
import com.google.api.tools.framework.model.Diag;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.MessageType;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.ProtoFile;
import com.google.api.tools.framework.model.SimpleLocation;
import com.google.auto.value.AutoValue;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.threeten.bp.Duration;
/**
* GapicMethodConfig represents the code-gen config for a method, and includes the specification of
* features like page streaming and parameter flattening.
*/
@AutoValue
public abstract class GapicMethodConfig extends MethodConfig {
public Method getMethod() {
return ((ProtoMethodModel) getMethodModel()).getProtoMethod();
}
public abstract Iterable<String> getHeaderRequestParams();
/**
* Creates an instance of GapicMethodConfig based on MethodConfigProto, linking it up with the
* provided method. On errors, null will be returned, and diagnostics are reported to the diag
* collector.
*/
@Nullable
private static GapicMethodConfig.Builder createCommonMethodConfig(
DiagCollector diagCollector,
TargetLanguage language,
@Nonnull MethodConfigProto methodConfigProto,
Method method,
ProtoMethodModel methodModel,
RetryCodesConfig retryCodesConfig,
ImmutableSet<String> retryParamsConfigNames) {
GrpcStreamingConfig grpcStreaming = null;
if (isGrpcStreamingMethod(methodModel)) {
if (PageStreamingConfigProto.getDefaultInstance()
.equals(methodConfigProto.getGrpcStreaming())) {
grpcStreaming = GrpcStreamingConfig.createGrpcStreaming(diagCollector, method);
} else {
grpcStreaming =
GrpcStreamingConfig.createGrpcStreaming(
diagCollector, methodConfigProto.getGrpcStreaming(), method);
}
}
BatchingConfig batching = null;
if (!BatchingConfigProto.getDefaultInstance().equals(methodConfigProto.getBatching())) {
batching =
BatchingConfig.createBatching(
diagCollector, methodConfigProto.getBatching(), methodModel);
}
String retryCodesName = retryCodesConfig.getMethodRetryNames().get(method.getSimpleName());
String retryParamsName =
RetryDefinitionsTransformer.getRetryParamsName(
methodConfigProto, diagCollector, retryParamsConfigNames);
long defaultTimeout = methodConfigProto.getTimeoutMillis();
if (defaultTimeout <= 0) {
defaultTimeout = DEFAULT_MAX_RETRY_DELAY;
}
long timeoutMillis = ProtoMethodTransformer.getTimeoutMillis(methodModel, defaultTimeout);
Duration timeout = Duration.ofMillis(timeoutMillis);
if (timeout.toMillis() <= 0) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Default timeout not found or has invalid value (in method %s)",
methodModel.getFullName()));
}
List<String> sampleCodeInitFields =
new ArrayList<>(methodConfigProto.getSampleCodeInitFieldsList());
SampleSpec sampleSpec = new SampleSpec(methodConfigProto);
String rerouteToGrpcInterface =
Strings.emptyToNull(methodConfigProto.getRerouteToGrpcInterface());
VisibilityConfig visibility = VisibilityConfig.PUBLIC;
ReleaseLevel releaseLevel = ReleaseLevel.GA;
for (SurfaceTreatmentProto treatment : methodConfigProto.getSurfaceTreatmentsList()) {
if (!treatment.getIncludeLanguagesList().contains(language.toString().toLowerCase())) {
continue;
}
if (treatment.getVisibility() != VisibilityProto.UNSET_VISIBILITY) {
visibility = VisibilityConfig.fromProto(treatment.getVisibility());
}
if (treatment.getReleaseLevel() != ReleaseLevel.UNSET_RELEASE_LEVEL) {
releaseLevel = treatment.getReleaseLevel();
}
}
List<String> headerRequestParams = findHeaderRequestParams(method);
return new AutoValue_GapicMethodConfig.Builder()
.setMethodModel(methodModel)
.setGrpcStreaming(grpcStreaming)
.setRetryCodesConfigName(retryCodesName)
.setRetrySettingsConfigName(retryParamsName)
.setTimeout(timeout)
.setBatching(batching)
.setSampleCodeInitFields(sampleCodeInitFields)
.setSampleSpec(sampleSpec)
.setRerouteToGrpcInterface(rerouteToGrpcInterface)
.setVisibility(visibility)
.setReleaseLevel(releaseLevel)
.setHeaderRequestParams(headerRequestParams);
}
@Nullable
static GapicMethodConfig createGapicMethodConfigFromProto(
DiagCollector diagCollector,
TargetLanguage language,
String defaultPackageName,
@Nonnull MethodConfigProto methodConfigProto,
Method method,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
RetryCodesConfig retryCodesConfig,
ImmutableSet<String> retryParamsConfigNames,
ProtoParser protoParser) {
int previousErrors = diagCollector.getErrorCount();
ProtoMethodModel methodModel = new ProtoMethodModel(method);
ImmutableMap<String, String> fieldNamePatterns = getFieldNamePatterns(method, messageConfigs);
List<String> requiredFields = protoParser.getRequiredFields(method);
ResourceNameTreatment defaultResourceNameTreatment = ResourceNameTreatment.UNSET_TREATMENT;
GapicMethodConfig.Builder builder =
createCommonMethodConfig(
diagCollector,
language,
methodConfigProto,
method,
methodModel,
retryCodesConfig,
retryParamsConfigNames)
.setPageStreaming(
PageStreamingConfig.createPageStreamingConfig(
diagCollector,
defaultPackageName,
methodModel,
methodConfigProto,
messageConfigs,
resourceNameConfigs,
protoParser))
.setFlatteningConfigs(
FlatteningConfig.createFlatteningConfigs(
diagCollector,
messageConfigs,
resourceNameConfigs,
methodConfigProto,
methodModel,
protoParser))
.setFieldNamePatterns(fieldNamePatterns)
.setRequiredFieldConfigs(
createFieldNameConfigs(
diagCollector,
messageConfigs,
defaultResourceNameTreatment,
fieldNamePatterns,
resourceNameConfigs,
getRequiredFields(diagCollector, methodModel, requiredFields)))
.setOptionalFieldConfigs(
createFieldNameConfigs(
diagCollector,
messageConfigs,
defaultResourceNameTreatment,
fieldNamePatterns,
resourceNameConfigs,
getOptionalFields(methodModel, requiredFields)))
.setLroConfig(
LongRunningConfig.createLongRunningConfig(
method, diagCollector, methodConfigProto.getLongRunning(), protoParser));
if (diagCollector.getErrorCount() - previousErrors > 0) {
return null;
} else {
return builder.build();
}
}
@Nullable
static GapicMethodConfig createGapicMethodConfigFromGapicYaml(
DiagCollector diagCollector,
TargetLanguage language,
@Nonnull MethodConfigProto methodConfigProto,
Method method,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
RetryCodesConfig retryCodesConfig,
ImmutableSet<String> retryParamsConfigNames) {
int previousErrors = diagCollector.getErrorCount();
ProtoMethodModel methodModel = new ProtoMethodModel(method);
List<String> requiredFields = methodConfigProto.getRequiredFieldsList();
ImmutableMap<String, String> fieldNamePatterns =
ImmutableMap.copyOf(methodConfigProto.getFieldNamePatterns());
ResourceNameTreatment defaultResourceNameTreatment =
methodConfigProto.getResourceNameTreatment();
GapicMethodConfig.Builder builder =
createCommonMethodConfig(
diagCollector,
language,
methodConfigProto,
method,
methodModel,
retryCodesConfig,
retryParamsConfigNames)
.setPageStreaming(
PageStreamingConfig.createPageStreamingConfig(
diagCollector,
methodModel,
methodConfigProto,
messageConfigs,
resourceNameConfigs))
.setFlatteningConfigs(
FlatteningConfig.createFlatteningConfigs(
diagCollector,
messageConfigs,
resourceNameConfigs,
methodConfigProto,
methodModel))
.setFieldNamePatterns(fieldNamePatterns)
.setRequiredFieldConfigs(
createFieldNameConfigs(
diagCollector,
messageConfigs,
defaultResourceNameTreatment,
fieldNamePatterns,
resourceNameConfigs,
getRequiredFields(diagCollector, methodModel, requiredFields)))
.setOptionalFieldConfigs(
createFieldNameConfigs(
diagCollector,
messageConfigs,
defaultResourceNameTreatment,
fieldNamePatterns,
resourceNameConfigs,
getOptionalFields(methodModel, requiredFields)))
.setLroConfig(
LongRunningConfig.createLongRunningConfigFromGapicConfigOnly(
method.getModel(), diagCollector, methodConfigProto.getLongRunning()));
if (diagCollector.getErrorCount() - previousErrors > 0) {
return null;
} else {
return builder.build();
}
}
private static List<String> findHeaderRequestParams(Method method) {
// Always parse header request params only from proto annotations, even if GAPIC config is
// given.
ProtoParser protoParser = new ProtoParser(true);
return protoParser.getHeaderParams(method).asList();
}
@VisibleForTesting
static ResourceNameTreatment defaultResourceNameTreatmentFromProto(
Method method, ProtoParser protoParser, String defaultPackageName) {
if (method.getInputMessage().getFields().stream().anyMatch(protoParser::hasResourceReference)) {
String methodInputPackageName =
protoParser.getProtoPackage(((ProtoFile) method.getInputMessage().getParent()));
if (defaultPackageName.equals(methodInputPackageName)) {
return ResourceNameTreatment.STATIC_TYPES;
} else {
return ResourceNameTreatment.VALIDATE;
}
} else {
return ResourceNameTreatment.UNSET_TREATMENT;
}
}
public static ImmutableMap<String, String> getFieldNamePatterns(
Method method, ResourceNameMessageConfigs messageConfigs) {
ImmutableMap.Builder<String, String> resultCollector = ImmutableMap.builder();
// Only look two levels deep in the request object, so fields of fields of the request object.
getFieldNamePatterns(messageConfigs, method.getInputMessage(), resultCollector, "", 2);
return resultCollector.build();
}
/**
* Recursively populates the given map builder with field name patterns, up to a given depth.
*
* <p>A field name pattern entry maps a field name String, which can be a dot-separated nested
* field such as "shelf.name", to the String name of the resource entity that is represented by
* that field.
*
* <p>Note: this method does not check for circular references.
*
* @param messageConfigs ResourceNameMessageConfigs object
* @param messageType the starting messageType from which to parse fields for resource names
* @param resultCollector collects the resulting field name patterns
* @param fieldNamePrefix a nested field is prefixed by the parents' names, dot-separated
* @param depth number of levels deep in which to parse the messageType; must be positive int
*/
private static void getFieldNamePatterns(
ResourceNameMessageConfigs messageConfigs,
MessageType messageType,
ImmutableMap.Builder<String, String> resultCollector,
String fieldNamePrefix,
int depth) {
if (depth < 1) throw new IllegalStateException("depth must be positive");
for (Field field : messageType.getFields()) {
String fieldNameKey = fieldNamePrefix + field.getSimpleName();
if (field.getType().isMessage() && depth > 1) {
getFieldNamePatterns(
messageConfigs,
field.getType().getMessageType(),
resultCollector,
fieldNameKey + ".",
depth - 1);
}
if (messageConfigs.fieldHasResourceName(messageType.getFullName(), field.getSimpleName())) {
resultCollector.put(
fieldNameKey,
messageConfigs.getFieldResourceName(messageType.getFullName(), field.getSimpleName()));
}
}
}
/** Return the list of "one of" instances associated with the fields. */
@Override
public ImmutableList<ImmutableList<String>> getOneofNames(SurfaceNamer namer) {
return ProtoField.getOneofFieldsNames(getOptionalFields(), namer);
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder setHeaderRequestParams(Iterable<String> val);
public abstract Builder setMethodModel(MethodModel val);
public abstract Builder setPageStreaming(@Nullable PageStreamingConfig val);
public abstract Builder setGrpcStreaming(@Nullable GrpcStreamingConfig val);
public abstract Builder setFlatteningConfigs(@Nullable ImmutableList<FlatteningConfig> val);
public abstract Builder setRetryCodesConfigName(String val);
public abstract Builder setRetrySettingsConfigName(String val);
public abstract Builder setTimeout(Duration val);
public abstract Builder setRequiredFieldConfigs(ImmutableList<FieldConfig> val);
public abstract Builder setOptionalFieldConfigs(ImmutableList<FieldConfig> val);
public abstract Builder setBatching(@Nullable BatchingConfig val);
public abstract Builder setFieldNamePatterns(ImmutableMap<String, String> val);
public abstract Builder setSampleCodeInitFields(List<String> val);
public abstract Builder setSampleSpec(SampleSpec val);
public abstract Builder setRerouteToGrpcInterface(@Nullable String val);
public abstract Builder setVisibility(VisibilityConfig val);
public abstract Builder setReleaseLevel(ReleaseLevel val);
public abstract Builder setLroConfig(@Nullable LongRunningConfig val);
public abstract GapicMethodConfig build();
}
}
| 1 | 29,773 | It is not your code, and it is used like this all over the place in gapic generator, but the general rule is we should prefer using the least specific class/interface in the hierarchy, which still satisfies our requirements. Here, unless we really need anything specific from `ImmutableSet`, please use `Set` (less specific type) instead. | googleapis-gapic-generator | java |
@@ -162,9 +162,18 @@ struct flb_elasticsearch *flb_es_conf_create(struct flb_output_instance *ins,
}
ctx->aws_region = (char *) tmp;
+ tmp = flb_output_get_property("aws_sts_endpoint", ins);
+ if (!tmp) {
+ flb_error("[out_es] aws_sts_endpoint not set");
+ flb_es_conf_destroy(ctx);
+ return NULL;
+ }
+ ctx->aws_sts_endpoint = (char *) tmp;
+
ctx->aws_provider = flb_standard_chain_provider_create(config,
&ctx->aws_tls,
ctx->aws_region,
+ ctx->aws_sts_endpoint,
NULL,
flb_aws_client_generator());
if (!ctx->aws_provider) { | 1 | /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* Fluent Bit
* ==========
* Copyright (C) 2019-2020 The Fluent Bit Authors
* Copyright (C) 2015-2018 Treasure Data Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fluent-bit/flb_output_plugin.h>
#include <fluent-bit/flb_mem.h>
#include <fluent-bit/flb_utils.h>
#include <fluent-bit/flb_http_client.h>
#include <fluent-bit/flb_signv4.h>
#include <fluent-bit/flb_aws_credentials.h>
#include "es.h"
#include "es_conf.h"
struct flb_elasticsearch *flb_es_conf_create(struct flb_output_instance *ins,
struct flb_config *config)
{
int io_flags = 0;
ssize_t ret;
const char *tmp;
const char *path;
#ifdef FLB_HAVE_AWS
char *aws_role_arn = NULL;
char *aws_external_id = NULL;
char *aws_session_name = NULL;
#endif
struct flb_uri *uri = ins->host.uri;
struct flb_uri_field *f_index = NULL;
struct flb_uri_field *f_type = NULL;
struct flb_upstream *upstream;
struct flb_elasticsearch *ctx;
/* Allocate context */
ctx = flb_calloc(1, sizeof(struct flb_elasticsearch));
if (!ctx) {
flb_errno();
return NULL;
}
ctx->ins = ins;
if (uri) {
if (uri->count >= 2) {
f_index = flb_uri_get(uri, 0);
f_type = flb_uri_get(uri, 1);
}
}
/* Set default network configuration */
flb_output_net_default("127.0.0.1", 9200, ins);
/* Populate context with config map defaults and incoming properties */
ret = flb_output_config_map_set(ins, (void *) ctx);
if (ret == -1) {
flb_plg_error(ctx->ins, "configuration error");
flb_es_conf_destroy(ctx);
return NULL;
}
/* use TLS ? */
if (ins->use_tls == FLB_TRUE) {
io_flags = FLB_IO_TLS;
}
else {
io_flags = FLB_IO_TCP;
}
if (ins->host.ipv6 == FLB_TRUE) {
io_flags |= FLB_IO_IPV6;
}
/* Prepare an upstream handler */
upstream = flb_upstream_create(config,
ins->host.name,
ins->host.port,
io_flags,
&ins->tls);
if (!upstream) {
flb_plg_error(ctx->ins, "cannot create Upstream context");
flb_es_conf_destroy(ctx);
return NULL;
}
ctx->u = upstream;
/* Set instance flags into upstream */
flb_output_upstream_set(ctx->u, ins);
/* Set manual Index and Type */
if (f_index) {
ctx->index = flb_strdup(f_index->value); /* FIXME */
}
if (f_type) {
ctx->type = flb_strdup(f_type->value); /* FIXME */
}
/* HTTP Payload (response) maximum buffer size (0 == unlimited) */
if (ctx->buffer_size == -1) {
ctx->buffer_size = 0;
}
/* Elasticsearch: Path */
path = flb_output_get_property("path", ins);
if (!path) {
path = "";
}
/* Elasticsearch: Pipeline */
tmp = flb_output_get_property("pipeline", ins);
if (tmp) {
snprintf(ctx->uri, sizeof(ctx->uri) - 1, "%s/_bulk/?pipeline=%s", path, tmp);
}
else {
snprintf(ctx->uri, sizeof(ctx->uri) - 1, "%s/_bulk", path);
}
#ifdef FLB_HAVE_AWS
/* AWS Auth */
ctx->has_aws_auth = FLB_FALSE;
tmp = flb_output_get_property("aws_auth", ins);
if (tmp) {
if (strncasecmp(tmp, "On", 2) == 0) {
ctx->has_aws_auth = FLB_TRUE;
flb_debug("[out_es] Enabled AWS Auth");
/* AWS provider needs a separate TLS instance */
ctx->aws_tls.context = flb_tls_context_new(FLB_TRUE,
ins->tls_debug,
ins->tls_vhost,
ins->tls_ca_path,
ins->tls_ca_file,
ins->tls_crt_file,
ins->tls_key_file,
ins->tls_key_passwd);
if (!ctx->aws_tls.context) {
flb_errno();
flb_es_conf_destroy(ctx);
return NULL;
}
tmp = flb_output_get_property("aws_region", ins);
if (!tmp) {
flb_error("[out_es] aws_auth enabled but aws_region not set");
flb_es_conf_destroy(ctx);
return NULL;
}
ctx->aws_region = (char *) tmp;
ctx->aws_provider = flb_standard_chain_provider_create(config,
&ctx->aws_tls,
ctx->aws_region,
NULL,
flb_aws_client_generator());
if (!ctx->aws_provider) {
flb_error("[out_es] Failed to create AWS Credential Provider");
flb_es_conf_destroy(ctx);
return NULL;
}
tmp = flb_output_get_property("aws_role_arn", ins);
if (tmp) {
/* Use the STS Provider */
ctx->base_aws_provider = ctx->aws_provider;
aws_role_arn = (char *) tmp;
aws_external_id = NULL;
tmp = flb_output_get_property("aws_external_id", ins);
if (tmp) {
aws_external_id = (char *) tmp;
}
aws_session_name = flb_sts_session_name();
if (!aws_session_name) {
flb_error("[out_es] Failed to create aws iam role "
"session name");
flb_es_conf_destroy(ctx);
return NULL;
}
/* STS provider needs yet another separate TLS instance */
ctx->aws_sts_tls.context = flb_tls_context_new(FLB_TRUE,
ins->tls_debug,
ins->tls_vhost,
ins->tls_ca_path,
ins->tls_ca_file,
ins->tls_crt_file,
ins->tls_key_file,
ins->tls_key_passwd);
if (!ctx->aws_sts_tls.context) {
flb_errno();
flb_es_conf_destroy(ctx);
return NULL;
}
ctx->aws_provider = flb_sts_provider_create(config,
&ctx->aws_sts_tls,
ctx->
base_aws_provider,
aws_external_id,
aws_role_arn,
aws_session_name,
ctx->aws_region,
NULL,
flb_aws_client_generator());
/* Session name can be freed once provider is created */
flb_free(aws_session_name);
if (!ctx->aws_provider) {
flb_error("[out_es] Failed to create AWS STS Credential "
"Provider");
flb_es_conf_destroy(ctx);
return NULL;
}
}
/* initialize credentials in sync mode */
ctx->aws_provider->provider_vtable->sync(ctx->aws_provider);
ctx->aws_provider->provider_vtable->init(ctx->aws_provider);
/* set back to async */
ctx->aws_provider->provider_vtable->async(ctx->aws_provider);
}
}
#endif
return ctx;
}
int flb_es_conf_destroy(struct flb_elasticsearch *ctx)
{
if (!ctx) {
return 0;
}
if (ctx->u) {
flb_upstream_destroy(ctx->u);
}
#ifdef FLB_HAVE_AWS
if (ctx->base_aws_provider) {
flb_aws_provider_destroy(ctx->base_aws_provider);
}
if (ctx->aws_provider) {
flb_aws_provider_destroy(ctx->aws_provider);
}
if (ctx->aws_tls.context) {
flb_tls_context_destroy(ctx->aws_tls.context);
}
if (ctx->aws_sts_tls.context) {
flb_tls_context_destroy(ctx->aws_sts_tls.context);
}
#endif
flb_free(ctx);
return 0;
}
| 1 | 12,703 | Sorry... I just realized... since we use sts_endpoint in the EKS Provider, this error message is no longer true. Role_arn is not required. | fluent-fluent-bit | c |
@@ -206,6 +206,18 @@ public interface Tree<T> extends Traversable<T>, Serializable {
return io.vavr.collection.Collections.fill(n, s, empty(), Tree::of);
}
+ /**
+ * Returns a Tree containing {@code n} times the given {@code element}
+ *
+ * @param <T> Component type of the Tree
+ * @param n The number of elements in the Tree
+ * @param element The element
+ * @return A Tree of size {@code n}, where each element is the given {@code element}.
+ */
+ static <T> Tree<T> fill(int n, T element) {
+ return io.vavr.collection.Collections.fillObject(n, element, empty(), Tree::of);
+ }
+
/**
* Recursively builds a non-empty {@code Tree}, starting with the given {@code seed} value and proceeding in depth-first order.
* <p> | 1 | /* __ __ __ __ __ ___
* \ \ / / \ \ / / __/
* \ \/ / /\ \ \/ / /
* \____/__/ \__\____/__/
*
* Copyright 2014-2018 Vavr, http://vavr.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vavr.collection;
import io.vavr.PartialFunction;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import io.vavr.Tuple3;
import io.vavr.collection.List.Nil;
import io.vavr.collection.Tree.*;
import io.vavr.control.HashCodes;
import io.vavr.control.Option;
import java.io.*;
import java.util.*;
import java.util.function.*;
import java.util.stream.Collector;
import static io.vavr.collection.Tree.Order.PRE_ORDER;
import static io.vavr.collection.Tree.*;
/**
* A general Tree interface.
*
* @param <T> component type of this Tree
* @author Daniel Dietrich, Grzegorz Piwowarek
*/
public interface Tree<T> extends Traversable<T>, Serializable {
long serialVersionUID = 1L;
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link Tree}.
*
* @param <T> Component type of the Tree.
* @return A io.vavr.collection.Tree Collector.
*/
static <T> Collector<T, ArrayList<T>, Tree<T>> collector() {
final Supplier<ArrayList<T>> supplier = ArrayList::new;
final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<T>, Tree<T>> finisher = Tree::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
* Returns the singleton empty tree.
*
* @param <T> Type of tree values.
* @return The empty tree.
*/
static <T> Empty<T> empty() {
return Empty.instance();
}
/**
* Narrows a widened {@code Tree<? extends T>} to {@code Tree<T>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param tree An {@code Tree}.
* @param <T> Component type of the {@code Tree}.
* @return the given {@code tree} instance as narrowed type {@code Tree<T>}.
*/
@SuppressWarnings("unchecked")
static <T> Tree<T> narrow(Tree<? extends T> tree) {
return (Tree<T>) tree;
}
/**
* Returns a new Node containing the given value and having no children.
*
* @param value A value
* @param <T> Value type
* @return A new Node instance.
*/
static <T> Node<T> of(T value) {
return new Node<>(value, io.vavr.collection.List.empty());
}
/**
* Returns a new Node containing the given value and having the given children.
*
* @param value A value
* @param children The child nodes, possibly empty
* @param <T> Value type
* @return A new Node instance.
*/
@SuppressWarnings("varargs")
@SafeVarargs
static <T> Node<T> of(T value, Node<T>... children) {
Objects.requireNonNull(children, "children is null");
return new Node<>(value, io.vavr.collection.List.of(children));
}
/**
* Returns a new Node containing the given value and having the given children.
*
* @param value A value
* @param children The child nodes, possibly empty
* @param <T> Value type
* @return A new Node instance.
*/
static <T> Node<T> of(T value, Iterable<Node<T>> children) {
Objects.requireNonNull(children, "children is null");
return new Node<>(value, io.vavr.collection.List.ofAll(children));
}
/**
* Creates a Tree of the given elements.
*
     * @param <T>    Component type of the Tree.
* @param values Zero or more values.
* @return A Tree containing the given values.
* @throws NullPointerException if {@code values} is null
*/
@SuppressWarnings("varargs")
@SafeVarargs
static <T> Tree<T> of(T... values) {
Objects.requireNonNull(values, "values is null");
final io.vavr.collection.List<T> list = io.vavr.collection.List.of(values);
return list.isEmpty() ? Empty.instance() : new Node<>(list.head(), list.tail().map(Tree::of));
}
/**
* Creates a Tree of the given elements.
* <p>
     * If the given iterable is a tree, it is returned as result, since the iteration order of its
     * elements is stable.
     *
     * @param <T>      Component type of the Tree.
     * @param iterable An Iterable of elements.
     * @return A tree containing the given elements in the same order.
     * @throws NullPointerException if {@code iterable} is null
*/
@SuppressWarnings("unchecked")
static <T> Tree<T> ofAll(Iterable<? extends T> iterable) {
Objects.requireNonNull(iterable, "iterable is null");
if (iterable instanceof Tree) {
return (Tree<T>) iterable;
} else {
final io.vavr.collection.List<T> list = io.vavr.collection.List.ofAll(iterable);
return list.isEmpty() ? Empty.instance() : new Node<>(list.head(), list.tail().map(Tree::of));
}
}
/**
* Creates a Tree that contains the elements of the given {@link java.util.stream.Stream}.
*
* @param javaStream A {@link java.util.stream.Stream}
* @param <T> Component type of the Stream.
* @return A Tree containing the given elements in the same order.
*/
static <T> Tree<T> ofAll(java.util.stream.Stream<? extends T> javaStream) {
Objects.requireNonNull(javaStream, "javaStream is null");
return ofAll(io.vavr.collection.Iterator.ofAll(javaStream.iterator()));
}
/**
* Returns a Tree containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
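     * <p>
     * For example (an illustrative sketch; the computed values are arranged as by {@code of(T...)},
     * i.e. {@code f(0)} becomes the root and the remaining values become leaf children):
     * <pre>{@code
     * // = (0 1 4)
     * Tree<Integer> squares = Tree.tabulate(3, i -> i * i);
     * }</pre>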
*
* @param <T> Component type of the Tree
* @param n The number of elements in the Tree
* @param f The Function computing element values
* @return A Tree consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
static <T> Tree<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return io.vavr.collection.Collections.tabulate(n, f, empty(), Tree::of);
}
/**
* Returns a Tree containing {@code n} values supplied by a given Supplier {@code s}.
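     * <p>
     * For example (an illustrative sketch; the supplied values are arranged as by {@code of(T...)},
     * i.e. the first supplied value becomes the root and the rest become leaf children):
     * <pre>{@code
     * // = (42 42 42)
     * Tree<Integer> filled = Tree.fill(3, () -> 42);
     * }</pre>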
*
* @param <T> Component type of the Tree
* @param n The number of elements in the Tree
* @param s The Supplier computing element values
* @return A Tree of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
static <T> Tree<T> fill(int n, Supplier<? extends T> s) {
Objects.requireNonNull(s, "s is null");
return io.vavr.collection.Collections.fill(n, s, empty(), Tree::of);
}
/**
* Recursively builds a non-empty {@code Tree}, starting with the given {@code seed} value and proceeding in depth-first order.
* <p>
* The children of a node are created by
* <ol>
* <li>applying the {@code descend} function to the node value</li>
* <li>calling this method recursively by using each derived child value as new seed (in iteration order).</li>
* </ol>
* <p>
* Example:
* <pre>{@code
* // = (1 (2 4 5) 3)
     * Tree.recurse(1, i ->
     *   (i == 1) ? List.of(2, 3) :
     *   (i == 2) ? List.of(4, 5) :
     *   List.empty()
     * ).toLispString();
* }</pre>
*
* @param seed The start value for the Tree
* @param descend A function to calculate the child values
* @param <T> Value type
* @return a new, non-empty {@code Tree} instance
* @throws NullPointerException if {@code descend} is null
*/
static <T> Node<T> recurse(T seed, Function<? super T, ? extends Iterable<? extends T>> descend) {
Objects.requireNonNull(descend, "descend is null");
return Tree.of(seed, Stream.of(seed).flatMap(descend).map(children -> recurse(children, descend)));
}
/**
* Build a {@code List} with roots of {@code Tree} from flat source.
* <p>
     * {@code parentMapper} must return {@code null} for root elements.
*
* <pre>{@code
* // = [(1, null, "I"), (2, 1, "II"), (3, 1, "III"), (4, 2, "IV"), (5, 2, "V")]
* List<MenuItem> items = ...; // MenuItem(id, parentId, label)
*
* // I
* // / \
* // II III
* // /\
* // IV V
* Tree<MenuItem> menu = Tree.build(items, MenuItem::getId, MenuItem::getParentId);
* }</pre>
*
* @param source Flat source
* @param idMapper A mapper from source item to unique identifier of that item
     * @param parentMapper A mapper from source item to unique identifier of parent item. Must return {@code null} for root items
* @param <T> Value type
* @param <ID> Id type
* @return a new, maybe empty {@code List} instance with non-empty {@code Tree} instances
* @throws NullPointerException if {@code source}, {@code idMapper} or {@code parentMapper} is null
*/
static <T, ID> List<Node<T>> build(Iterable<? extends T> source, Function<? super T, ? extends ID> idMapper, Function<? super T, ? extends ID> parentMapper) {
Objects.requireNonNull(source, "source is null");
Objects.requireNonNull(source, "idMapper is null");
Objects.requireNonNull(source, "parentMapper is null");
final List<T> list = List.ofAll(source);
final Map<ID, List<T>> byParent = list.groupBy(parentMapper);
final Function<? super T, Iterable<? extends T>> descend = idMapper
.andThen(byParent::get)
.andThen(o -> o.getOrElse(List::empty));
final List<T> roots = byParent.get(null).getOrElse(List::empty);
return roots.map(v -> recurse(v, descend));
}
@Override
default <R> Tree<R> collect(PartialFunction<? super T, ? extends R> partialFunction) {
return ofAll(iterator().<R> collect(partialFunction));
}
/**
* Gets the value of this tree.
*
* @return The value of this tree.
* @throws java.lang.UnsupportedOperationException if this tree is empty
*/
T getValue();
/**
* Returns the children of this tree.
*
* @return the tree's children
*/
io.vavr.collection.List<Node<T>> getChildren();
/**
* Checks if this Tree is a leaf. A tree is a leaf if it is a Node with no children.
* Because the empty tree is no Node, it is not a leaf by definition.
*
* @return true if this tree is a leaf, false otherwise.
*/
boolean isLeaf();
/**
* Checks if this Tree is a branch. A Tree is a branch if it is a Node which has children.
* Because the empty tree is not a Node, it is not a branch by definition.
*
* @return true if this tree is a branch, false otherwise.
*/
default boolean isBranch() {
return !(isEmpty() || isLeaf());
}
/**
* A {@code Tree} is computed synchronously.
*
* @return false
*/
@Override
default boolean isAsync() {
return false;
}
@Override
default boolean isDistinct() {
return false;
}
/**
* A {@code Tree} is computed eagerly.
*
* @return false
*/
@Override
default boolean isLazy() {
return false;
}
@Override
default boolean isSequential() {
return true;
}
/**
* Traverses this tree values in a specific {@link Order}.
*
* @param order A traversal order
* @return A new Iterator
*/
default io.vavr.collection.Iterator<T> iterator(Order order) {
return values(order).iterator();
}
/**
* Creates a <a href="https://www.tutorialspoint.com/lisp/lisp_tree.htm">Lisp-like</a> representation of this {@code Tree}.
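     * <p>
     * For example (an illustrative sketch):
     * <pre>{@code
     * // = (1 2 3)
     * Tree.of(1, Tree.of(2), Tree.of(3)).toLispString();
     * }</pre>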
*
* @return This {@code Tree} as Lisp-string, i.e. represented as list of lists.
*/
String toLispString();
/**
* Transforms this {@code Tree}.
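     * <p>
     * For example (a minimal sketch using {@link #nodeCount()} as the transformation):
     * <pre>{@code
     * // = 3
     * Tree.of(1, Tree.of(2), Tree.of(3)).transform(Tree::nodeCount);
     * }</pre>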
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
default <U> U transform(Function<? super Tree<T>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
/**
* Traverses this tree in {@link Order#PRE_ORDER}.
*
* @return A sequence of nodes.
*/
default Seq<Node<T>> traverse() {
return traverse(PRE_ORDER);
}
/**
* Traverses this tree in a specific order.
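     * <p>
     * For example (an illustrative sketch; node values shown in visiting order):
     * <pre>{@code
     * //        1
     * //       / \
     * //      2   3
     * //     / \
     * //    4   5
     * Tree<Integer> tree = Tree.of(1, Tree.of(2, Tree.of(4), Tree.of(5)), Tree.of(3));
     *
     * tree.traverse(Order.PRE_ORDER);   // = Stream of nodes with values 1, 2, 4, 5, 3
     * tree.traverse(Order.POST_ORDER);  // = Stream of nodes with values 4, 5, 2, 3, 1
     * }</pre>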
*
* @param order the tree traversal order
* @return A sequence of nodes.
* @throws java.lang.NullPointerException if order is null
*/
default Seq<Node<T>> traverse(Order order) {
Objects.requireNonNull(order, "order is null");
if (isEmpty()) {
return Stream.empty();
} else {
final Node<T> node = (Node<T>) this;
switch (order) {
case PRE_ORDER:
return TreeModule.traversePreOrder(node);
case IN_ORDER:
return TreeModule.traverseInOrder(node);
case POST_ORDER:
return TreeModule.traversePostOrder(node);
case LEVEL_ORDER:
return TreeModule.traverseLevelOrder(node);
default:
throw new IllegalStateException("Unknown order: " + order.name());
}
}
}
/**
* Traverses this tree values in {@link Order#PRE_ORDER}.
* Syntactic sugar for {@code traverse().map(Node::getValue)}.
*
* @return A sequence of the tree values.
*/
default Seq<T> values() {
return traverse(PRE_ORDER).map(Node::getValue);
}
/**
* Traverses this tree values in a specific order.
* Syntactic sugar for {@code traverse(order).map(Node::getValue)}.
*
* @param order the tree traversal order
* @return A sequence of the tree values.
* @throws java.lang.NullPointerException if order is null
*/
default Seq<T> values(Order order) {
return traverse(order).map(Node::getValue);
}
/**
* Counts the number of branches of this tree. The empty tree and a leaf have no branches.
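     * <p>
     * For example (an illustrative sketch; only the root is a branch here):
     * <pre>{@code
     * // = 1
     * Tree.of(1, Tree.of(2), Tree.of(3)).branchCount();
     * }</pre>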
*
* @return The number of branches of this tree.
*/
default int branchCount() {
if (isEmpty() || isLeaf()) {
return 0;
} else {
return getChildren().foldLeft(1, (count, child) -> count + child.branchCount());
}
}
/**
* Counts the number of leaves of this tree. The empty tree has no leaves.
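     * <p>
     * For example (an illustrative sketch; the two children are the leaves):
     * <pre>{@code
     * // = 2
     * Tree.of(1, Tree.of(2), Tree.of(3)).leafCount();
     * }</pre>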
*
* @return The number of leaves of this tree.
*/
default int leafCount() {
if (isEmpty()) {
return 0;
} else if (isLeaf()) {
return 1;
} else {
return getChildren().foldLeft(0, (count, child) -> count + child.leafCount());
}
}
/**
* Counts the number of nodes (i.e. branches and leaves) of this tree. The empty tree has no nodes.
*
* @return The number of nodes of this tree.
*/
default int nodeCount() {
return length();
}
// -- Methods inherited from Traversable
@Override
default Seq<T> distinct() {
return values().distinct();
}
@Override
default Seq<T> distinctBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
if (isEmpty()) {
return Stream.empty();
} else {
return values().distinctBy(comparator);
}
}
@Override
default <U> Seq<T> distinctBy(Function<? super T, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
if (isEmpty()) {
return Stream.empty();
} else {
return values().distinctBy(keyExtractor);
}
}
@Override
default Seq<T> drop(int n) {
if (n >= length()) {
return Stream.empty();
} else {
return values().drop(n);
}
}
@Override
default Seq<T> dropRight(int n) {
if (n >= length()) {
return Stream.empty();
} else {
return values().dropRight(n);
}
}
@Override
default Seq<T> dropUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropWhile(predicate.negate());
}
@Override
default Seq<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Stream.empty();
} else {
return values().dropWhile(predicate);
}
}
@Override
default Seq<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Stream.empty();
} else {
return values().filter(predicate);
}
}
@Override
default Seq<T> reject(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Stream.empty();
} else {
return values().reject(predicate);
}
}
@Override
default <U> Tree<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return isEmpty() ? Empty.instance() : TreeModule.flatMap((Node<T>) this, mapper);
}
@Override
default <U> U foldRight(U zero, BiFunction<? super T, ? super U, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
if (isEmpty()) {
return zero;
} else {
return iterator().foldRight(zero, f);
}
}
@SuppressWarnings("unchecked")
@Override
default <C> Map<C, Seq<T>> groupBy(Function<? super T, ? extends C> classifier) {
return io.vavr.collection.Collections.groupBy(values(), classifier, Stream::ofAll);
}
@Override
default io.vavr.collection.Iterator<Seq<T>> grouped(int size) {
return sliding(size, size);
}
@Override
default boolean hasDefiniteSize() {
return true;
}
@Override
default T head() {
if (isEmpty()) {
throw new NoSuchElementException("head of empty tree");
} else {
return iterator().next();
}
}
@Override
default Seq<T> init() {
if (isEmpty()) {
throw new UnsupportedOperationException("init of empty tree");
} else {
return values().init();
}
}
@Override
default Option<Seq<T>> initOption() {
return isEmpty() ? Option.none() : Option.some(init());
}
@Override
default boolean isTraversableAgain() {
return true;
}
@Override
default io.vavr.collection.Iterator<T> iterator() {
return values().iterator();
}
@Override
default <U> Tree<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return isEmpty() ? Empty.instance() : TreeModule.map((Node<T>) this, mapper);
}
@Override
default Tree<T> orElse(Iterable<? extends T> other) {
return isEmpty() ? ofAll(other) : this;
}
@Override
default Tree<T> orElse(Supplier<? extends Iterable<? extends T>> supplier) {
return isEmpty() ? ofAll(supplier.get()) : this;
}
@SuppressWarnings("unchecked")
@Override
default Tuple2<Seq<T>, Seq<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(Stream.empty(), Stream.empty());
} else {
return (Tuple2<Seq<T>, Seq<T>>) values().partition(predicate);
}
}
@Override
default Tree<T> peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (!isEmpty()) {
action.accept(head());
}
return this;
}
@Override
default Tree<T> replace(T currentElement, T newElement) {
if (isEmpty()) {
return Empty.instance();
} else {
return TreeModule.replace((Node<T>) this, currentElement, newElement);
}
}
@Override
default Tree<T> replaceAll(T currentElement, T newElement) {
return map(t -> Objects.equals(t, currentElement) ? newElement : t);
}
@Override
default Seq<T> retainAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
return values().retainAll(elements);
}
@Override
default Seq<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation) {
return scanLeft(zero, operation);
}
@Override
default <U> Seq<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation) {
return io.vavr.collection.Collections.scanLeft(this, zero, operation, io.vavr.collection.Iterator::toStream);
}
@Override
default <U> Seq<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation) {
return io.vavr.collection.Collections.scanRight(this, zero, operation, io.vavr.collection.Iterator::toStream);
}
@Override
default io.vavr.collection.Iterator<Seq<T>> slideBy(Function<? super T, ?> classifier) {
return iterator().slideBy(classifier);
}
@Override
default io.vavr.collection.Iterator<Seq<T>> sliding(int size) {
return sliding(size, 1);
}
@Override
default io.vavr.collection.Iterator<Seq<T>> sliding(int size, int step) {
return iterator().sliding(size, step);
}
@SuppressWarnings("unchecked")
@Override
default Tuple2<Seq<T>, Seq<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(Stream.empty(), Stream.empty());
} else {
return (Tuple2<Seq<T>, Seq<T>>) values().span(predicate);
}
}
@Override
default String stringPrefix() {
return "Tree";
}
@Override
default Seq<T> tail() {
if (isEmpty()) {
throw new UnsupportedOperationException("tail of empty tree");
} else {
return values().tail();
}
}
@Override
default Option<Seq<T>> tailOption() {
return isEmpty() ? Option.none() : Option.some(tail());
}
@Override
default Seq<T> take(int n) {
if (isEmpty()) {
return Stream.empty();
} else {
return values().take(n);
}
}
@Override
default Seq<T> takeRight(int n) {
if (isEmpty()) {
return Stream.empty();
} else {
return values().takeRight(n);
}
}
@Override
default Seq<T> takeUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return values().takeUntil(predicate);
}
@Override
default Seq<T> takeWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return values().takeWhile(predicate);
}
@SuppressWarnings("unchecked")
@Override
default <T1, T2> Tuple2<Tree<T1>, Tree<T2>> unzip(
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
if (isEmpty()) {
return Tuple.of(Empty.instance(), Empty.instance());
} else {
return (Tuple2<Tree<T1>, Tree<T2>>) (Object) TreeModule.unzip((Node<T>) this, unzipper);
}
}
@SuppressWarnings("unchecked")
@Override
default <T1, T2, T3> Tuple3<Tree<T1>, Tree<T2>, Tree<T3>> unzip3(
Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
if (isEmpty()) {
return Tuple.of(Empty.instance(), Empty.instance(), Empty.instance());
} else {
return (Tuple3<Tree<T1>, Tree<T2>, Tree<T3>>) (Object) TreeModule.unzip3((Node<T>) this, unzipper);
}
}
@Override
default <U> Tree<Tuple2<T, U>> zip(Iterable<? extends U> that) {
return zipWith(that, Tuple::of);
}
@Override
default <U, R> Tree<R> zipWith(Iterable<? extends U> that, BiFunction<? super T, ? super U, ? extends R> mapper) {
Objects.requireNonNull(that, "that is null");
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return Empty.instance();
} else {
return TreeModule.zip((Node<T>) this, that.iterator(), mapper);
}
}
@Override
default <U> Tree<Tuple2<T, U>> zipAll(Iterable<? extends U> that, T thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
if (isEmpty()) {
return io.vavr.collection.Iterator.<U> ofAll(that).map(elem -> Tuple.of(thisElem, elem)).toTree();
} else {
final java.util.Iterator<? extends U> thatIter = that.iterator();
final Tree<Tuple2<T, U>> tree = TreeModule.zipAll((Node<T>) this, thatIter, thatElem);
if (thatIter.hasNext()) {
final Iterable<Node<Tuple2<T, U>>> remainder = io.vavr.collection.Iterator
.ofAll(thatIter)
.map(elem -> of(Tuple.of(thisElem, elem)));
return new Node<>(tree.getValue(), tree.getChildren().appendAll(remainder));
} else {
return tree;
}
}
}
@Override
default Tree<Tuple2<T, Integer>> zipWithIndex() {
return zipWithIndex(Tuple::of);
}
@Override
default <U> Tree<U> zipWithIndex(BiFunction<? super T, ? super Integer, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return zipWith(io.vavr.collection.Iterator.from(0), mapper);
}
@Override
boolean equals(Object o);
@Override
int hashCode();
@Override
String toString();
/**
* Creates a neat 2-dimensional drawing of a tree. Unicode characters are used to draw node junctions.
*
* @return A nice string representation of the tree.
*/
String draw();
/**
* Represents a tree node.
*
* @param <T> value type
*/
final class Node<T> implements Tree<T>, Serializable {
private static final long serialVersionUID = 1L;
private final T value;
private final io.vavr.collection.List<Node<T>> children;
private final int size;
/**
* Constructs a rose tree branch.
*
* @param value A value.
* @param children A non-empty list of children.
* @throws NullPointerException if children is null
* @throws IllegalArgumentException if children is empty
*/
public Node(T value, io.vavr.collection.List<Node<T>> children) {
Objects.requireNonNull(children, "children is null");
this.value = value;
this.children = children;
this.size = children.foldLeft(1, (acc, child) -> acc + child.size);
}
@Override
public io.vavr.collection.List<Node<T>> getChildren() {
return children;
}
@Override
public T getValue() {
return value;
}
@Override
public boolean isEmpty() {
return false;
}
@Override
public int length() {
return size;
}
@Override
public boolean isLeaf() {
return size == 1;
}
@Override
public T last() {
return children.isEmpty() ? value : children.last().last();
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof Node) {
final Node<?> that = (Node<?>) o;
return Objects.equals(this.getValue(), that.getValue())
&& Objects.equals(this.getChildren(), that.getChildren());
} else {
return false;
}
}
@Override
public int hashCode() {
return HashCodes.hash(value, children);
}
@Override
public String toString() {
return mkString(stringPrefix() + "(", ", ", ")");
}
@Override
public String toLispString() {
return toLispString(this);
}
@Override
public String draw() {
final StringBuilder builder = new StringBuilder();
drawAux("", builder);
return builder.toString();
}
private void drawAux(String indent, StringBuilder builder) {
builder.append(value);
for (io.vavr.collection.List<Node<T>> it = children; !it.isEmpty(); it = it.tail()) {
final boolean isLast = it.tail().isEmpty();
builder.append('\n')
.append(indent)
.append(isLast ? "└──" : "├──");
it.head().drawAux(indent + (isLast ? " " : "│ "), builder);
}
}
private static String toLispString(Tree<?> tree) {
final String value = String.valueOf(tree.getValue());
if (tree.isLeaf()) {
return value;
} else {
final String children = tree.getChildren().map(child -> toLispString(child)).mkString(" ");
return "(" + value + " " + children + ")";
}
}
// -- Serializable implementation
/**
* {@code writeReplace} method for the serialization proxy pattern.
* <p>
* The presence of this method causes the serialization system to emit a SerializationProxy instance instead of
* an instance of the enclosing class.
*
* @return A SerializationProxy for this enclosing class.
*/
@GwtIncompatible("The Java serialization protocol is explicitly not supported")
private Object writeReplace() {
return new SerializationProxy<>(this);
}
/**
* {@code readObject} method for the serialization proxy pattern.
* <p>
* Guarantees that the serialization system will never generate a serialized instance of the enclosing class.
*
* @param stream An object serialization stream.
* @throws java.io.InvalidObjectException This method will throw with the message "Proxy required".
*/
@GwtIncompatible("The Java serialization protocol is explicitly not supported")
private void readObject(ObjectInputStream stream) throws InvalidObjectException {
throw new InvalidObjectException("Proxy required");
}
/**
* A serialization proxy which, in this context, is used to deserialize immutable nodes with final
* instance fields.
*
* @param <T> The component type of the underlying tree.
*/
// DEV NOTE: The serialization proxy pattern is not compatible with non-final, i.e. extendable,
// classes. Also, it may not be compatible with circular object graphs.
@GwtIncompatible("The Java serialization protocol is explicitly not supported")
private static final class SerializationProxy<T> implements Serializable {
private static final long serialVersionUID = 1L;
// the instance to be serialized/deserialized
private transient Node<T> node;
/**
* Constructor for the case of serialization, called by {@link Node#writeReplace()}.
* <p/>
* The constructor of a SerializationProxy takes an argument that concisely represents the logical state of
* an instance of the enclosing class.
*
* @param node a Branch
*/
SerializationProxy(Node<T> node) {
this.node = node;
}
/**
* Write an object to a serialization stream.
*
* @param s An object serialization stream.
* @throws java.io.IOException If an error occurs writing to the stream.
*/
private void writeObject(ObjectOutputStream s) throws IOException {
s.defaultWriteObject();
s.writeObject(node.value);
s.writeObject(node.children);
}
/**
* Read an object from a deserialization stream.
*
* @param s An object deserialization stream.
* @throws ClassNotFoundException If the object's class read from the stream cannot be found.
* @throws IOException If an error occurs reading from the stream.
*/
@SuppressWarnings("unchecked")
private void readObject(ObjectInputStream s) throws ClassNotFoundException, IOException {
s.defaultReadObject();
final T value = (T) s.readObject();
final io.vavr.collection.List<Node<T>> children = (io.vavr.collection.List<Node<T>>) s.readObject();
node = new Node<>(value, children);
}
/**
* {@code readResolve} method for the serialization proxy pattern.
* <p>
* Returns a logically equivalent instance of the enclosing class. The presence of this method causes the
* serialization system to translate the serialization proxy back into an instance of the enclosing class
* upon deserialization.
*
* @return A deserialized instance of the enclosing class.
*/
private Object readResolve() {
return node;
}
}
}
/**
* The empty tree. Use Tree.empty() to create an instance.
*
* @param <T> type of the tree's values
*/
final class Empty<T> implements Tree<T>, Serializable {
private static final long serialVersionUID = 1L;
private static final Empty<?> INSTANCE = new Empty<>();
// hidden
private Empty() {
}
@SuppressWarnings("unchecked")
public static <T> Empty<T> instance() {
return (Empty<T>) INSTANCE;
}
@Override
public io.vavr.collection.List<Node<T>> getChildren() {
return Nil.instance();
}
@Override
public T getValue() {
throw new UnsupportedOperationException("getValue of empty Tree");
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public int length() {
return 0;
}
@Override
public boolean isLeaf() {
return false;
}
@Override
public T last() {
throw new NoSuchElementException("last of empty tree");
}
@Override
public boolean equals(Object o) {
return o == this;
}
@Override
public int hashCode() {
return 1;
}
@Override
public String toString() {
return stringPrefix() + "()";
}
@Override
public String toLispString() {
return "()";
}
@Override
public String draw() { return "▣"; }
// -- Serializable implementation
/**
* Instance control for object serialization.
*
* @return The singleton instance of Nil.
* @see java.io.Serializable
*/
private Object readResolve() {
return INSTANCE;
}
}
/**
* Tree traversal order.
* <p>
* Example tree:
* <pre>
* <code>
* 1
* / \
* / \
* / \
* 2 3
* / \ /
* 4 5 6
* / / \
* 7 8 9
* </code>
* </pre>
* <p>
* See also
* <ul>
* <li><a href="http://en.wikipedia.org/wiki/Tree_traversal">Tree traversal</a> (wikipedia)</li>
* <li>See <a href="http://rosettacode.org/wiki/Tree_traversal">Tree traversal</a> (rosetta code)</li>
* </ul>
*/
// see http://programmers.stackexchange.com/questions/138766/in-order-traversal-of-m-way-trees
enum Order {
/**
* 1 2 4 7 5 3 6 8 9 (= depth-first)
*/
PRE_ORDER,
/**
* 7 4 2 5 1 8 6 9 3
*/
IN_ORDER,
/**
* 7 4 5 2 8 9 6 3 1
*/
POST_ORDER,
/**
* 1 2 3 4 5 6 7 8 9 (= breadth-first)
*/
LEVEL_ORDER
}
}
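Editor's note, not part of the vavr sources above: a minimal sketch of how the four traversal orders documented in the Order enum play out on the example tree from its javadoc. It assumes the Tree.of(value, children...) factory that the interface's own default methods rely on; the expected sequences are taken from the enum's comments, and the Lisp string follows toLispString() as implemented in Node.

import io.vavr.collection.Seq;
import io.vavr.collection.Tree;

public class TraversalOrderSketch {
    public static void main(String[] args) {
        // The example tree from the Order javadoc: 1 -> (2 -> (4 -> 7, 5), 3 -> (6 -> 8, 9))
        Tree<Integer> tree = Tree.of(1,
                Tree.of(2, Tree.of(4, Tree.of(7)), Tree.of(5)),
                Tree.of(3, Tree.of(6, Tree.of(8), Tree.of(9))));

        Seq<Integer> pre   = tree.values(Tree.Order.PRE_ORDER);   // 1 2 4 7 5 3 6 8 9
        Seq<Integer> in    = tree.values(Tree.Order.IN_ORDER);    // 7 4 2 5 1 8 6 9 3
        Seq<Integer> post  = tree.values(Tree.Order.POST_ORDER);  // 7 4 5 2 8 9 6 3 1
        Seq<Integer> level = tree.values(Tree.Order.LEVEL_ORDER); // 1 2 3 4 5 6 7 8 9

        System.out.println(pre.mkString(" "));
        System.out.println(tree.toLispString()); // (1 (2 (4 7) 5) (3 (6 8 9)))
    }
}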
/**
* Because the empty tree {@code Empty} cannot be a child of an existing tree, method implementations distinguish between the
* empty and non-empty case. Because the structure of trees is recursive, often we have commands in the form of module
* classes with one static method.
*/
interface TreeModule {
@SuppressWarnings("unchecked")
static <T, U> Tree<U> flatMap(Node<T> node, Function<? super T, ? extends Iterable<? extends U>> mapper) {
final Tree<U> mapped = ofAll(mapper.apply(node.getValue()));
if (mapped.isEmpty()) {
return empty();
} else {
final io.vavr.collection.List<Node<U>> children = (io.vavr.collection.List<Node<U>>) (Object) node
.getChildren()
.map(child -> flatMap(child, mapper))
.filter(Tree::nonEmpty);
return of(mapped.get(), children.prependAll(mapped.getChildren()));
}
}
static <T, U> Node<U> map(Node<T> node, Function<? super T, ? extends U> mapper) {
final U value = mapper.apply(node.getValue());
final io.vavr.collection.List<Node<U>> children = node.getChildren().map(child -> map(child, mapper));
return new Node<>(value, children);
}
// Idea:
// Traverse (depth-first) until a match is found, then stop and rebuild relevant parts of the tree.
// If not found, return the same tree instance.
static <T> Node<T> replace(Node<T> node, T currentElement, T newElement) {
if (Objects.equals(node.getValue(), currentElement)) {
return new Node<>(newElement, node.getChildren());
} else {
for (Node<T> child : node.getChildren()) {
final Node<T> newChild = replace(child, currentElement, newElement);
final boolean found = newChild != child;
if (found) {
final io.vavr.collection.List<Node<T>> newChildren = node.getChildren().replace(child, newChild);
return new Node<>(node.getValue(), newChildren);
}
}
return node;
}
}
static <T> Stream<Node<T>> traversePreOrder(Node<T> node) {
return node.getChildren().foldLeft(Stream.of(node),
(acc, child) -> acc.appendAll(traversePreOrder(child)));
}
static <T> Stream<Node<T>> traverseInOrder(Node<T> node) {
if (node.isLeaf()) {
return Stream.of(node);
} else {
final io.vavr.collection.List<Node<T>> children = node.getChildren();
return children
.tail()
.foldLeft(Stream.<Node<T>> empty(), (acc, child) -> acc.appendAll(traverseInOrder(child)))
.prepend(node)
.prependAll(traverseInOrder(children.head()));
}
}
static <T> Stream<Node<T>> traversePostOrder(Node<T> node) {
return node
.getChildren()
.foldLeft(Stream.<Node<T>> empty(), (acc, child) -> acc.appendAll(traversePostOrder(child)))
.append(node);
}
static <T> Stream<Node<T>> traverseLevelOrder(Node<T> node) {
Stream<Node<T>> result = Stream.empty();
final java.util.Queue<Node<T>> queue = new java.util.LinkedList<>();
queue.add(node);
while (!queue.isEmpty()) {
final Node<T> next = queue.remove();
result = result.prepend(next);
queue.addAll(next.getChildren().toJavaList());
}
return result.reverse();
}
static <T, T1, T2> Tuple2<Node<T1>, Node<T2>> unzip(Node<T> node,
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
final Tuple2<? extends T1, ? extends T2> value = unzipper.apply(node.getValue());
final io.vavr.collection.List<Tuple2<Node<T1>, Node<T2>>> children = node
.getChildren()
.map(child -> unzip(child, unzipper));
final Node<T1> node1 = new Node<>(value._1, children.map(t -> t._1));
final Node<T2> node2 = new Node<>(value._2, children.map(t -> t._2));
return Tuple.of(node1, node2);
}
static <T, T1, T2, T3> Tuple3<Node<T1>, Node<T2>, Node<T3>> unzip3(Node<T> node,
Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
final Tuple3<? extends T1, ? extends T2, ? extends T3> value = unzipper.apply(node.getValue());
final io.vavr.collection.List<Tuple3<Node<T1>, Node<T2>, Node<T3>>> children = node.getChildren()
.map(child -> unzip3(child, unzipper));
final Node<T1> node1 = new Node<>(value._1, children.map(t -> t._1));
final Node<T2> node2 = new Node<>(value._2, children.map(t -> t._2));
final Node<T3> node3 = new Node<>(value._3, children.map(t -> t._3));
return Tuple.of(node1, node2, node3);
}
@SuppressWarnings("unchecked")
static <T, U, R> Tree<R> zip(Node<T> node, java.util.Iterator<? extends U> that, BiFunction<? super T, ? super U, ? extends R> mapper) {
if (!that.hasNext()) {
return Empty.instance();
} else {
final R value = mapper.apply(node.getValue(), that.next());
final io.vavr.collection.List<Node<R>> children = (io.vavr.collection.List<Node<R>>) (Object) node
.getChildren()
.map(child -> zip(child, that, mapper))
.filter(Tree::nonEmpty);
return new Node<>(value, children);
}
}
@SuppressWarnings("unchecked")
static <T, U> Tree<Tuple2<T, U>> zipAll(Node<T> node, java.util.Iterator<? extends U> that, U thatElem) {
if (!that.hasNext()) {
return node.map(value -> Tuple.of(value, thatElem));
} else {
final Tuple2<T, U> value = Tuple.of(node.getValue(), that.next());
final io.vavr.collection.List<Node<Tuple2<T, U>>> children = (io.vavr.collection.List<Node<Tuple2<T, U>>>) (Object) node
.getChildren()
.map(child -> zipAll(child, that, thatElem))
.filter(Tree::nonEmpty);
return new Node<>(value, children);
}
}
}
| 1 | 12,933 | (...), where each element ~are~ **is the** given {\@code element}. | vavr-io-vavr | java |
@@ -73,6 +73,7 @@ abstract class BaseFile<F>
private long[] splitOffsets = null;
private int[] equalityIds = null;
private byte[] keyMetadata = null;
+ private Integer sortOrderId;
// cached schema
private transient Schema avroSchema = null; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.ArrayUtil;
import org.apache.iceberg.util.ByteBuffers;
/**
* Base class for both {@link DataFile} and {@link DeleteFile}.
*/
abstract class BaseFile<F>
implements ContentFile<F>, IndexedRecord, StructLike, SpecificData.SchemaConstructable, Serializable {
static final Types.StructType EMPTY_STRUCT_TYPE = Types.StructType.of();
static final PartitionData EMPTY_PARTITION_DATA = new PartitionData(EMPTY_STRUCT_TYPE) {
@Override
public PartitionData copy() {
return this; // this does not change
}
};
private int[] fromProjectionPos;
private Types.StructType partitionType;
private Long fileOrdinal = null;
private int partitionSpecId = -1;
private FileContent content = FileContent.DATA;
private String filePath = null;
private FileFormat format = null;
private PartitionData partitionData = null;
private Long recordCount = null;
private long fileSizeInBytes = -1L;
// optional fields
private Map<Integer, Long> columnSizes = null;
private Map<Integer, Long> valueCounts = null;
private Map<Integer, Long> nullValueCounts = null;
private Map<Integer, Long> nanValueCounts = null;
private Map<Integer, ByteBuffer> lowerBounds = null;
private Map<Integer, ByteBuffer> upperBounds = null;
private long[] splitOffsets = null;
private int[] equalityIds = null;
private byte[] keyMetadata = null;
// cached schema
private transient Schema avroSchema = null;
/**
* Used by Avro reflection to instantiate this class when reading manifest files.
*/
BaseFile(Schema avroSchema) {
this.avroSchema = avroSchema;
Types.StructType schema = AvroSchemaUtil.convert(avroSchema).asNestedType().asStructType();
// partition type may be null if the field was not projected
Type partType = schema.fieldType("partition");
if (partType != null) {
this.partitionType = partType.asNestedType().asStructType();
} else {
this.partitionType = EMPTY_STRUCT_TYPE;
}
List<Types.NestedField> fields = schema.fields();
List<Types.NestedField> allFields = Lists.newArrayList();
allFields.addAll(DataFile.getType(partitionType).fields());
allFields.add(MetadataColumns.ROW_POSITION);
this.fromProjectionPos = new int[fields.size()];
for (int i = 0; i < fromProjectionPos.length; i += 1) {
boolean found = false;
for (int j = 0; j < allFields.size(); j += 1) {
if (fields.get(i).fieldId() == allFields.get(j).fieldId()) {
found = true;
fromProjectionPos[i] = j;
}
}
if (!found) {
throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i));
}
}
this.partitionData = new PartitionData(partitionType);
}
BaseFile(int specId, FileContent content, String filePath, FileFormat format,
PartitionData partition, long fileSizeInBytes, long recordCount,
Map<Integer, Long> columnSizes, Map<Integer, Long> valueCounts,
Map<Integer, Long> nullValueCounts, Map<Integer, Long> nanValueCounts,
Map<Integer, ByteBuffer> lowerBounds, Map<Integer, ByteBuffer> upperBounds, List<Long> splitOffsets,
int[] equalityFieldIds, ByteBuffer keyMetadata) {
this.partitionSpecId = specId;
this.content = content;
this.filePath = filePath;
this.format = format;
// this constructor is used by DataFiles.Builder, which passes null for unpartitioned data
if (partition == null) {
this.partitionData = EMPTY_PARTITION_DATA;
this.partitionType = EMPTY_PARTITION_DATA.getPartitionType();
} else {
this.partitionData = partition;
this.partitionType = partition.getPartitionType();
}
// this will throw NPE if metrics.recordCount is null
this.recordCount = recordCount;
this.fileSizeInBytes = fileSizeInBytes;
this.columnSizes = columnSizes;
this.valueCounts = valueCounts;
this.nullValueCounts = nullValueCounts;
this.nanValueCounts = nanValueCounts;
this.lowerBounds = SerializableByteBufferMap.wrap(lowerBounds);
this.upperBounds = SerializableByteBufferMap.wrap(upperBounds);
this.splitOffsets = ArrayUtil.toLongArray(splitOffsets);
this.equalityIds = equalityFieldIds;
this.keyMetadata = ByteBuffers.toByteArray(keyMetadata);
}
/**
* Copy constructor.
*
* @param toCopy a generic data file to copy.
* @param fullCopy whether to copy all fields or to drop column-level stats
*/
BaseFile(BaseFile<F> toCopy, boolean fullCopy) {
this.fileOrdinal = toCopy.fileOrdinal;
this.partitionSpecId = toCopy.partitionSpecId;
this.content = toCopy.content;
this.filePath = toCopy.filePath;
this.format = toCopy.format;
this.partitionData = toCopy.partitionData.copy();
this.partitionType = toCopy.partitionType;
this.recordCount = toCopy.recordCount;
this.fileSizeInBytes = toCopy.fileSizeInBytes;
if (fullCopy) {
// TODO: support lazy conversion to/from map
this.columnSizes = copy(toCopy.columnSizes);
this.valueCounts = copy(toCopy.valueCounts);
this.nullValueCounts = copy(toCopy.nullValueCounts);
this.nanValueCounts = copy(toCopy.nanValueCounts);
this.lowerBounds = SerializableByteBufferMap.wrap(copy(toCopy.lowerBounds));
this.upperBounds = SerializableByteBufferMap.wrap(copy(toCopy.upperBounds));
} else {
this.columnSizes = null;
this.valueCounts = null;
this.nullValueCounts = null;
this.nanValueCounts = null;
this.lowerBounds = null;
this.upperBounds = null;
}
this.fromProjectionPos = toCopy.fromProjectionPos;
this.keyMetadata = toCopy.keyMetadata == null ? null : Arrays.copyOf(toCopy.keyMetadata, toCopy.keyMetadata.length);
this.splitOffsets = toCopy.splitOffsets == null ? null :
Arrays.copyOf(toCopy.splitOffsets, toCopy.splitOffsets.length);
this.equalityIds = toCopy.equalityIds != null ? Arrays.copyOf(toCopy.equalityIds, toCopy.equalityIds.length) : null;
}
/**
* Constructor for Java serialization.
*/
BaseFile() {
}
@Override
public int specId() {
return partitionSpecId;
}
void setSpecId(int specId) {
this.partitionSpecId = specId;
}
protected abstract Schema getAvroSchema(Types.StructType partitionStruct);
@Override
public Schema getSchema() {
if (avroSchema == null) {
this.avroSchema = getAvroSchema(partitionType);
}
return avroSchema;
}
@Override
@SuppressWarnings("unchecked")
public void put(int i, Object value) {
int pos = i;
// if the schema was projected, map the incoming ordinal to the expected one
if (fromProjectionPos != null) {
pos = fromProjectionPos[i];
}
switch (pos) {
case 0:
this.content = value != null ? FileContent.values()[(Integer) value] : FileContent.DATA;
return;
case 1:
// always coerce to String for Serializable
this.filePath = value.toString();
return;
case 2:
this.format = FileFormat.valueOf(value.toString());
return;
case 3:
this.partitionData = (PartitionData) value;
return;
case 4:
this.recordCount = (Long) value;
return;
case 5:
this.fileSizeInBytes = (Long) value;
return;
case 6:
this.columnSizes = (Map<Integer, Long>) value;
return;
case 7:
this.valueCounts = (Map<Integer, Long>) value;
return;
case 8:
this.nullValueCounts = (Map<Integer, Long>) value;
return;
case 9:
this.nanValueCounts = (Map<Integer, Long>) value;
return;
case 10:
this.lowerBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) value);
return;
case 11:
this.upperBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) value);
return;
case 12:
this.keyMetadata = ByteBuffers.toByteArray((ByteBuffer) value);
return;
case 13:
this.splitOffsets = ArrayUtil.toLongArray((List<Long>) value);
return;
case 14:
this.equalityIds = ArrayUtil.toIntArray((List<Integer>) value);
return;
case 15:
this.fileOrdinal = (long) value;
return;
default:
// ignore the object, it must be from a newer version of the format
}
}
@Override
public <T> void set(int pos, T value) {
put(pos, value);
}
@Override
public Object get(int i) {
int pos = i;
// if the schema was projected, map the incoming ordinal to the expected one
if (fromProjectionPos != null) {
pos = fromProjectionPos[i];
}
switch (pos) {
case 0:
return content.id();
case 1:
return filePath;
case 2:
return format != null ? format.toString() : null;
case 3:
return partitionData;
case 4:
return recordCount;
case 5:
return fileSizeInBytes;
case 6:
return columnSizes;
case 7:
return valueCounts;
case 8:
return nullValueCounts;
case 9:
return nanValueCounts;
case 10:
return lowerBounds;
case 11:
return upperBounds;
case 12:
return keyMetadata();
case 13:
return splitOffsets();
case 14:
return equalityFieldIds();
case 15:
return pos;
default:
throw new UnsupportedOperationException("Unknown field ordinal: " + pos);
}
}
@Override
public <T> T get(int pos, Class<T> javaClass) {
return javaClass.cast(get(pos));
}
@Override
public int size() {
return DataFile.getType(EMPTY_STRUCT_TYPE).fields().size();
}
@Override
public Long pos() {
return fileOrdinal;
}
@Override
public FileContent content() {
return content;
}
@Override
public CharSequence path() {
return filePath;
}
@Override
public FileFormat format() {
return format;
}
@Override
public StructLike partition() {
return partitionData;
}
@Override
public long recordCount() {
return recordCount;
}
@Override
public long fileSizeInBytes() {
return fileSizeInBytes;
}
@Override
public Map<Integer, Long> columnSizes() {
return columnSizes;
}
@Override
public Map<Integer, Long> valueCounts() {
return valueCounts;
}
@Override
public Map<Integer, Long> nullValueCounts() {
return nullValueCounts;
}
@Override
public Map<Integer, Long> nanValueCounts() {
return nanValueCounts;
}
@Override
public Map<Integer, ByteBuffer> lowerBounds() {
return lowerBounds;
}
@Override
public Map<Integer, ByteBuffer> upperBounds() {
return upperBounds;
}
@Override
public ByteBuffer keyMetadata() {
return keyMetadata != null ? ByteBuffer.wrap(keyMetadata) : null;
}
@Override
public List<Long> splitOffsets() {
return ArrayUtil.toLongList(splitOffsets);
}
@Override
public List<Integer> equalityFieldIds() {
return ArrayUtil.toIntList(equalityIds);
}
private static <K, V> Map<K, V> copy(Map<K, V> map) {
if (map != null) {
Map<K, V> copy = Maps.newHashMapWithExpectedSize(map.size());
copy.putAll(map);
return Collections.unmodifiableMap(copy);
}
return null;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("content", content.toString().toLowerCase(Locale.ROOT))
.add("file_path", filePath)
.add("file_format", format)
.add("partition", partitionData)
.add("record_count", recordCount)
.add("file_size_in_bytes", fileSizeInBytes)
.add("column_sizes", columnSizes)
.add("value_counts", valueCounts)
.add("null_value_counts", nullValueCounts)
.add("nan_value_counts", nanValueCounts)
.add("lower_bounds", lowerBounds)
.add("upper_bounds", upperBounds)
.add("key_metadata", keyMetadata == null ? "null" : "(redacted)")
.add("split_offsets", splitOffsets == null ? "null" : splitOffsets())
.add("equality_ids", equalityIds == null ? "null" : equalityFieldIds())
.toString();
}
}
| 1 | 30,853 | I think this can be an int because we have a default that is always valid, `0`. | apache-iceberg | java |
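Editor's note on the review above: a hedged sketch of the int-with-default variant the reviewer suggests for the new sortOrderId field added in the diff. The field and accessor names mirror the hunk; the default constant and its value 0 are assumptions taken from the comment, not from the committed Iceberg change.

// Sketch only; illustrates the reviewer's suggestion, not the actual BaseFile implementation.
class SortOrderIdSketch {
  // 0 is assumed to identify the table's default (unsorted) sort order,
  // so the field never needs a null "unset" state.
  private static final int DEFAULT_SORT_ORDER_ID = 0;

  private int sortOrderId = DEFAULT_SORT_ORDER_ID;

  int sortOrderId() {
    return sortOrderId;
  }

  void setSortOrderId(int sortOrderId) {
    this.sortOrderId = sortOrderId;
  }
}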
@@ -141,6 +141,17 @@ describe Section do
end
end
+ describe '.upcoming?' do
+ it 'knows if it has happened yet' do
+ next_week = Section.new(starts_on: 1.week.from_now)
+ last_week = Section.new(starts_on: 1.week.ago)
+ today = Section.new(starts_on: Date.today)
+ expect(today).to be_upcoming
+ expect(next_week).to be_upcoming
+ expect(last_week).not_to be_upcoming
+ end
+ end
+
describe '#fulfillment_method' do
it 'returns in-person if the workshop is an in-person one' do
in_person_workshop = create(:workshop, online: false) | 1 | require 'spec_helper'
describe Section do
# Associations
it { should belong_to(:workshop) }
it { should have_many(:paid_purchases) }
it { should have_many(:purchases) }
it { should have_many(:section_teachers) }
it { should have_many(:unpaid_purchases) }
it { should have_many(:teachers).through(:section_teachers) }
# Validations
it { should validate_presence_of :address }
it { should validate_presence_of :ends_on }
it { should validate_presence_of :start_at }
it { should validate_presence_of :starts_on }
it { should validate_presence_of :stop_at }
describe 'self.active' do
it "only includes sections thats haven't started" do
active = create(:section, starts_on: Date.tomorrow, ends_on: 7.days.from_now)
create(:section, starts_on: 1.week.ago, ends_on: 7.days.from_now)
active2 = create(:section, starts_on: Date.today, ends_on: 7.days.from_now)
expect(Section.active).to eq [active2, active]
end
end
describe '#date_range' do
context 'when starts_on and ends_on are nil' do
it 'returns nil' do
section = Section.new
section.date_range.should be_nil
end
end
context 'when starts_on == ends_on' do
it 'returns a string representation of a single date' do
date = '20121102'
section = create(:section, starts_on: date, ends_on: date)
section.date_range.should eq('November 02, 2012')
end
end
context 'when starts_on and ends_on are different years' do
it 'includes month and year in both dates' do
section = create(:section, starts_on: '20121102', ends_on: '20131102')
section.date_range.should eq('November 02, 2012-November 02, 2013')
end
end
context 'when starts_on and ends_on are different months' do
it 'does not repeat the year' do
section = create(:section, starts_on: '20121102', ends_on: '20121202')
section.date_range.should eq('November 02-December 02, 2012')
end
end
context 'when starts_on and ends_on are different days' do
it 'does not repeat the month or year' do
section = create(:section, starts_on: '20121102', ends_on: '20121103')
section.date_range.should eq('November 02-03, 2012')
end
end
end
describe '#seats_available' do
let(:workshop) do
create :workshop, maximum_students: 8
end
context 'when seats_available is not set' do
it 'returns workshop.maximum_students' do
section = create(:section, workshop: workshop)
section.seats_available.should eq(8)
end
end
context 'when seats_available is set' do
it 'returns seats_available' do
section = create(:section, workshop: workshop, seats_available: 12)
section.seats_available.should eq(12)
end
end
end
describe '#send_reminders' do
it 'sends reminder emails to all paid registrants' do
section = create(:section)
create :purchase, purchaseable: section, paid: true
create :purchase, purchaseable: section, paid: true
create :purchase, purchaseable: section, paid: false, payment_method: 'paypal'
create :purchase, paid: true
ActionMailer::Base.deliveries.clear
section.send_reminders
ActionMailer::Base.deliveries.should have(2).email
end
end
describe '.send_reminders' do
it 'only sends reminders for a week from today' do
sections = [
create(:section, starts_on: 1.week.from_now),
create(:section, starts_on: 1.week.from_now + 1.day),
create(:section, starts_on: 1.week.from_now - 1.day)
]
sections.each do |section|
create :paid_purchase, purchaseable: section
end
ActionMailer::Base.deliveries.clear
Section.send_reminders
ActionMailer::Base.deliveries.should have(1).email
end
end
describe '#to_param' do
it 'returns the id and parameterized workshop name' do
section = create(:section)
expected = "#{section.id}-#{section.name.parameterize}"
section.to_param.should eq(expected)
end
end
describe '.unique_section_teachers_by_teacher' do
it 'returns 1 section_teacher per teacher' do
section_teacher_one = create(:section).section_teachers.first
section_teacher_two = create(:section).section_teachers.first
create(:section).teachers = [section_teacher_two.teacher]
expected = [section_teacher_one, section_teacher_two]
Section.unique_section_teachers_by_teacher.should eq(expected)
end
end
describe '.upcoming' do
it 'knows which sections are a week away' do
section = create(:section, starts_on: 1.week.from_now)
create :section, starts_on: 1.week.from_now + 1.day
create :section, starts_on: 1.week.from_now - 1.day
Section.upcoming.should eq([section])
end
end
describe '#fulfillment_method' do
it 'returns in-person if the workshop is an in-person one' do
in_person_workshop = create(:workshop, online: false)
section = create(:section, workshop: in_person_workshop)
expect(section.fulfillment_method).to eq('in-person')
end
it 'returns online if the workshop is an online one' do
online_workshop = create(:workshop, online: true)
section = create(:section, workshop: online_workshop)
expect(section.fulfillment_method).to eq('online')
end
end
end
describe Section do
context 'self.by_starts_on_desc' do
it 'returns sections newest to oldest by starts_on' do
old_section = create(:section, starts_on: 2.weeks.from_now)
new_section = create(:section, starts_on: 4.weeks.from_now)
Section.by_starts_on_desc.should == [new_section, old_section]
end
end
describe '#subscription?' do
it 'returns false' do
expect(Section.new).not_to be_subscription
end
end
describe 'purchase_for' do
it 'returns the purchase when a user has purchased a section' do
user = create(:user)
purchase = create(:online_section_purchase, user: user)
section = purchase.purchaseable
expect(section.purchase_for(user)).to eq purchase
end
it 'returns nil when a user has not purchased a product' do
user = create(:user)
purchase = create(:online_section_purchase)
section = purchase.purchaseable
expect(section.purchase_for(user)).to be_nil
end
end
describe '.current' do
it "does not include sections that haven't started or have finished" do
past = create(:past_section)
future = create(:future_section)
current = create(:section, starts_on: Date.today, ends_on: Date.tomorrow)
expect(Section.current).to include current
expect(Section.current).not_to include future
expect(Section.current).not_to include past
Timecop.freeze(Date.tomorrow) do
expect(Section.current).to include current
end
Timecop.freeze(Date.today + 2) do
expect(Section.current).not_to include current
end
end
end
describe '.send_notifications' do
it 'sends notifications for each current section' do
notifier = stub(send_notifications_for: nil)
Notifier.stubs(new: notifier)
future = create(:future_section)
workshop = future.workshop
current = create(:section, workshop: workshop, starts_on: Date.today, ends_on: Date.tomorrow)
future_purchase = create(:paid_purchase, purchaseable: future)
current_purchase = create(:paid_purchase, purchaseable: current)
video = create(:video, watchable: workshop)
event = create(:event, workshop: workshop)
Section.send_notifications
expect(Notifier).to have_received(:new).with(current, [current_purchase.email])
expect(notifier).to have_received(:send_notifications_for).with([video])
expect(notifier).to have_received(:send_notifications_for).with([event])
expect(Notifier).to have_received(:new).with(future, [future_purchase.email]).never
end
end
end
| 1 | 7,006 | I think you can use `build_stubbed` here for the same results but with more speed. | thoughtbot-upcase | rb |
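Editor's note on the review above: roughly what the build_stubbed suggestion looks like when applied to the new '.upcoming?' example from the diff. It assumes the :section factory accepts starts_on, which the surrounding spec already relies on via create(:section, starts_on: ...); build_stubbed goes through the factory without persisting, so the example stays fast.

describe '.upcoming?' do
  it 'knows if it has happened yet' do
    next_week = build_stubbed(:section, starts_on: 1.week.from_now)
    last_week = build_stubbed(:section, starts_on: 1.week.ago)
    today = build_stubbed(:section, starts_on: Date.today)

    expect(today).to be_upcoming
    expect(next_week).to be_upcoming
    expect(last_week).not_to be_upcoming
  end
end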
@@ -491,6 +491,11 @@ class JMX(object):
if hold or (rampup and not iterations):
scheduler = True
+ if isinstance(rampup, numeric_types) and isinstance(hold, numeric_types):
+ duration = hold + rampup
+ else:
+ duration = 0
+
trg = etree.Element("ThreadGroup", guiclass="ThreadGroupGui",
testclass="ThreadGroup", testname=testname)
if on_error is not None: | 1 | """
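Editor's note on the hunk above: rampup and hold are not guaranteed to be plain numbers at this point (they may, for example, be JMeter property expressions), so the patch only sums them when both are numeric and otherwise falls back to a duration of 0. A standalone sketch of that guard; numeric_types here stands in for the int/float tuple that bzt.six exposes, and the property expression below is only an illustration.

numeric_types = (int, float)  # stand-in for bzt.six.numeric_types


def scheduler_duration(rampup, hold):
    """Return rampup + hold when both are plain numbers, else 0.

    A value such as "${__P(hold)}" is a JMeter property expression that is
    resolved only at run time, so it cannot be summed while generating the
    JMX and the scheduler duration falls back to 0.
    """
    if isinstance(rampup, numeric_types) and isinstance(hold, numeric_types):
        return hold + rampup
    return 0


print(scheduler_duration(60, 300))             # 360
print(scheduler_duration("${__P(hold)}", 60))  # 0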
Module holds base stuff regarding JMX format
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import traceback
from cssselect import GenericTranslator
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Scenario, BetterDict
from bzt.six import etree, iteritems, string_types, parse, text_type, numeric_types
def cond_int(val):
if isinstance(val, float):
return int(val)
return val
class JMX(object):
"""
A class to manipulate and generate JMX test plans for JMeter
:param original: path to existing JMX to load. If it is None, then creates
empty test plan
"""
TEST_PLAN_SEL = "jmeterTestPlan>hashTree>hashTree"
THR_GROUP_SEL = TEST_PLAN_SEL + ">hashTree[type=tg]"
THR_TIMER = "kg.apc.jmeter.timers.VariableThroughputTimer"
def __init__(self, original=None, test_plan_name="BZT Generated Test Plan"):
self.log = logging.getLogger(self.__class__.__name__)
if original:
self.load(original)
else:
root = etree.Element("jmeterTestPlan")
self.tree = etree.ElementTree(root)
test_plan = etree.Element("TestPlan", guiclass="TestPlanGui",
testname=test_plan_name,
testclass="TestPlan")
htree = etree.Element("hashTree")
htree.append(test_plan)
htree.append(etree.Element("hashTree"))
self.append("jmeterTestPlan", htree)
element_prop = self._get_arguments_panel("TestPlan.user_defined_variables")
self.append("jmeterTestPlan>hashTree>TestPlan", element_prop)
def load(self, original):
"""
Load existing JMX file
:param original: JMX file path
:raise TaurusInternalException: in case of XML parsing error
"""
try:
self.tree = etree.ElementTree()
self.tree.parse(original)
except BaseException as exc:
msg = "XML parsing failed for file %s: %s"
raise TaurusInternalException(msg % (original, exc))
def get(self, selector):
"""
Returns tree elements by CSS selector
:type selector: str
:return:
"""
expression = GenericTranslator().css_to_xpath(selector)
nodes = self.tree.xpath(expression)
return nodes
def append(self, selector, node):
"""
Add node to container specified by selector. If multiple nodes will
match the selector, first of them will be used as container.
:param selector: CSS selector for container
:param node: Element instance to add
:raise TaurusInternalException: if container was not found
"""
container = self.get(selector)
if not len(container):
msg = "Failed to find TestPlan node in file: %s"
raise TaurusInternalException(msg % selector)
container[0].append(node)
def save(self, filename):
"""
Save JMX into file
:param filename:
"""
self.log.debug("Saving JMX to: %s", filename)
with open(filename, "wb") as fhd:
self.tree.write(fhd, pretty_print=True, encoding="UTF-8", xml_declaration=True)
@staticmethod
def _flag(flag_name, bool_value):
"""
Generates element for JMX flag node
:param flag_name:
:param bool_value:
:return:
"""
elm = etree.Element(flag_name)
elm.text = "true" if bool_value else "false"
return elm
@staticmethod
def __jtl_writer(filename, label, flags):
"""
Generates JTL writer
:param filename:
:return:
"""
jtl = etree.Element("stringProp", {"name": "filename"})
jtl.text = filename
name = etree.Element("name")
name.text = "saveConfig"
value = etree.Element("value")
value.set("class", "SampleSaveConfiguration")
for key, val in iteritems(flags):
value.append(JMX._flag(key, val))
obj_prop = etree.Element("objProp")
obj_prop.append(name)
obj_prop.append(value)
listener = etree.Element("ResultCollector",
testname=label,
testclass="ResultCollector",
guiclass="SimpleDataWriter")
listener.append(jtl)
listener.append(obj_prop)
return listener
@staticmethod
def new_kpi_listener(filename):
"""
Generates listener for writing basic KPI data in CSV format
:param filename:
:return:
"""
flags = {
"xml": False,
"fieldNames": True,
"time": True,
"timestamp": True,
"latency": True,
"success": True,
"label": True,
"code": True,
"message": True,
"threadName": True,
"dataType": False,
"encoding": False,
"assertions": False,
"subresults": False,
"responseData": False,
"samplerData": False,
"responseHeaders": False,
"requestHeaders": False,
"responseDataOnError": False,
"saveAssertionResultsFailureMessage": False,
"bytes": True,
"hostname": True,
"threadCounts": True,
"url": False
}
return JMX.__jtl_writer(filename, "KPI Writer", flags)
@staticmethod
def new_xml_listener(filename, is_full, user_flags):
"""
:param is_full: bool
:param filename: str
:param user_flags: BetterDict
:return:
"""
default_flags = {
"xml": True,
"fieldNames": True,
"time": True,
"timestamp": True,
"latency": True,
"success": True,
"label": True,
"code": True,
"message": True,
"threadName": True,
"dataType": True,
"encoding": True,
"assertions": True,
"subresults": True,
"responseData": False,
"samplerData": False,
"responseHeaders": True,
"requestHeaders": True,
"responseDataOnError": True,
"saveAssertionResultsFailureMessage": True,
"bytes": True,
"threadCounts": True,
"url": True
}
flags = BetterDict()
flags.merge(default_flags)
flags.merge(user_flags)
if is_full:
writer = JMX.__jtl_writer(filename, "Trace Writer", flags)
else:
writer = JMX.__jtl_writer(filename, "Errors Writer", flags)
writer.append(JMX._bool_prop("ResultCollector.error_logging", True))
return writer
@staticmethod
def _get_arguments_panel(name):
"""
Generates ArgumentsPanel node
:param name:
:return:
"""
return etree.Element("elementProp", name=name, elementType="Arguments",
guiclass="ArgumentsPanel", testclass="Arguments")
@staticmethod
def _get_http_request(url, label, method, timeout, body, keepalive, files=(), encoding=None, follow_redirects=True,
use_random_host_ip=False, host_ips=()):
"""
Generates HTTP request
:type method: str
:type label: str
:type url: str
:rtype: lxml.etree.Element
"""
proxy = etree.Element("HTTPSamplerProxy", guiclass="HttpTestSampleGui", testclass="HTTPSamplerProxy")
proxy.set("testname", label)
args = JMX._get_arguments_panel("HTTPsampler.Arguments")
if isinstance(body, string_types):
JMX.__add_body_from_string(args, body, proxy)
elif isinstance(body, dict):
JMX.__add_body_from_script(args, body, proxy)
elif body:
msg = "Cannot handle 'body' option of type %s: %s"
raise TaurusInternalException(msg % (type(body), body))
parsed_url = parse.urlparse(url)
JMX.__add_hostnameport_2sampler(parsed_url, proxy, url)
path = parsed_url.path
if parsed_url.query:
path += "?" + parsed_url.query
proxy.append(JMX._string_prop("HTTPSampler.path", path))
proxy.append(JMX._string_prop("HTTPSampler.method", method))
proxy.append(JMX._bool_prop("HTTPSampler.use_keepalive", keepalive))
proxy.append(JMX._bool_prop("HTTPSampler.follow_redirects", follow_redirects))
proxy.append(JMX._bool_prop("HTTPSampler.auto_redirects", False))
if timeout is not None:
proxy.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
proxy.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
if encoding is not None:
proxy.append(JMX._string_prop("HTTPSampler.contentEncoding", encoding))
if files:
proxy.append(JMX._bool_prop("HTTPSampler.DO_MULTIPART_POST", True))
proxy.append(JMX._bool_prop("HTTPSampler.BROWSER_COMPATIBLE_MULTIPART", True))
files_prop = JMX._element_prop("HTTPsampler.Files", "HTTPFileArgs")
files_coll = JMX._collection_prop("HTTPFileArgs.files")
for file_dict in files:
file_elem = JMX._element_prop(file_dict['path'], "HTTPFileArg")
file_elem.append(JMX._string_prop("File.path", file_dict['path']))
file_elem.append(JMX._string_prop("File.paramname", file_dict["param"]))
file_elem.append(JMX._string_prop("File.mimetype", file_dict['mime-type']))
files_coll.append(file_elem)
files_prop.append(files_coll)
proxy.append(files_prop)
if use_random_host_ip and host_ips:
if len(host_ips) > 1:
expr = "${__chooseRandom(%s,randomAddr)}" % ",".join(host_ips)
else:
expr = host_ips[0]
proxy.append(JMX._string_prop("HTTPSampler.ipSource", expr))
return proxy
@staticmethod
def __add_body_from_string(args, body, proxy):
proxy.append(JMX._bool_prop("HTTPSampler.postBodyRaw", True))
coll_prop = JMX._collection_prop("Arguments.arguments")
header = JMX._element_prop("elementProp", "HTTPArgument")
try:
header.append(JMX._string_prop("Argument.value", body))
except ValueError:
logging.warning("Failed to set body: %s", traceback.format_exc())
header.append(JMX._string_prop("Argument.value", "BINARY-STUB"))
coll_prop.append(header)
args.append(coll_prop)
proxy.append(args)
@staticmethod
def __add_body_from_script(args, body, proxy):
http_args_coll_prop = JMX._collection_prop("Arguments.arguments")
for arg_name, arg_value in body.items():
if not (isinstance(arg_value, string_types) or isinstance(arg_value, numeric_types)):
msg = 'Body field "%s: %s" requires "Content-Type: application/json" header'
raise TaurusInternalException(msg % (arg_name, arg_value))
try:
http_element_prop = JMX._element_prop(arg_name, "HTTPArgument")
except ValueError:
logging.warning("Failed to get element property: %s", traceback.format_exc())
http_element_prop = JMX._element_prop('BINARY-STUB', "HTTPArgument")
try:
http_element_prop.append(JMX._string_prop("Argument.name", arg_name))
except ValueError:
logging.warning("Failed to set arg name: %s", traceback.format_exc())
http_element_prop.append(JMX._string_prop("Argument.name", "BINARY-STUB"))
try:
http_element_prop.append(
JMX._string_prop("Argument.value", arg_value if arg_value is not None else ''))
except ValueError:
logging.warning("Failed to set arg name: %s", traceback.format_exc())
http_element_prop.append(JMX._string_prop("Argument.value", "BINARY-STUB"))
http_element_prop.append(JMX._bool_prop("HTTPArgument.always_encode", True))
use_equals = arg_value is not None
http_element_prop.append(JMX._bool_prop("HTTPArgument.use_equals", arg_value is not None))
http_element_prop.append(JMX._string_prop("Argument.metadata", '=' if use_equals else ''))
http_args_coll_prop.append(http_element_prop)
args.append(http_args_coll_prop)
proxy.append(args)
@staticmethod
def __add_hostnameport_2sampler(parsed_url, proxy, url):
if parsed_url.scheme:
proxy.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
if parsed_url.netloc:
netloc_parts = parsed_url.netloc.split(':')
if netloc_parts[0]:
proxy.append(JMX._string_prop("HTTPSampler.domain", netloc_parts[0]))
if len(netloc_parts) > 1 and netloc_parts[1]:
proxy.append(JMX._string_prop("HTTPSampler.port", netloc_parts[1]))
else:
try:
if parsed_url.port:
proxy.append(JMX._string_prop("HTTPSampler.port", parsed_url.port))
else:
proxy.append(JMX._string_prop("HTTPSampler.port", ""))
except ValueError:
logging.debug("Non-parsable port: %s", url)
proxy.append(JMX._string_prop("HTTPSampler.port", ""))
@staticmethod
def _element_prop(name, element_type):
"""
Generates element property node
:param name:
:param element_type:
:return:
"""
res = etree.Element("elementProp", name=name, elementType=element_type)
return res
@staticmethod
def _collection_prop(name):
"""
Adds Collection prop
:param name:
:return:
"""
res = etree.Element("collectionProp", name=name)
return res
@staticmethod
def _string_prop(name, value):
"""
Generates string property node
:param name:
:param value:
:return:
"""
res = etree.Element("stringProp", name=name)
res.text = text_type(value)
return res
@staticmethod
def _long_prop(name, value):
"""
Generates long property node
:param name:
:param value:
:return:
"""
res = etree.Element("longProp", name=name)
res.text = text_type(value)
return res
@staticmethod
def _bool_prop(name, value):
"""
Generates boolean property
:param name:
:param value:
:return:
"""
res = etree.Element("boolProp", name=name)
res.text = 'true' if value else 'false'
return res
@staticmethod
def int_prop(name, value):
"""
JMX int property
:param name:
:param value:
:return:
"""
res = etree.Element("intProp", name=name)
res.text = text_type(value)
return res
@staticmethod
def get_thread_group(concurrency=None, rampup=0, hold=0, iterations=None,
testname="ThreadGroup", on_error="continue"):
"""
Generates ThreadGroup
:param concurrency:
:param rampup:
:param hold:
:param iterations:
:param testname:
:param on_error:
:return:
"""
if not rampup:
rampup = 0
rampup = cond_int(rampup)
hold = cond_int(hold)
if not concurrency:
concurrency = 1
if not iterations:
iterations = -1
scheduler = False
if hold or (rampup and not iterations):
scheduler = True
trg = etree.Element("ThreadGroup", guiclass="ThreadGroupGui",
testclass="ThreadGroup", testname=testname)
if on_error is not None:
trg.append(JMX._string_prop("ThreadGroup.on_sample_error", on_error))
loop = etree.Element("elementProp",
name="ThreadGroup.main_controller",
elementType="LoopController",
guiclass="LoopControlPanel",
testclass="LoopController")
loop.append(JMX._bool_prop("LoopController.continue_forever", False)) # always false except of root LC
loop.append(JMX._string_prop("LoopController.loops", iterations))
trg.append(loop)
trg.append(JMX._string_prop("ThreadGroup.num_threads", concurrency))
trg.append(JMX._string_prop("ThreadGroup.ramp_time", rampup))
trg.append(JMX._string_prop("ThreadGroup.start_time", ""))
trg.append(JMX._string_prop("ThreadGroup.end_time", ""))
trg.append(JMX._bool_prop("ThreadGroup.scheduler", scheduler))
trg.append(JMX._string_prop("ThreadGroup.duration", rampup + hold))
return trg
def get_rps_shaper(self):
"""
:return: etree.Element
"""
throughput_timer_element = etree.Element(self.THR_TIMER,
guiclass=self.THR_TIMER + "Gui",
testclass=self.THR_TIMER,
testname="jp@gc - Throughput Shaping Timer",
enabled="true")
shaper_load_prof = self._collection_prop("load_profile")
throughput_timer_element.append(shaper_load_prof)
return throughput_timer_element
def add_rps_shaper_schedule(self, shaper_etree, start_rps, end_rps, duration):
"""
Adds schedule to rps shaper
:param shaper_etree:
:param start_rps:
:param end_rps:
:param duration:
:return:
"""
shaper_collection = shaper_etree.find(".//collectionProp[@name='load_profile']")
coll_prop = self._collection_prop("1817389797")
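        # the numeric property names below mirror the identifiers the jp@gc Throughput
        # Shaping Timer uses when saving a load_profile row: start RPS, end RPS and
        # step duration respectively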
start_rps_prop = self._string_prop("49", cond_int(start_rps))
end_rps_prop = self._string_prop("1567", cond_int(end_rps))
duration_prop = self._string_prop("53", cond_int(duration))
coll_prop.append(start_rps_prop)
coll_prop.append(end_rps_prop)
coll_prop.append(duration_prop)
shaper_collection.append(coll_prop)
@staticmethod
def add_user_def_vars_elements(udv_dict, testname="Variables from Taurus"):
"""
:type testname: str
:type udv_dict: dict[str,str]
:rtype: etree.Element
"""
udv_element = etree.Element("Arguments", guiclass="ArgumentsPanel", testclass="Arguments",
testname=testname)
udv_collection_prop = JMX._collection_prop("Arguments.arguments")
for var_name in sorted(udv_dict.keys(), key=str):
udv_element_prop = JMX._element_prop(str(var_name), "Argument")
udv_arg_name_prop = JMX._string_prop("Argument.name", var_name)
udv_arg_value_prop = JMX._string_prop("Argument.value", udv_dict[var_name])
udv_arg_desc_prop = JMX._string_prop("Argument.desc", "")
udv_arg_meta_prop = JMX._string_prop("Argument.metadata", "=")
udv_element_prop.append(udv_arg_name_prop)
udv_element_prop.append(udv_arg_value_prop)
udv_element_prop.append(udv_arg_desc_prop)
udv_element_prop.append(udv_arg_meta_prop)
udv_collection_prop.append(udv_element_prop)
udv_element.append(udv_collection_prop)
return udv_element
@staticmethod
def get_concurrency_thread_group(
concurrency=None, rampup=0, hold=0, steps=None, on_error="continue", testname="ConcurrencyThreadGroup"):
"""
:return: etree element, Concurrency Thread Group
"""
if not rampup:
rampup = 0
if not concurrency:
concurrency = 1
        if steps is None:  # zero means an unlimited number of steps
steps = 0
name = 'com.blazemeter.jmeter.threads.concurrency.ConcurrencyThreadGroup'
concurrency_thread_group = etree.Element(
name, guiclass=name + "Gui", testclass=name, testname=testname, enabled="true")
virtual_user_controller = etree.Element(
"elementProp",
name="ThreadGroup.main_controller",
elementType="com.blazemeter.jmeter.control.VirtualUserController")
concurrency_thread_group.append(virtual_user_controller)
concurrency_thread_group.append(JMX._string_prop("ThreadGroup.on_sample_error", on_error))
concurrency_thread_group.append(JMX._string_prop("TargetLevel", str(concurrency)))
concurrency_thread_group.append(JMX._string_prop("RampUp", str(cond_int(rampup))))
concurrency_thread_group.append(JMX._string_prop("Steps", steps))
concurrency_thread_group.append(JMX._string_prop("Hold", str(cond_int(hold))))
concurrency_thread_group.append(JMX._string_prop("LogFilename", ""))
concurrency_thread_group.append(JMX._string_prop("Iterations", ""))
concurrency_thread_group.append(JMX._string_prop("Unit", "S"))
return concurrency_thread_group
@staticmethod
def get_dns_cache_mgr():
"""
Adds dns cache element with defaults parameters
:return:
"""
dns_element = etree.Element("DNSCacheManager", guiclass="DNSCachePanel", testclass="DNSCacheManager",
testname="DNS Cache Manager")
dns_element.append(JMX._collection_prop("DNSCacheManager.servers"))
dns_element.append(JMX._bool_prop("DNSCacheManager.clearEachIteration", False))
dns_element.append(JMX._bool_prop("DNSCacheManager.isCustomResolver", False))
return dns_element
@staticmethod
def _get_header_mgr(hdict):
"""
:type hdict: dict[str,str]
:rtype: lxml.etree.Element
"""
mgr = etree.Element("HeaderManager", guiclass="HeaderPanel", testclass="HeaderManager", testname="Headers")
coll_prop = etree.Element("collectionProp", name="HeaderManager.headers")
for hname, hval in iteritems(hdict):
header = etree.Element("elementProp", name="", elementType="Header")
header.append(JMX._string_prop("Header.name", hname))
header.append(JMX._string_prop("Header.value", hval))
coll_prop.append(header)
mgr.append(coll_prop)
return mgr
@staticmethod
def _get_cache_mgr():
"""
:rtype: lxml.etree.Element
"""
mgr = etree.Element("CacheManager", guiclass="CacheManagerGui", testclass="CacheManager", testname="Cache")
mgr.append(JMX._bool_prop("clearEachIteration", True))
mgr.append(JMX._bool_prop("useExpires", True))
return mgr
@staticmethod
def _get_cookie_mgr(scenario=None):
"""
:rtype: lxml.etree.Element
"""
mgr = etree.Element("CookieManager", guiclass="CookiePanel", testclass="CookieManager", testname="Cookies")
mgr.append(JMX._bool_prop("CookieManager.clearEachIteration", True))
mgr.append(JMX._string_prop("CookieManager.implementation",
"org.apache.jmeter.protocol.http.control.HC4CookieHandler"))
if scenario:
cookies = scenario.get(Scenario.COOKIES, [])
if cookies:
cookies_coll = JMX._collection_prop("CookieManager.cookies")
mgr.append(cookies_coll)
for cookie in cookies:
if not isinstance(cookie, dict):
raise TaurusConfigError("Cookie must be dictionary: %s" % cookie)
c_name = cookie.get("name", TaurusConfigError("Name of cookie isn't found: %s" % cookie))
c_value = cookie.get("value", TaurusConfigError("Value of cookie isn't found: %s" % cookie))
c_domain = cookie.get("domain", TaurusConfigError("Domain of cookie isn't found: %s" % cookie))
c_path = cookie.get("path", "")
c_secure = cookie.get("secure", False)
                    # the following params are hardcoded in JMeter
c_expires = 0
c_path_specified = True
c_domain_specified = True
c_elem = etree.Element("elementProp", name=c_name, elementType="Cookie", testname=c_name)
c_elem.append(JMX._string_prop("Cookie.value", c_value))
c_elem.append(JMX._string_prop("Cookie.domain", c_domain))
c_elem.append(JMX._string_prop("Cookie.path", c_path))
c_elem.append(JMX._bool_prop("Cookie.secure", c_secure))
c_elem.append(JMX._long_prop("Cookie.expires", c_expires))
c_elem.append(JMX._bool_prop("Cookie.path_specified", c_path_specified))
c_elem.append(JMX._bool_prop("Cookie.domain_specified", c_domain_specified))
cookies_coll.append(c_elem)
return mgr
@staticmethod
def _get_http_defaults(default_address=None, timeout=None, retrieve_resources=None, concurrent_pool_size=4,
content_encoding=None, resources_regex=None):
"""
:rtype: lxml.etree.Element
"""
cfg = etree.Element("ConfigTestElement", guiclass="HttpDefaultsGui",
testclass="ConfigTestElement", testname="Defaults")
if retrieve_resources:
cfg.append(JMX._bool_prop("HTTPSampler.image_parser", True))
cfg.append(JMX._bool_prop("HTTPSampler.concurrentDwn", True))
if concurrent_pool_size:
cfg.append(JMX._string_prop("HTTPSampler.concurrentPool", concurrent_pool_size))
params = etree.Element("elementProp",
name="HTTPsampler.Arguments",
elementType="Arguments",
guiclass="HTTPArgumentsPanel",
testclass="Arguments", testname="user_defined")
cfg.append(params)
if default_address:
parsed_url = parse.urlsplit(default_address)
if parsed_url.scheme:
cfg.append(JMX._string_prop("HTTPSampler.protocol", parsed_url.scheme))
if parsed_url.netloc:
netloc = parsed_url.netloc
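                # split an explicit "host:port" netloc so the port and domain are stored separately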
if ':' in netloc:
index = netloc.rfind(':')
cfg.append(JMX._string_prop("HTTPSampler.port", netloc[index + 1:]))
netloc = netloc[:index]
cfg.append(JMX._string_prop("HTTPSampler.domain", netloc))
if timeout:
cfg.append(JMX._string_prop("HTTPSampler.connect_timeout", timeout))
cfg.append(JMX._string_prop("HTTPSampler.response_timeout", timeout))
if content_encoding:
cfg.append(JMX._string_prop("HTTPSampler.contentEncoding", content_encoding))
if resources_regex:
cfg.append(JMX._string_prop("HTTPSampler.embedded_url_re", resources_regex))
return cfg
@staticmethod
def _get_dur_assertion(timeout):
"""
:type timeout: int
:return:
"""
element = etree.Element("DurationAssertion", guiclass="DurationAssertionGui",
testclass="DurationAssertion", testname="Timeout Check")
element.append(JMX._string_prop("DurationAssertion.duration", timeout))
return element
@staticmethod
def _get_constant_timer(delay):
"""
:type delay: int
:rtype: lxml.etree.Element
"""
element = etree.Element("ConstantTimer", guiclass="ConstantTimerGui",
testclass="ConstantTimer", testname="Think-Time")
element.append(JMX._string_prop("ConstantTimer.delay", delay))
return element
@staticmethod
def _get_extractor(varname, headers, regexp, template, match_no, default='NOT_FOUND'):
"""
:type varname: str
:type regexp: str
:type template: str|int
:type match_no: int
:type default: str
:rtype: lxml.etree.Element
"""
if isinstance(template, int):
template = '$%s$' % template
if headers.lower() == 'headers':
headers = 'true'
elif headers.lower() == 'http-code':
headers = 'code'
elif headers.lower() == 'url':
headers = 'URL'
else:
headers = 'body'
element = etree.Element("RegexExtractor", guiclass="RegexExtractorGui",
testclass="RegexExtractor", testname="Get %s" % varname, enabled="true")
element.append(JMX._string_prop("RegexExtractor.useHeaders", headers))
element.append(JMX._string_prop("RegexExtractor.refname", varname))
element.append(JMX._string_prop("RegexExtractor.regex", regexp))
element.append(JMX._string_prop("Sample.scope", "parent"))
element.append(JMX._string_prop("RegexExtractor.template", template))
element.append(JMX._string_prop("RegexExtractor.default", default))
element.append(JMX._string_prop("RegexExtractor.match_number", match_no))
return element
@staticmethod
def _get_jquerycss_extractor(varname, selector, attribute, match_no, default="NOT_FOUND"):
"""
:type varname: str
        :type selector: str
        :type attribute: str
:type match_no: int
:type default: str
:rtype: lxml.etree.Element
"""
element = etree.Element("HtmlExtractor", guiclass="HtmlExtractorGui", testclass="HtmlExtractor",
testname="Get %s" % varname)
element.append(JMX._string_prop("HtmlExtractor.refname", varname))
element.append(JMX._string_prop("HtmlExtractor.expr", selector))
element.append(JMX._string_prop("HtmlExtractor.attribute", attribute))
element.append(JMX._string_prop("HtmlExtractor.match_number", match_no))
element.append(JMX._string_prop("HtmlExtractor.default", default))
return element
@staticmethod
def _get_json_extractor(varname, jsonpath, default='NOT_FOUND', from_variable=None):
"""
:type varname: str
:type default: str
:rtype: lxml.etree.Element
"""
package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathextractor"
element = etree.Element("%s.JSONPathExtractor" % package,
guiclass="%s.gui.JSONPathExtractorGui" % package,
testclass="%s.JSONPathExtractor" % package,
testname="Get %s" % varname)
element.append(JMX._string_prop("VAR", varname))
element.append(JMX._string_prop("JSONPATH", jsonpath))
element.append(JMX._string_prop("DEFAULT", default))
if from_variable:
element.append(JMX._string_prop("VARIABLE", from_variable))
element.append(JMX._string_prop("SUBJECT", "VAR"))
return element
@staticmethod
def _get_json_path_assertion(jsonpath, expected_value, json_validation, expect_null, invert, regexp=True):
"""
:type jsonpath: str
:type expected_value: str
:type json_validation: bool
:type expect_null: bool
:type invert: bool
:type regexp: bool
:return: lxml.etree.Element
"""
package = "com.atlantbh.jmeter.plugins.jsonutils.jsonpathassertion"
element = etree.Element("%s.JSONPathAssertion" % package,
guiclass="%s.gui.JSONPathAssertionGui" % package,
testclass="%s.JSONPathAssertion" % package,
testname="JSon path assertion")
element.append(JMX._string_prop("JSON_PATH", jsonpath))
element.append(JMX._string_prop("EXPECTED_VALUE", expected_value))
element.append(JMX._bool_prop("JSONVALIDATION", json_validation))
element.append(JMX._bool_prop("EXPECT_NULL", expect_null))
element.append(JMX._bool_prop("INVERT", invert))
element.append(JMX._bool_prop("ISREGEX", regexp))
return element
@staticmethod
def _get_xpath_extractor(varname, xpath, default, validate_xml, ignore_whitespace, use_tolerant_parser):
"""
:type varname: str
:type xpath: str
:type default: str
:type validate_xml: bool
:type ignore_whitespace: bool
:type use_tolerant_parser: bool
:rtype: lxml.etree.Element
"""
element = etree.Element("XPathExtractor",
guiclass="XPathExtractorGui",
testclass="XPathExtractor",
testname="Get %s" % varname)
element.append(JMX._string_prop("XPathExtractor.refname", varname))
element.append(JMX._string_prop("XPathExtractor.xpathQuery", xpath))
element.append(JMX._string_prop("XPathExtractor.default", default))
element.append(JMX._bool_prop("XPathExtractor.validate", validate_xml))
element.append(JMX._bool_prop("XPathExtractor.whitespace", ignore_whitespace))
element.append(JMX._bool_prop("XPathExtractor.tolerant", use_tolerant_parser))
return element
@staticmethod
def _get_xpath_assertion(xpath, validate_xml, ignore_whitespace, use_tolerant_parser, invert):
"""
:type xpath: str
:type validate_xml: bool
:type ignore_whitespace: bool
:type use_tolerant_parser: bool
:return: lxml.etree.Element
"""
element = etree.Element("XPathAssertion",
guiclass="XPathAssertionGui",
testclass="XPathAssertion",
testname="XPath Assertion")
element.append(JMX._string_prop("XPath.xpath", xpath))
element.append(JMX._bool_prop("XPath.validate", validate_xml))
element.append(JMX._bool_prop("XPath.whitespace", ignore_whitespace))
element.append(JMX._bool_prop("XPath.tolerant", use_tolerant_parser))
element.append(JMX._bool_prop("XPath.negate", invert))
return element
@staticmethod
def _get_resp_assertion(field, contains, is_regexp, is_invert, assume_success=False):
"""
:type field: str
:type contains: list[str]
:type is_regexp: bool
:type is_invert: bool
:rtype: lxml.etree.Element
"""
tname = "Assert %s %s" % ("hasn't" if is_invert else "has",
"[" + ", ".join('"' + text_type(x) + '"' for x in contains) + "]")
element = etree.Element("ResponseAssertion", guiclass="AssertionGui",
testclass="ResponseAssertion", testname=tname)
if field == Scenario.FIELD_HEADERS:
fld = "Assertion.response_headers"
elif field == Scenario.FIELD_RESP_CODE:
fld = "Assertion.response_code"
else:
fld = "Assertion.response_data"
if is_regexp:
if is_invert:
mtype = 6 # not contains
else:
mtype = 2 # contains
else:
if is_invert:
mtype = 20 # not substring
else:
mtype = 16 # substring
element.append(JMX._string_prop("Assertion.test_field", fld))
element.append(JMX._string_prop("Assertion.test_type", mtype))
element.append(JMX._bool_prop("Assertion.assume_success", assume_success))
coll_prop = etree.Element("collectionProp", name="Asserion.test_strings")
for string in contains:
coll_prop.append(JMX._string_prop("", string))
element.append(coll_prop)
return element
@staticmethod
def _get_jsr223_element(language, script_file, parameters, execute, script_text=None):
if execute == "before":
element = etree.Element("JSR223PreProcessor", guiclass="TestBeanGUI",
testclass="JSR223PreProcessor", testname="JSR223 PreProcessor")
else:
element = etree.Element("JSR223PostProcessor", guiclass="TestBeanGUI",
testclass="JSR223PostProcessor", testname="JSR223 PostProcessor")
element.append(JMX._string_prop("filename", script_file if script_file else ''))
element.append(JMX._string_prop("script", script_text if script_text else ''))
element.append(JMX._string_prop("parameters", parameters))
element.append(JMX._string_prop("scriptLanguage", language))
return element
@staticmethod
def _get_csv_config(path, delimiter, is_quoted, loop, variable_names):
"""
:type path: str
:type delimiter: str
:type is_quoted: bool
:return:
"""
element = etree.Element("CSVDataSet", guiclass="TestBeanGUI",
testclass="CSVDataSet", testname="CSV %s" % os.path.basename(path))
element.append(JMX._string_prop("filename", path))
element.append(JMX._string_prop("delimiter", delimiter))
element.append(JMX._bool_prop("quotedData", is_quoted))
element.append(JMX._bool_prop("recycle", loop))
element.append(JMX._bool_prop("stopThread", not loop))
element.append(JMX._string_prop("variableNames", variable_names))
return element
def set_enabled(self, sel, state):
"""
Toggle items by selector
:type sel: str
:type state: bool
"""
items = self.get(sel)
self.log.debug("Enable %s elements %s: %s", state, sel, items)
for item in items:
item.set("enabled", 'true' if state else 'false')
def set_text(self, sel, text):
"""
Set text value
:type sel: str
:type text: str
"""
items = self.get(sel)
res = 0
for item in items:
item.text = text_type(text)
res += 1
return res
@staticmethod
def _get_simple_controller(name):
return etree.Element("GenericController", guiclass="LogicControllerGui", testclass="GenericController",
testname=name)
def _add_results_tree(self):
dbg_tree = etree.Element("ResultCollector",
testname="View Results Tree",
testclass="ResultCollector",
guiclass="ViewResultsFullVisualizer")
self.append(self.TEST_PLAN_SEL, dbg_tree)
self.append(self.TEST_PLAN_SEL, etree.Element("hashTree"))
@staticmethod
def _get_results_tree():
dbg_tree = etree.Element("ResultCollector",
testname="View Results Tree",
testclass="ResultCollector",
guiclass="ViewResultsFullVisualizer")
return dbg_tree
@staticmethod
def _get_if_controller(condition):
controller = etree.Element("IfController", guiclass="IfControllerPanel", testclass="IfController",
testname="If Controller")
controller.append(JMX._string_prop("IfController.condition", condition))
return controller
@staticmethod
def _get_loop_controller(loops):
if loops == 'forever':
iterations = -1
else:
iterations = loops
controller = etree.Element("LoopController", guiclass="LoopControlPanel", testclass="LoopController",
testname="Loop Controller")
        controller.append(JMX._bool_prop("LoopController.continue_forever", False)) # always false except for the root LC
controller.append(JMX._string_prop("LoopController.loops", str(iterations)))
return controller
@staticmethod
def _get_foreach_controller(input_var, loop_var):
# TODO: useSeparator option
controller = etree.Element("ForeachController", guiclass="ForeachControlPanel", testclass="ForeachController",
testname="ForEach Controller")
controller.append(JMX._string_prop("ForeachController.inputVal", input_var))
controller.append(JMX._string_prop("ForeachController.returnVal", loop_var))
controller.append(JMX._bool_prop("ForeachController.useSeparator", True))
return controller
@staticmethod
def _get_while_controller(condition):
controller = etree.Element("WhileController", guiclass="WhileControllerGui", testclass="WhileController",
testname="While Controller")
controller.append(JMX._string_prop("WhileController.condition", condition))
return controller
@staticmethod
def _get_transaction_controller(transaction_name, force_parent_sample=False):
controller = etree.Element("TransactionController", guiclass="TransactionControllerGui",
testclass="TransactionController", testname=transaction_name)
controller.append(JMX._bool_prop("TransactionController.parent", force_parent_sample))
return controller
@staticmethod
def _get_functional_mode_prop(enabled):
return JMX._bool_prop("TestPlan.functional_mode", enabled)
@staticmethod
def _get_action_block(action_index, target_index, duration_ms):
action = etree.Element("TestAction", guiclass="TestActionGui", testclass="TestAction", testname="Test Action")
action.append(JMX.int_prop("ActionProcessor.action", action_index))
action.append(JMX.int_prop("ActionProcessor.target", target_index))
action.append(JMX._string_prop("ActionProcessor.duration", str(duration_ms)))
return action
| 1 | 14,517 | what if one of them is property and one is not? | Blazemeter-taurus | py |
@@ -277,6 +277,8 @@ module.exports = class Dashboard extends Plugin {
name: file.name,
type: file.type,
data: blob
+ }).catch(() => {
+ // Ignore
})
})
} | 1 | const Plugin = require('../../core/Plugin')
const Translator = require('../../core/Translator')
const dragDrop = require('drag-drop')
const DashboardUI = require('./Dashboard')
const StatusBar = require('../StatusBar')
const Informer = require('../Informer')
const ThumbnailGenerator = require('../ThumbnailGenerator')
const { findAllDOMElements, toArray } = require('../../core/Utils')
const prettyBytes = require('prettier-bytes')
const { defaultTabIcon } = require('./icons')
// Some code for managing focus was adopted from https://github.com/ghosh/micromodal
// MIT licence, https://github.com/ghosh/micromodal/blob/master/LICENSE.md
// Copyright (c) 2017 Indrashish Ghosh
const FOCUSABLE_ELEMENTS = [
'a[href]',
'area[href]',
'input:not([disabled]):not([type="hidden"]):not([aria-hidden])',
'select:not([disabled]):not([aria-hidden])',
'textarea:not([disabled]):not([aria-hidden])',
'button:not([disabled]):not([aria-hidden])',
'iframe',
'object',
'embed',
'[contenteditable]',
'[tabindex]:not([tabindex^="-"])'
]
/**
* Dashboard UI with previews, metadata editing, tabs for various services and more
*/
module.exports = class Dashboard extends Plugin {
constructor (uppy, opts) {
super(uppy, opts)
this.id = this.opts.id || 'Dashboard'
this.title = 'Dashboard'
this.type = 'orchestrator'
const defaultLocale = {
strings: {
selectToUpload: 'Select files to upload',
closeModal: 'Close Modal',
upload: 'Upload',
importFrom: 'Import files from',
dashboardWindowTitle: 'Uppy Dashboard Window (Press escape to close)',
dashboardTitle: 'Uppy Dashboard',
copyLinkToClipboardSuccess: 'Link copied to clipboard.',
copyLinkToClipboardFallback: 'Copy the URL below',
copyLink: 'Copy link',
fileSource: 'File source',
done: 'Done',
name: 'Name',
removeFile: 'Remove file',
editFile: 'Edit file',
editing: 'Editing',
finishEditingFile: 'Finish editing file',
localDisk: 'Local Disk',
myDevice: 'My Device',
dropPasteImport: 'Drop files here, paste, import from one of the locations above or',
dropPaste: 'Drop files here, paste or',
browse: 'browse',
fileProgress: 'File progress: upload speed and ETA',
numberOfSelectedFiles: 'Number of selected files',
uploadAllNewFiles: 'Upload all new files',
emptyFolderAdded: 'No files were added from empty folder',
uploadXFiles: {
0: 'Upload %{smart_count} file',
1: 'Upload %{smart_count} files'
},
uploadXNewFiles: {
0: 'Upload +%{smart_count} file',
1: 'Upload +%{smart_count} files'
},
folderAdded: {
0: 'Added %{smart_count} file from %{folder}',
1: 'Added %{smart_count} files from %{folder}'
}
}
}
// set default options
const defaultOptions = {
target: 'body',
metaFields: [],
trigger: '#uppy-select-files',
inline: false,
width: 750,
height: 550,
thumbnailWidth: 280,
defaultTabIcon: defaultTabIcon,
showProgressDetails: false,
hideUploadButton: false,
hideProgressAfterFinish: false,
note: null,
closeModalOnClickOutside: false,
disableStatusBar: false,
disableInformer: false,
disableThumbnailGenerator: false,
disablePageScrollWhenModalOpen: true,
onRequestCloseModal: () => this.closeModal(),
locale: defaultLocale
}
// merge default options with the ones set by user
this.opts = Object.assign({}, defaultOptions, opts)
this.locale = Object.assign({}, defaultLocale, this.opts.locale)
this.locale.strings = Object.assign({}, defaultLocale.strings, this.opts.locale.strings)
this.translator = new Translator({locale: this.locale})
this.i18n = this.translator.translate.bind(this.translator)
this.openModal = this.openModal.bind(this)
this.closeModal = this.closeModal.bind(this)
this.requestCloseModal = this.requestCloseModal.bind(this)
this.isModalOpen = this.isModalOpen.bind(this)
this.addTarget = this.addTarget.bind(this)
this.hideAllPanels = this.hideAllPanels.bind(this)
this.showPanel = this.showPanel.bind(this)
this.getFocusableNodes = this.getFocusableNodes.bind(this)
this.setFocusToFirstNode = this.setFocusToFirstNode.bind(this)
this.maintainFocus = this.maintainFocus.bind(this)
this.initEvents = this.initEvents.bind(this)
this.onKeydown = this.onKeydown.bind(this)
this.handleClickOutside = this.handleClickOutside.bind(this)
this.handleFileCard = this.handleFileCard.bind(this)
this.handleDrop = this.handleDrop.bind(this)
this.handlePaste = this.handlePaste.bind(this)
this.handleInputChange = this.handleInputChange.bind(this)
this.updateDashboardElWidth = this.updateDashboardElWidth.bind(this)
this.render = this.render.bind(this)
this.install = this.install.bind(this)
}
addTarget (plugin) {
const callerPluginId = plugin.id || plugin.constructor.name
const callerPluginName = plugin.title || callerPluginId
const callerPluginType = plugin.type
if (callerPluginType !== 'acquirer' &&
callerPluginType !== 'progressindicator' &&
callerPluginType !== 'presenter') {
let msg = 'Dashboard: Modal can only be used by plugins of types: acquirer, progressindicator, presenter'
this.uppy.log(msg)
return
}
const target = {
id: callerPluginId,
name: callerPluginName,
type: callerPluginType
}
const state = this.getPluginState()
const newTargets = state.targets.slice()
newTargets.push(target)
this.setPluginState({
targets: newTargets
})
return this.el
}
hideAllPanels () {
this.setPluginState({
activePanel: false
})
}
showPanel (id) {
const { targets } = this.getPluginState()
const activePanel = targets.filter((target) => {
return target.type === 'acquirer' && target.id === id
})[0]
this.setPluginState({
activePanel: activePanel
})
}
requestCloseModal () {
if (this.opts.onRequestCloseModal) {
return this.opts.onRequestCloseModal()
} else {
this.closeModal()
}
}
getFocusableNodes () {
const nodes = this.el.querySelectorAll(FOCUSABLE_ELEMENTS)
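    // querySelectorAll returns a NodeList, so convert it into a plain Array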
return Object.keys(nodes).map((key) => nodes[key])
}
setFocusToFirstNode () {
const focusableNodes = this.getFocusableNodes()
if (focusableNodes.length) focusableNodes[0].focus()
}
maintainFocus (event) {
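    // Focus trap: wrap keyboard focus around so that tabbing past the last focusable
    // node (or shift-tabbing before the first one) keeps focus inside the modal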
var focusableNodes = this.getFocusableNodes()
var focusedItemIndex = focusableNodes.indexOf(document.activeElement)
if (event.shiftKey && focusedItemIndex === 0) {
focusableNodes[focusableNodes.length - 1].focus()
event.preventDefault()
}
if (!event.shiftKey && focusedItemIndex === focusableNodes.length - 1) {
focusableNodes[0].focus()
event.preventDefault()
}
}
openModal () {
this.setPluginState({
isHidden: false
})
// save scroll position
this.savedScrollPosition = window.scrollY
// save active element, so we can restore focus when modal is closed
this.savedActiveElement = document.activeElement
if (this.opts.disablePageScrollWhenModalOpen) {
document.body.classList.add('uppy-Dashboard-isOpen')
}
this.updateDashboardElWidth()
this.setFocusToFirstNode()
}
closeModal () {
this.setPluginState({
isHidden: true
})
if (this.opts.disablePageScrollWhenModalOpen) {
document.body.classList.remove('uppy-Dashboard-isOpen')
}
this.savedActiveElement.focus()
}
isModalOpen () {
return !this.getPluginState().isHidden || false
}
onKeydown (event) {
// close modal on esc key press
if (event.keyCode === 27) this.requestCloseModal(event)
// maintainFocus on tab key press
if (event.keyCode === 9) this.maintainFocus(event)
}
handleClickOutside () {
if (this.opts.closeModalOnClickOutside) this.requestCloseModal()
}
handlePaste (ev) {
const files = toArray(ev.clipboardData.items)
files.forEach((file) => {
if (file.kind !== 'file') return
const blob = file.getAsFile()
if (!blob) {
this.uppy.log('[Dashboard] File pasted, but the file blob is empty')
this.uppy.info('Error pasting file', 'error')
return
}
this.uppy.log('[Dashboard] File pasted')
this.uppy.addFile({
source: this.id,
name: file.name,
type: file.type,
data: blob
})
})
}
handleInputChange (ev) {
ev.preventDefault()
const files = toArray(ev.target.files)
files.forEach((file) => {
this.uppy.addFile({
source: this.id,
name: file.name,
type: file.type,
data: file
})
})
}
initEvents () {
// Modal open button
const showModalTrigger = findAllDOMElements(this.opts.trigger)
if (!this.opts.inline && showModalTrigger) {
showModalTrigger.forEach(trigger => trigger.addEventListener('click', this.openModal))
}
if (!this.opts.inline && !showModalTrigger) {
this.uppy.log('Dashboard modal trigger not found. Make sure `trigger` is set in Dashboard options unless you are planning to call openModal() method yourself')
}
if (!this.opts.inline) {
document.addEventListener('keydown', this.onKeydown)
}
// Drag Drop
this.removeDragDropListener = dragDrop(this.el, (files) => {
this.handleDrop(files)
})
this.uppy.on('dashboard:file-card', this.handleFileCard)
this.updateDashboardElWidth()
window.addEventListener('resize', this.updateDashboardElWidth)
}
removeEvents () {
const showModalTrigger = findAllDOMElements(this.opts.trigger)
if (!this.opts.inline && showModalTrigger) {
showModalTrigger.forEach(trigger => trigger.removeEventListener('click', this.openModal))
}
if (!this.opts.inline) {
document.removeEventListener('keydown', this.onKeydown)
}
this.removeDragDropListener()
this.uppy.off('dashboard:file-card', this.handleFileCard)
window.removeEventListener('resize', this.updateDashboardElWidth)
}
updateDashboardElWidth () {
const dashboardEl = this.el.querySelector('.uppy-Dashboard-inner')
this.uppy.log(`Dashboard width: ${dashboardEl.offsetWidth}`)
this.setPluginState({
containerWidth: dashboardEl.offsetWidth
})
}
handleFileCard (fileId) {
this.setPluginState({
fileCardFor: fileId || false
})
}
handleDrop (files) {
this.uppy.log('[Dashboard] Files were dropped')
files.forEach((file) => {
this.uppy.addFile({
source: this.id,
name: file.name,
type: file.type,
data: file
})
})
}
render (state) {
const pluginState = this.getPluginState()
const files = state.files
const newFiles = Object.keys(files).filter((file) => {
return !files[file].progress.uploadStarted
})
const inProgressFiles = Object.keys(files).filter((file) => {
return !files[file].progress.uploadComplete &&
files[file].progress.uploadStarted &&
!files[file].isPaused
})
let inProgressFilesArray = []
inProgressFiles.forEach((file) => {
inProgressFilesArray.push(files[file])
})
let totalSize = 0
let totalUploadedSize = 0
inProgressFilesArray.forEach((file) => {
totalSize = totalSize + (file.progress.bytesTotal || 0)
totalUploadedSize = totalUploadedSize + (file.progress.bytesUploaded || 0)
})
totalSize = prettyBytes(totalSize)
totalUploadedSize = prettyBytes(totalUploadedSize)
const attachRenderFunctionToTarget = (target) => {
const plugin = this.uppy.getPlugin(target.id)
return Object.assign({}, target, {
icon: plugin.icon || this.opts.defaultTabIcon,
render: plugin.render
})
}
const isSupported = (target) => {
const plugin = this.uppy.getPlugin(target.id)
      // If the plugin does not provide an `isSupported` check, assume the plugin works everywhere.
if (typeof plugin.isSupported !== 'function') {
return true
}
return plugin.isSupported()
}
const acquirers = pluginState.targets
.filter(target => target.type === 'acquirer' && isSupported(target))
.map(attachRenderFunctionToTarget)
const progressindicators = pluginState.targets
.filter(target => target.type === 'progressindicator')
.map(attachRenderFunctionToTarget)
const startUpload = (ev) => {
this.uppy.upload().catch((err) => {
// Log error.
this.uppy.log(err.stack || err.message || err)
})
}
const cancelUpload = (fileID) => {
this.uppy.emit('upload-cancel', fileID)
this.uppy.removeFile(fileID)
}
const showFileCard = (fileID) => {
this.uppy.emit('dashboard:file-card', fileID)
}
const fileCardDone = (meta, fileID) => {
this.uppy.setFileMeta(fileID, meta)
this.uppy.emit('dashboard:file-card')
}
return DashboardUI({
state: state,
modal: pluginState,
newFiles: newFiles,
files: files,
totalFileCount: Object.keys(files).length,
totalProgress: state.totalProgress,
acquirers: acquirers,
activePanel: pluginState.activePanel,
getPlugin: this.uppy.getPlugin,
progressindicators: progressindicators,
autoProceed: this.uppy.opts.autoProceed,
hideUploadButton: this.opts.hideUploadButton,
id: this.id,
closeModal: this.requestCloseModal,
handleClickOutside: this.handleClickOutside,
handleInputChange: this.handleInputChange,
handlePaste: this.handlePaste,
showProgressDetails: this.opts.showProgressDetails,
inline: this.opts.inline,
showPanel: this.showPanel,
hideAllPanels: this.hideAllPanels,
log: this.uppy.log,
i18n: this.i18n,
addFile: this.uppy.addFile,
removeFile: this.uppy.removeFile,
info: this.uppy.info,
note: this.opts.note,
metaFields: this.getPluginState().metaFields,
resumableUploads: this.uppy.state.capabilities.resumableUploads || false,
startUpload: startUpload,
pauseUpload: this.uppy.pauseResume,
retryUpload: this.uppy.retryUpload,
cancelUpload: cancelUpload,
fileCardFor: pluginState.fileCardFor,
showFileCard: showFileCard,
fileCardDone: fileCardDone,
updateDashboardElWidth: this.updateDashboardElWidth,
maxWidth: this.opts.maxWidth,
maxHeight: this.opts.maxHeight,
currentWidth: pluginState.containerWidth,
isWide: pluginState.containerWidth > 400
})
}
discoverProviderPlugins () {
this.uppy.iteratePlugins((plugin) => {
if (plugin && !plugin.target && plugin.opts && plugin.opts.target === this.constructor) {
this.addTarget(plugin)
}
})
}
install () {
// Set default state for Modal
this.setPluginState({
isHidden: true,
showFileCard: false,
activePanel: false,
metaFields: this.opts.metaFields,
targets: []
})
const target = this.opts.target
if (target) {
this.mount(target, this)
}
const plugins = this.opts.plugins || []
plugins.forEach((pluginID) => {
const plugin = this.uppy.getPlugin(pluginID)
if (plugin) plugin.mount(this, plugin)
})
if (!this.opts.disableStatusBar) {
this.uppy.use(StatusBar, {
target: this,
hideUploadButton: this.opts.hideUploadButton,
hideAfterFinish: this.opts.hideProgressAfterFinish,
locale: this.opts.locale
})
}
if (!this.opts.disableInformer) {
this.uppy.use(Informer, {
target: this
})
}
if (!this.opts.disableThumbnailGenerator) {
this.uppy.use(ThumbnailGenerator, {
thumbnailWidth: this.opts.thumbnailWidth
})
}
this.discoverProviderPlugins()
this.initEvents()
}
uninstall () {
if (!this.opts.disableInformer) {
const informer = this.uppy.getPlugin('Informer')
// Checking if this plugin exists, in case it was removed by uppy-core
// before the Dashboard was.
if (informer) this.uppy.removePlugin(informer)
}
if (!this.opts.disableStatusBar) {
const statusBar = this.uppy.getPlugin('StatusBar')
if (statusBar) this.uppy.removePlugin(statusBar)
}
if (!this.opts.disableThumbnailGenerator) {
const thumbnail = this.uppy.getPlugin('ThumbnailGenerator')
if (thumbnail) this.uppy.removePlugin(thumbnail)
}
const plugins = this.opts.plugins || []
plugins.forEach((pluginID) => {
const plugin = this.uppy.getPlugin(pluginID)
if (plugin) plugin.unmount()
})
this.unmount()
this.removeEvents()
}
}
| 1 | 10,530 | > UI plugins swallow rejection errors so they don't end up in the console with no way to fix But it will still catch upstream in `addFile` and restrictions to show the informer?.. | transloadit-uppy | js |
@@ -378,11 +378,12 @@ const executeOperation = (topology, operation, args, options) => {
// The driver sessions spec mandates that we implicitly create sessions for operations
// that are not explicitly provided with a session.
- let session, opOptions;
+ let session, opOptions, owner;
if (!options.skipSessions && topology.hasSessionSupport()) {
opOptions = args[args.length - 2];
if (opOptions == null || opOptions.session == null) {
- session = topology.startSession();
+ owner = {};
+ session = topology.startSession({ owner });
const optionsIndex = args.length - 2;
args[optionsIndex] = Object.assign({}, args[optionsIndex], { session: session });
} else if (opOptions.session && opOptions.session.hasEnded) { | 1 | 'use strict';
var MongoError = require('mongodb-core').MongoError,
ReadPreference = require('mongodb-core').ReadPreference;
var shallowClone = function(obj) {
var copy = {};
for (var name in obj) copy[name] = obj[name];
return copy;
};
// Figure out the read preference
var translateReadPreference = function(options) {
var r = null;
if (options.readPreference) {
r = options.readPreference;
} else {
return options;
}
if (typeof r === 'string') {
options.readPreference = new ReadPreference(r);
} else if (r && !(r instanceof ReadPreference) && typeof r === 'object') {
const mode = r.mode || r.preference;
if (mode && typeof mode === 'string') {
options.readPreference = new ReadPreference(mode, r.tags, {
maxStalenessSeconds: r.maxStalenessSeconds
});
}
} else if (!(r instanceof ReadPreference)) {
throw new TypeError('Invalid read preference: ' + r);
}
return options;
};
// Set simple property
var getSingleProperty = function(obj, name, value) {
Object.defineProperty(obj, name, {
enumerable: true,
get: function() {
return value;
}
});
};
var formatSortValue = (exports.formatSortValue = function(sortDirection) {
var value = ('' + sortDirection).toLowerCase();
switch (value) {
case 'ascending':
case 'asc':
case '1':
return 1;
case 'descending':
case 'desc':
case '-1':
return -1;
default:
throw new Error(
'Illegal sort clause, must be of the form ' +
"[['field1', '(ascending|descending)'], " +
"['field2', '(ascending|descending)']]"
);
}
});
var formattedOrderClause = (exports.formattedOrderClause = function(sortValue) {
var orderBy = {};
if (sortValue == null) return null;
if (Array.isArray(sortValue)) {
if (sortValue.length === 0) {
return null;
}
for (var i = 0; i < sortValue.length; i++) {
if (sortValue[i].constructor === String) {
orderBy[sortValue[i]] = 1;
} else {
orderBy[sortValue[i][0]] = formatSortValue(sortValue[i][1]);
}
}
} else if (sortValue != null && typeof sortValue === 'object') {
orderBy = sortValue;
} else if (typeof sortValue === 'string') {
orderBy[sortValue] = 1;
} else {
throw new Error(
'Illegal sort clause, must be of the form ' +
"[['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]"
);
}
return orderBy;
});
var checkCollectionName = function checkCollectionName(collectionName) {
if ('string' !== typeof collectionName) {
throw new MongoError('collection name must be a String');
}
if (!collectionName || collectionName.indexOf('..') !== -1) {
throw new MongoError('collection names cannot be empty');
}
if (
collectionName.indexOf('$') !== -1 &&
collectionName.match(/((^\$cmd)|(oplog\.\$main))/) == null
) {
throw new MongoError("collection names must not contain '$'");
}
if (collectionName.match(/^\.|\.$/) != null) {
throw new MongoError("collection names must not start or end with '.'");
}
  // Validate that we are not passing 0x00 in the collection name
if (collectionName.indexOf('\x00') !== -1) {
throw new MongoError('collection names cannot contain a null character');
}
};
var handleCallback = function(callback, err, value1, value2) {
try {
if (callback == null) return;
if (callback) {
return value2 ? callback(err, value1, value2) : callback(err, value1);
}
} catch (err) {
process.nextTick(function() {
throw err;
});
return false;
}
return true;
};
/**
* Wrap a Mongo error document in an Error instance
* @ignore
* @api private
*/
var toError = function(error) {
if (error instanceof Error) return error;
var msg = error.err || error.errmsg || error.errMessage || error;
var e = MongoError.create({ message: msg, driver: true });
// Get all object keys
var keys = typeof error === 'object' ? Object.keys(error) : [];
for (var i = 0; i < keys.length; i++) {
try {
e[keys[i]] = error[keys[i]];
} catch (err) {
// continue
}
}
return e;
};
/**
* @ignore
*/
var normalizeHintField = function normalizeHintField(hint) {
var finalHint = null;
if (typeof hint === 'string') {
finalHint = hint;
} else if (Array.isArray(hint)) {
finalHint = {};
hint.forEach(function(param) {
finalHint[param] = 1;
});
} else if (hint != null && typeof hint === 'object') {
finalHint = {};
for (var name in hint) {
finalHint[name] = hint[name];
}
}
return finalHint;
};
/**
* Create index name based on field spec
*
* @ignore
* @api private
*/
var parseIndexOptions = function(fieldOrSpec) {
var fieldHash = {};
var indexes = [];
var keys;
// Get all the fields accordingly
if ('string' === typeof fieldOrSpec) {
// 'type'
indexes.push(fieldOrSpec + '_' + 1);
fieldHash[fieldOrSpec] = 1;
} else if (Array.isArray(fieldOrSpec)) {
fieldOrSpec.forEach(function(f) {
if ('string' === typeof f) {
// [{location:'2d'}, 'type']
indexes.push(f + '_' + 1);
fieldHash[f] = 1;
} else if (Array.isArray(f)) {
// [['location', '2d'],['type', 1]]
indexes.push(f[0] + '_' + (f[1] || 1));
fieldHash[f[0]] = f[1] || 1;
} else if (isObject(f)) {
// [{location:'2d'}, {type:1}]
keys = Object.keys(f);
keys.forEach(function(k) {
indexes.push(k + '_' + f[k]);
fieldHash[k] = f[k];
});
} else {
// undefined (ignore)
}
});
} else if (isObject(fieldOrSpec)) {
// {location:'2d', type:1}
keys = Object.keys(fieldOrSpec);
keys.forEach(function(key) {
indexes.push(key + '_' + fieldOrSpec[key]);
fieldHash[key] = fieldOrSpec[key];
});
}
return {
name: indexes.join('_'),
keys: keys,
fieldHash: fieldHash
};
};
var isObject = (exports.isObject = function(arg) {
return '[object Object]' === Object.prototype.toString.call(arg);
});
var debugOptions = function(debugFields, options) {
var finaloptions = {};
debugFields.forEach(function(n) {
finaloptions[n] = options[n];
});
return finaloptions;
};
var decorateCommand = function(command, options, exclude) {
for (var name in options) {
if (exclude[name] == null) command[name] = options[name];
}
return command;
};
var mergeOptions = function(target, source) {
for (var name in source) {
target[name] = source[name];
}
return target;
};
// Merge options with translation
var translateOptions = function(target, source) {
var translations = {
// SSL translation options
sslCA: 'ca',
sslCRL: 'crl',
sslValidate: 'rejectUnauthorized',
sslKey: 'key',
sslCert: 'cert',
sslPass: 'passphrase',
// SocketTimeout translation options
socketTimeoutMS: 'socketTimeout',
connectTimeoutMS: 'connectionTimeout',
// Replicaset options
replicaSet: 'setName',
rs_name: 'setName',
secondaryAcceptableLatencyMS: 'acceptableLatency',
connectWithNoPrimary: 'secondaryOnlyConnectionAllowed',
// Mongos options
acceptableLatencyMS: 'localThresholdMS'
};
for (var name in source) {
if (translations[name]) {
target[translations[name]] = source[name];
} else {
target[name] = source[name];
}
}
return target;
};
var filterOptions = function(options, names) {
var filterOptions = {};
for (var name in options) {
if (names.indexOf(name) !== -1) filterOptions[name] = options[name];
}
// Filtered options
return filterOptions;
};
// Write concern keys
var writeConcernKeys = ['w', 'j', 'wtimeout', 'fsync'];
// Merge the write concern options
var mergeOptionsAndWriteConcern = function(targetOptions, sourceOptions, keys, mergeWriteConcern) {
// Mix in any allowed options
for (var i = 0; i < keys.length; i++) {
if (!targetOptions[keys[i]] && sourceOptions[keys[i]] !== undefined) {
targetOptions[keys[i]] = sourceOptions[keys[i]];
}
}
// No merging of write concern
if (!mergeWriteConcern) return targetOptions;
// Found no write Concern options
var found = false;
for (i = 0; i < writeConcernKeys.length; i++) {
if (targetOptions[writeConcernKeys[i]]) {
found = true;
break;
}
}
if (!found) {
for (i = 0; i < writeConcernKeys.length; i++) {
if (sourceOptions[writeConcernKeys[i]]) {
targetOptions[writeConcernKeys[i]] = sourceOptions[writeConcernKeys[i]];
}
}
}
return targetOptions;
};
/**
* Executes the given operation with provided arguments.
*
* This method reduces large amounts of duplication in the entire codebase by providing
* a single point for determining whether callbacks or promises should be used. Additionally
* it allows for a single point of entry to provide features such as implicit sessions, which
* are required by the Driver Sessions specification in the event that a ClientSession is
* not provided
*
* @param {object} topology The topology to execute this operation on
* @param {function} operation The operation to execute
* @param {array} args Arguments to apply the provided operation
* @param {object} [options] Options that modify the behavior of the method
 * @param {function} [options.resultMutator] Allows for the result of the operation to be changed for custom return types
*/
const executeOperation = (topology, operation, args, options) => {
if (topology == null) {
throw new TypeError('This method requires a valid topology instance');
}
if (!Array.isArray(args)) {
throw new TypeError('This method requires an array of arguments to apply');
}
options = options || {};
const Promise = topology.s.promiseLibrary;
let resultMutator = options.resultMutator;
let callback = args[args.length - 1];
// The driver sessions spec mandates that we implicitly create sessions for operations
// that are not explicitly provided with a session.
let session, opOptions;
if (!options.skipSessions && topology.hasSessionSupport()) {
opOptions = args[args.length - 2];
if (opOptions == null || opOptions.session == null) {
session = topology.startSession();
const optionsIndex = args.length - 2;
args[optionsIndex] = Object.assign({}, args[optionsIndex], { session: session });
} else if (opOptions.session && opOptions.session.hasEnded) {
throw new MongoError('Use of expired sessions is not permitted');
}
}
const makeExecuteCallback = (resolve, reject) =>
function executeCallback(err, result) {
if (session && !options.returnsCursor) {
session.endSession(() => {
delete opOptions.session;
if (err) return reject(err);
if (resultMutator) return resolve(resultMutator(result));
resolve(result);
});
} else {
if (err) return reject(err);
if (resultMutator) return resolve(resultMutator(result));
resolve(result);
}
};
// Execute using callback
if (typeof callback === 'function') {
callback = args.pop();
const handler = makeExecuteCallback(
result => callback(null, result),
err => callback(err, null)
);
args.push(handler);
try {
return operation.apply(null, args);
} catch (e) {
handler(e);
throw e;
}
}
// Return a Promise
if (args[args.length - 1] != null) {
throw new TypeError('final argument to `executeOperation` must be a callback');
}
return new Promise(function(resolve, reject) {
const handler = makeExecuteCallback(resolve, reject);
args[args.length - 1] = handler;
try {
return operation.apply(null, args);
} catch (e) {
handler(e);
}
});
};
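// Illustrative usage only (the operation and argument names here are hypothetical,
// not part of this module):
//   executeOperation(topology, findOperation, [coll, query, options, callback]);
// When the trailing callback is omitted (left null/undefined), a Promise created
// with topology.s.promiseLibrary is returned instead.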
exports.filterOptions = filterOptions;
exports.mergeOptions = mergeOptions;
exports.translateOptions = translateOptions;
exports.shallowClone = shallowClone;
exports.getSingleProperty = getSingleProperty;
exports.checkCollectionName = checkCollectionName;
exports.toError = toError;
exports.formattedOrderClause = formattedOrderClause;
exports.parseIndexOptions = parseIndexOptions;
exports.normalizeHintField = normalizeHintField;
exports.handleCallback = handleCallback;
exports.decorateCommand = decorateCommand;
exports.isObject = isObject;
exports.debugOptions = debugOptions;
exports.MAX_JS_INT = 0x20000000000000;
exports.mergeOptionsAndWriteConcern = mergeOptionsAndWriteConcern;
exports.translateReadPreference = translateReadPreference;
exports.executeOperation = executeOperation;
| 1 | 14,201 | can't this just be left undefined/null? | mongodb-node-mongodb-native | js |
@@ -64,8 +64,8 @@ module EsSpecHelper
def es_execute_with_retries(retries = 3, &block)
begin
retries -= 1
- response = block.call
- rescue SearchUnavailable => _error
+ block.call
+ rescue SearchUnavailable => error
if retries > 0
retry
else | 1 | require 'elasticsearch/extensions/test/cluster'
module EsSpecHelper
def es_mock_bad_gateway
allow_any_instance_of(Elasticsearch::Transport::Client)
.to receive(:perform_request)
.and_raise(Elasticsearch::Transport::Transport::Errors::BadGateway, "oops, can't find ES service")
end
def es_mock_connection_failed
allow_any_instance_of(Elasticsearch::Transport::Client)
.to receive(:perform_request)
.and_raise(Faraday::ConnectionFailed, "oops, connection failed")
end
def start_es_server
    # circleci has a locally installed version of elasticsearch, so alter PATH to find it
ENV["PATH"] = "./elasticsearch/bin:#{ENV["PATH"]}"
es_test_cluster_opts = {
nodes: 1,
path_logs: "tmp/es-logs"
}
unless es_server_running?
Elasticsearch::Extensions::Test::Cluster.start(es_test_cluster_opts)
end
end
def stop_es_server
if es_server_running?
Elasticsearch::Extensions::Test::Cluster.stop
end
end
def es_server_running?
Elasticsearch::Extensions::Test::Cluster.running?
end
def create_es_index(klass)
errors = []
completed = 0
output_if_debug_true { "Creating Index for class #{klass}" }
klass.__elasticsearch__.create_index!(force: true, index: klass.index_name)
klass.__elasticsearch__.refresh_index!
klass.__elasticsearch__.import(return: "errors", batch_size: 200) do |resp|
# show errors immediately (rather than buffering them)
errors += resp["items"].select { |k, v| k.values.first["error"] }
completed += resp["items"].size
output_if_debug_true { "Finished #{completed} items" }
STDERR.flush
STDOUT.flush
if errors.size > 0 && ENV["ES_DEBUG"]
STDOUT.puts "ERRORS in #{$$}:"
STDOUT.puts errors.pretty_inspect
end
end
output_if_debug_true { "Refreshing index for class #{klass}" }
klass.__elasticsearch__.refresh_index!
end
# h/t https://devmynd.com/blog/2014-2-dealing-with-failing-elasticserach-tests/
def es_execute_with_retries(retries = 3, &block)
begin
retries -= 1
response = block.call
rescue SearchUnavailable => _error
if retries > 0
retry
else
puts "retries: #{retries}"
raise error
end
end
end
def output_if_debug_true
if ENV["ES_DEBUG"]
puts yield
end
end
end
RSpec.configure do |config|
include EsSpecHelper
config.before :each, elasticsearch: true do
start_es_server unless es_server_running?
create_es_index(Proposal)
end
config.after :suite do
stop_es_server
end
end
| 1 | 17,014 | changed bc we are using the `error` var below | 18F-C2 | rb |
@@ -172,13 +172,7 @@ class TestKnowledgeRestfulAPI(APITestCase):
user_id=1,
)
- answer = get_answer.json
-
- expected_result = dict(
- status=404,
- )
-
- assert answer['status'] == expected_result['status']
+ assert get_answer.status_code == 404
def test_get_knwkb_mappings(self):
"""Test the return of list of mappings.""" | 1 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Test knowledge REST API."""
from __future__ import print_function
from invenio.base.wrappers import lazy_import
from invenio.ext.restful.utils import APITestCase
from invenio.ext.sqlalchemy.utils import session_manager
from invenio.testsuite import make_test_suite, run_test_suite
db = lazy_import('invenio.ext.sqlalchemy.db')
class TestKnowledgeRestfulAPI(APITestCase):
"""Test REST API of mappings."""
@session_manager
def setUp(self):
"""Run before each test."""
from invenio.modules.knowledge.models import KnwKB, KnwKBRVAL
self.kb_a = KnwKB(name='example1', description='test description',
kbtype='w')
db.session.add(self.kb_a)
# add kbrval
key = "testkey1"
value = "testvalue1"
self.kb_a.kbrvals.set(KnwKBRVAL(m_key=key, m_value=value))
# add kbrval (with different key and same value)
key = "testkey1_1"
value = "testvalue1"
self.kb_a.kbrvals.set(KnwKBRVAL(m_key=key, m_value=value))
# add kbrval
key = "testkey2"
value = "testvalue2"
self.kb_a.kbrvals.set(KnwKBRVAL(m_key=key, m_value=value))
# add kbrval
key = "testkey3"
value = "testvalue3"
self.kb_a.kbrvals.set(KnwKBRVAL(m_key=key, m_value=value))
# add kbrval
key = "testkey4"
value = "testvalue4"
self.kb_a.kbrvals.set(KnwKBRVAL(m_key=key, m_value=value))
self.kb_b = KnwKB(name='example2', description='test description 2',
kbtype='w')
db.session.add(self.kb_b)
# add kbrval
key = "testkey1b"
value = "testvalue1b"
self.kb_b.kbrvals.set(KnwKBRVAL(m_key=key, m_value=value))
# add kbrval
key = "testkey2b"
value = "testvalue2b"
self.kb_b.kbrvals.set(KnwKBRVAL(m_key=key, m_value=value))
# add kbrval
key = "testkey3b"
value = "testvalue3b"
self.kb_b.kbrvals.set(KnwKBRVAL(m_key=key, m_value=value))
# add kbrval
key = "testkey4b"
value = "testvalue4b"
self.kb_b.kbrvals.set(KnwKBRVAL(m_key=key, m_value=value))
@session_manager
def tearDown(self):
"""Run after every test."""
from invenio.modules.knowledge.models import KnwKB
db.session.delete(KnwKB.query.filter_by(id=self.kb_a.id).one())
db.session.delete(KnwKB.query.filter_by(id=self.kb_b.id).one())
def test_get_knwkb_ok(self):
"""Test return a knowledge."""
per_page = 2
get_answer = self.get(
'knwkbresource',
urlargs={
'slug': self.kb_a.slug,
'page': 1,
'per_page': per_page,
'from': '2'
},
user_id=1
)
answer = get_answer.json
assert answer['name'] == 'example1'
assert answer['type'] == 'w'
assert answer['description'] == 'test description'
assert answer['mappings'][0]['from'] == 'testkey2'
assert answer['mappings'][0]['to'] == 'testvalue2'
assert len(answer['mappings']) == 1
def test_get_knwkb_search_key_return_empty(self):
"""Test return a knowledge with search key that returns empty."""
per_page = 4
get_answer = self.get(
'knwkbresource',
urlargs={
'slug': self.kb_b.slug,
'page': 1,
'per_page': per_page,
'from': 'not_existing_mapping_from'
},
user_id=1
)
answer = get_answer.json
assert len(answer['mappings']) == 0
def test_get_knwkb_search_key(self):
"""Test return a knowledge with search key."""
per_page = 4
get_answer = self.get(
'knwkbresource',
urlargs={
'slug': self.kb_b.slug,
'page': 1,
'per_page': per_page,
'from': 'testkey1b'
},
user_id=1
)
answer = get_answer.json
assert answer['name'] == 'example2'
assert answer['type'] == 'w'
assert answer['description'] == 'test description 2'
assert answer['mappings'][0]['from'] == 'testkey1b'
assert answer['mappings'][0]['to'] == 'testvalue1b'
assert len(answer['mappings']) == 1
def test_get_knwkb_not_exist(self):
"""Test return a knowledge that not exists."""
slug = 'testsuite-slug-not-exists-123'
get_answer = self.get(
'knwkbresource',
urlargs=dict(slug=slug),
user_id=1,
)
answer = get_answer.json
expected_result = dict(
status=404,
)
assert answer['status'] == expected_result['status']
def test_get_knwkb_mappings(self):
"""Test the return of list of mappings."""
get_answer = self.get(
'knwkbmappingsresource',
urlargs=dict(
slug=self.kb_a.slug,
page=1,
per_page=10,
to="2"
),
user_id=1,
)
answer = get_answer.json
assert answer[0]['from'] == 'testkey2'
assert answer[0]['to'] == 'testvalue2'
def test_get_knwkb_mapping_to_unique_ok(self):
"""Test the return of unique "mappings to" list."""
per_page = 4
get_answer = self.get(
'knwkbmappingstoresource',
urlargs={
'slug': self.kb_a.slug,
'page': 1,
'per_page': per_page,
'unique': '1'
},
user_id=1
)
answer = get_answer.json
assert isinstance(answer, list)
assert 'testvalue1' in answer
assert 'testvalue2' in answer
assert 'testvalue3' in answer
assert 'testvalue4' in answer
assert len(answer) == 4
def test_get_knwkb_mapping_to_ok(self):
"""Test the return of "mappings to" list."""
per_page = 4
get_answer = self.get(
'knwkbmappingstoresource',
urlargs={
'slug': self.kb_a.slug,
'page': 1,
'per_page': per_page,
},
user_id=1
)
answer = get_answer.json
assert isinstance(answer, list)
assert 'testvalue1' in answer
assert 'testvalue2' in answer
assert 'testvalue3' in answer
assert 'testvalue4' not in answer
assert len(answer) == 4
def test_not_allowed_url(self):
"""Check not allowed url."""
paths = [
'foo',
'foo/bar',
'123',
'test/url/foo',
]
for path in paths:
self.get(
'notimplementedknowledegeresource',
urlargs={
'slug': self.kb_a.slug,
'foo': path,
},
user_id=1,
code=405,
)
self.head(
'notimplementedknowledegeresource',
urlargs={
'slug': self.kb_a.slug,
'foo': path,
},
user_id=1,
code=405,
)
self.options(
'notimplementedknowledegeresource',
urlargs={
'slug': self.kb_a.slug,
'foo': path,
},
user_id=1,
code=405,
)
self.post(
'notimplementedknowledegeresource',
urlargs={
'slug': self.kb_a.slug,
'foo': path,
},
user_id=1,
code=405,
)
self.put(
'notimplementedknowledegeresource',
urlargs={
'slug': self.kb_a.slug,
'foo': path,
},
user_id=1,
code=405,
)
TEST_SUITE = make_test_suite(TestKnowledgeRestfulAPI)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| 1 | 16,526 | Is there a reason why this is change is in the same commit? | inveniosoftware-invenio | py |
@@ -77,7 +77,7 @@ func (r *repo) GetClonedBranch() string {
// Copy does copying the repository to the given destination.
func (r *repo) Copy(dest string) (Repo, error) {
- cmd := exec.Command("cp", "-rf", r.dir, dest)
+ cmd := copyCommand(r.dir, dest)
out, err := cmd.CombinedOutput()
if err != nil {
return nil, formatCommandError(err, out) | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package git
import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
)
var (
ErrNoChange = errors.New("no change")
)
// Repo provides functions to get and handle git data.
type Repo interface {
GetPath() string
GetClonedBranch() string
Copy(dest string) (Repo, error)
ListCommits(ctx context.Context, visionRange string) ([]Commit, error)
GetLatestCommit(ctx context.Context) (Commit, error)
GetCommitHashForRev(ctx context.Context, rev string) (string, error)
ChangedFiles(ctx context.Context, from, to string) ([]string, error)
Checkout(ctx context.Context, commitish string) error
CheckoutPullRequest(ctx context.Context, number int, branch string) error
Clean() error
Pull(ctx context.Context, branch string) error
Push(ctx context.Context, branch string) error
CommitChanges(ctx context.Context, branch, message string, newBranch bool, changes map[string][]byte) error
}
type repo struct {
dir string
gitPath string
remote string
clonedBranch string
}
// NewRepo creates a new Repo instance.
func NewRepo(dir, gitPath, remote, clonedBranch string) *repo {
return &repo{
dir: dir,
gitPath: gitPath,
remote: remote,
clonedBranch: clonedBranch,
}
}
// GetPath returns the path to the local git directory.
func (r *repo) GetPath() string {
return r.dir
}
// GetClonedBranch returns the name of cloned branch.
func (r *repo) GetClonedBranch() string {
return r.clonedBranch
}
// Copy does copying the repository to the given destination.
func (r *repo) Copy(dest string) (Repo, error) {
cmd := exec.Command("cp", "-rf", r.dir, dest)
out, err := cmd.CombinedOutput()
if err != nil {
return nil, formatCommandError(err, out)
}
return &repo{
dir: dest,
gitPath: r.gitPath,
remote: r.remote,
clonedBranch: r.clonedBranch,
}, nil
}
// ListCommits returns a list of commits in a given revision range.
func (r *repo) ListCommits(ctx context.Context, revisionRange string) ([]Commit, error) {
args := []string{
"log",
"--no-decorate",
fmt.Sprintf("--pretty=format:%s", commitLogFormat),
}
if revisionRange != "" {
args = append(args, revisionRange)
}
out, err := r.runGitCommand(ctx, args...)
if err != nil {
return nil, formatCommandError(err, out)
}
return parseCommits(string(out))
}
// GetLatestCommit returns the most recent commit of current branch.
func (r *repo) GetLatestCommit(ctx context.Context) (Commit, error) {
commits, err := r.ListCommits(ctx, "-1")
if err != nil {
return Commit{}, err
}
if len(commits) != 1 {
return Commit{}, fmt.Errorf("commits must contain one item, got: %d", len(commits))
}
return commits[0], nil
}
// GetCommitHashForRev returns the hash value of the commit for a given rev.
func (r *repo) GetCommitHashForRev(ctx context.Context, rev string) (string, error) {
out, err := r.runGitCommand(ctx, "rev-parse", rev)
if err != nil {
return "", formatCommandError(err, out)
}
return strings.TrimSpace(string(out)), nil
}
// ChangedFiles returns a list of files those were touched between two commits.
func (r *repo) ChangedFiles(ctx context.Context, from, to string) ([]string, error) {
out, err := r.runGitCommand(ctx, "diff", "--name-only", from, to)
if err != nil {
return nil, formatCommandError(err, out)
}
var (
lines = strings.Split(string(out), "\n")
files = make([]string, 0, len(lines))
)
// The result may include some empty lines
// so we need to remove all of them.
for _, f := range lines {
if f != "" {
files = append(files, f)
}
}
return files, nil
}
// Checkout checkouts to a given commitish.
func (r *repo) Checkout(ctx context.Context, commitish string) error {
out, err := r.runGitCommand(ctx, "checkout", commitish)
if err != nil {
return formatCommandError(err, out)
}
return nil
}
// CheckoutPullRequest checkouts to the latest commit of a given pull request.
func (r *repo) CheckoutPullRequest(ctx context.Context, number int, branch string) error {
target := fmt.Sprintf("pull/%d/head:%s", number, branch)
out, err := r.runGitCommand(ctx, "fetch", r.remote, target)
if err != nil {
return formatCommandError(err, out)
}
return r.Checkout(ctx, branch)
}
// Pull fetches from and integrate with a local branch.
func (r *repo) Pull(ctx context.Context, branch string) error {
out, err := r.runGitCommand(ctx, "pull", r.remote, branch)
if err != nil {
return formatCommandError(err, out)
}
return nil
}
// Push pushes local changes of a given branch to the remote.
func (r *repo) Push(ctx context.Context, branch string) error {
out, err := r.runGitCommand(ctx, "push", r.remote, branch)
if err != nil {
return formatCommandError(err, out)
}
return nil
}
// CommitChanges commits some changes into a branch.
func (r *repo) CommitChanges(ctx context.Context, branch, message string, newBranch bool, changes map[string][]byte) error {
if newBranch {
if err := r.checkoutNewBranch(ctx, branch); err != nil {
return fmt.Errorf("failed to checkout new branch, branch: %v, error: %v", branch, err)
}
} else {
if err := r.Checkout(ctx, branch); err != nil {
return fmt.Errorf("failed to checkout branch, branch: %v, error: %v", branch, err)
}
}
// Apply the changes.
for p, bytes := range changes {
filePath := filepath.Join(r.dir, p)
dirPath := filepath.Dir(filePath)
if _, err := os.Stat(dirPath); os.IsNotExist(err) {
if err := os.MkdirAll(dirPath, os.ModePerm); err != nil {
return fmt.Errorf("failed to create directory, dir: %s, err: %v", dirPath, err)
}
}
if err := ioutil.WriteFile(filePath, bytes, os.ModePerm); err != nil {
return fmt.Errorf("failed to write file, file: %s, error: %v", filePath, err)
}
}
// Commit the changes.
if err := r.addCommit(ctx, message); err != nil {
return fmt.Errorf("failed to commit, branch: %s, error: %v", branch, err)
}
return nil
}
// Clean deletes all local git data.
func (r repo) Clean() error {
return os.RemoveAll(r.dir)
}
func (r *repo) checkoutNewBranch(ctx context.Context, branch string) error {
out, err := r.runGitCommand(ctx, "checkout", "-b", branch)
if err != nil {
return formatCommandError(err, out)
}
return nil
}
func (r repo) addCommit(ctx context.Context, message string) error {
out, err := r.runGitCommand(ctx, "add", ".")
if err != nil {
return formatCommandError(err, out)
}
out, err = r.runGitCommand(ctx, "commit", "-m", message)
if err != nil {
msg := string(out)
if strings.Contains(msg, "nothing to commit, working tree clean") {
return ErrNoChange
}
return formatCommandError(err, out)
}
return nil
}
// setUser configures username and email for local user of this repo.
func (r *repo) setUser(ctx context.Context, username, email string) error {
if out, err := r.runGitCommand(ctx, "config", "user.name", username); err != nil {
return formatCommandError(err, out)
}
if out, err := r.runGitCommand(ctx, "config", "user.email", email); err != nil {
return formatCommandError(err, out)
}
return nil
}
func (r *repo) setRemote(ctx context.Context, remote string) error {
out, err := r.runGitCommand(ctx, "remote", "set-url", "origin", remote)
if err != nil {
return formatCommandError(err, out)
}
return nil
}
func (r *repo) runGitCommand(ctx context.Context, args ...string) ([]byte, error) {
cmd := exec.CommandContext(ctx, r.gitPath, args...)
cmd.Dir = r.dir
return cmd.CombinedOutput()
}
func formatCommandError(err error, out []byte) error {
return fmt.Errorf("err: %w, out: %s", err, string(out))
}
| 1 | 12,565 | @nghialv btw, perhaps was this method originally created for copying the repo root to the given `dest` as a subdirectory? | pipe-cd-pipe | go |
@@ -205,7 +205,7 @@ namespace pwiz.Skyline.Model.Lib
// ReSharper disable LocalizableElement
private bool LoadLibraryFromDatabase(ILoadMonitor loader)
{
- var status = new ProgressStatus(
+ IProgressStatus status = new ProgressStatus(
string.Format(Resources.ChromatogramLibrary_LoadLibraryFromDatabase_Reading_precursors_from__0_,
Name));
try | 1 | /*
* Original author: Nicholas Shulman <nicksh .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2017 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Data;
using System.Data.Common;
using System.Data.SQLite;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Text;
using System.Xml;
using System.Xml.Serialization;
using pwiz.Common.Collections;
using pwiz.Common.Database;
using pwiz.Common.SystemUtil;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.Lib.ChromLib;
using pwiz.Skyline.Model.Results;
using pwiz.Skyline.Model.RetentionTimes;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.Skyline.Util.Extensions;
// ReSharper disable InvokeAsExtensionMethod
namespace pwiz.Skyline.Model.Lib
{
[XmlRoot("elib_spec")]
public sealed class EncyclopeDiaSpec : LibrarySpec
{
public const string EXT = ".elib";
public static string FILTER_ELIB
{
get { return TextUtil.FileDialogFilter(Resources.EncyclopediaSpec_FILTER_ELIB_EncyclopeDIA_Library, EXT); }
}
public EncyclopeDiaSpec(string name, string path)
: base(name, path)
{
}
public override Library LoadLibrary(ILoadMonitor loader)
{
return EncyclopeDiaLibrary.Load(this, loader);
}
public override IEnumerable<PeptideRankId> PeptideRankIds
{
get { return new[] { PEP_RANK_PICKED_INTENSITY }; }
}
public override string Filter
{
get { return FILTER_ELIB; }
}
#region Implementation of IXmlSerializable
/// <summary>
/// For serialization
/// </summary>
private EncyclopeDiaSpec()
{
}
public static EncyclopeDiaSpec Deserialize(XmlReader reader)
{
return reader.Deserialize(new EncyclopeDiaSpec());
}
#endregion
}
[XmlRoot("elib_library")]
public sealed class EncyclopeDiaLibrary : CachedLibrary<EncyclopeDiaLibrary.ElibSpectrumInfo>
{
private const int FORMAT_VERSION_CACHE = 5;
private const double MIN_QUANTITATIVE_INTENSITY = 1.0;
private ImmutableList<string> _sourceFiles;
private readonly PooledSqliteConnection _pooledSqliteConnection;
private EncyclopeDiaLibrary()
{
}
public override void ReadXml(XmlReader reader)
{
base.ReadXml(reader);
reader.Read();
}
public static string FILTER_ELIB
{
get { return TextUtil.FileDialogFilter(Resources.EncyclopediaLibrary_FILTER_ELIB_EncyclopeDIA_Libraries, EncyclopeDiaSpec.EXT); }
}
public EncyclopeDiaLibrary(EncyclopeDiaSpec spec) : base(spec)
{
LibrarySpec = spec;
FilePath = spec.FilePath;
CachePath = GetLibraryCachePath(FilePath);
}
private EncyclopeDiaLibrary(EncyclopeDiaSpec spec, IStreamManager streamManager) : this(spec)
{
_pooledSqliteConnection = new PooledSqliteConnection(streamManager.ConnectionPool, FilePath);
}
public EncyclopeDiaSpec LibrarySpec { get; private set; }
public string FilePath { get; private set; }
public override int CompareRevisions(Library library)
{
return 0;
}
protected override LibrarySpec CreateSpec()
{
return new EncyclopeDiaSpec(Name, FilePath);
}
public override bool IsSameLibrary(Library library)
{
var encyclopediaLibrary = library as EncyclopeDiaLibrary;
if (encyclopediaLibrary == null)
{
return false;
}
return Equals(Name, encyclopediaLibrary.Name);
}
public override LibraryDetails LibraryDetails
{
get
{
return new LibraryDetails
{
DataFiles = _sourceFiles.Select(file => new SpectrumSourceFileDetails(file))
};
}
}
public override IPooledStream ReadStream
{
get { return _pooledSqliteConnection; }
}
public override string SpecFilter
{
get { return TextUtil.FileDialogFiltersAll(FILTER_ELIB); }
}
public static EncyclopeDiaLibrary Load(EncyclopeDiaSpec spec, ILoadMonitor loader)
{
if (File.Exists(spec.FilePath) && new FileInfo(spec.FilePath).Length > 0)
{
var library = new EncyclopeDiaLibrary(spec, loader.StreamManager);
if (library.Load(loader))
return library;
}
return null;
}
private bool Load(ILoadMonitor loader)
{
try
{
if (LoadFromCache(loader))
{
return true;
}
if (LoadLibraryFromDatabase(loader))
{
WriteCache(loader);
return true;
}
}
catch (Exception e)
{
if (!loader.IsCanceled)
{
var msgException = new ApplicationException(string.Format(Resources.BiblioSpecLiteLibrary_Load_Failed_loading_library__0__, FilePath), e);
loader.UpdateProgress(new ProgressStatus().ChangeErrorException(msgException));
}
}
return false;
}
// ReSharper disable LocalizableElement
private bool LoadLibraryFromDatabase(ILoadMonitor loader)
{
var status = new ProgressStatus(
string.Format(Resources.ChromatogramLibrary_LoadLibraryFromDatabase_Reading_precursors_from__0_,
Name));
try
{
loader.UpdateProgress(status);
var libKeySourceFileDatas = new Dictionary<Tuple<string, int>, Dictionary<string, Tuple<double?, FileData>>>();
var scores = new EncyclopeDiaScores();
scores.ReadScores(_pooledSqliteConnection.Connection);
HashSet<Tuple<string, int>> quantPeptides = new HashSet<Tuple<string, int>>();
using (var cmd = new SQLiteCommand(_pooledSqliteConnection.Connection))
{
// From the "entries" table, read all of the peptides that were actually found
cmd.CommandText =
"SELECT PeptideModSeq, PrecursorCharge, SourceFile, Score, RTInSeconds, RTInSecondsStart, RTInSecondsStop FROM entries";
using (var reader = cmd.ExecuteReader())
{
while (reader.Read())
{
if (loader.IsCanceled)
{
throw new OperationCanceledException();
}
var libKey = Tuple.Create(reader.GetString(0), Convert.ToInt32(reader.GetValue(1)));
// Tuple of filename, score, FileData
Dictionary<string, Tuple<double?, FileData>> dataByFilename;
if (!libKeySourceFileDatas.TryGetValue(libKey, out dataByFilename))
{
dataByFilename = new Dictionary<string, Tuple<double?, FileData>>();
libKeySourceFileDatas.Add(libKey, dataByFilename);
}
string fileName = reader.GetString(2);
if (dataByFilename.ContainsKey(fileName))
{
continue;
}
double score = reader.GetDouble(3);
var qValue = scores.GetQValue(libKey.Item1, libKey.Item2, fileName) ??
ExplicitPeakBounds.UNKNOWN_SCORE;
dataByFilename.Add(fileName, Tuple.Create((double?) score,
new FileData(reader.GetDouble(4)/60,
new ExplicitPeakBounds(reader.GetDouble(5)/60, reader.GetDouble(6)/60, qValue))));
}
}
// Also, read the PeptideQuants table in order to get peak boundaries for any peptide&sourcefiles that were
// not found in the Entries table.
cmd.CommandText =
"SELECT PeptideModSeq, PrecursorCharge, SourceFile, RTInSecondsStart, RTInSecondsStop FROM PeptideQuants";
using (var reader = cmd.ExecuteReader())
{
while (reader.Read())
{
var libKey = Tuple.Create(reader.GetString(0), Convert.ToInt32(reader.GetValue(1)));
quantPeptides.Add(libKey);
// Tuple of filename, score, FileData
Dictionary<string, Tuple<double?, FileData>> dataByFilename;
if (!libKeySourceFileDatas.TryGetValue(libKey, out dataByFilename))
{
dataByFilename = new Dictionary<string, Tuple<double?, FileData>>();
libKeySourceFileDatas.Add(libKey, dataByFilename);
}
string fileName = reader.GetString(2);
if (dataByFilename.ContainsKey(fileName))
{
continue;
}
double qValue = scores.GetQValue(libKey.Item1, libKey.Item2, fileName) ??
ExplicitPeakBounds.UNKNOWN_SCORE;
dataByFilename.Add(fileName,
Tuple.Create((double?) null,
new FileData(null, new ExplicitPeakBounds(reader.GetDouble(3)/60, reader.GetDouble(4)/60, qValue))));
}
}
}
// ReSharper disable PossibleMultipleEnumeration
var sourceFiles = libKeySourceFileDatas
.SelectMany(entry => entry.Value.Keys)
.Distinct()
.ToArray();
Array.Sort(sourceFiles);
var sourceFileIds = sourceFiles.Select((file, index) => Tuple.Create(file, index))
.ToDictionary(tuple => tuple.Item1, tuple => tuple.Item2);
var spectrumInfos = libKeySourceFileDatas
.Where(entry => quantPeptides.Contains(entry.Key))
.Select(entry => MakeSpectrumInfo(entry.Key.Item1, entry.Key.Item2, entry.Value, sourceFileIds));
SetLibraryEntries(spectrumInfos);
_sourceFiles = ImmutableList.ValueOf(sourceFiles);
// ReSharper restore PossibleMultipleEnumeration
loader.UpdateProgress(status.Complete());
return true;
}
catch (Exception e)
{
e = new InvalidDataException(string.Format(Resources.BiblioSpecLiteLibrary_Load_Failed_loading_library__0__, FilePath), e);
loader.UpdateProgress(status.ChangeErrorException(e));
return false;
}
}
// ReSharper restore LocalizableElement
private void WriteCache(ILoadMonitor loader)
{
using (FileSaver fs = new FileSaver(CachePath, loader.StreamManager))
{
using (var stream = loader.StreamManager.CreateStream(fs.SafeName, FileMode.Create, true))
{
PrimitiveArrays.WriteOneValue(stream, FORMAT_VERSION_CACHE);
PrimitiveArrays.WriteOneValue(stream, _sourceFiles.Count);
foreach (var file in _sourceFiles)
{
byte[] fileNameBytes = Encoding.UTF8.GetBytes(file);
PrimitiveArrays.WriteOneValue(stream, fileNameBytes.Length);
PrimitiveArrays.Write(stream, fileNameBytes);
}
PrimitiveArrays.WriteOneValue(stream, _libraryEntries.Length);
foreach (var elibSpectrumInfo in _libraryEntries)
{
elibSpectrumInfo.Write(stream);
}
loader.StreamManager.Finish(stream);
fs.Commit();
loader.StreamManager.SetCache(FilePath, CachePath);
}
}
}
private static string GetLibraryCachePath(string filepath)
{
return Path.ChangeExtension(filepath, @".elibc");
}
private bool LoadFromCache(ILoadMonitor loader)
{
if (!loader.StreamManager.IsCached(FilePath, CachePath))
{
return false;
}
try
{
ValueCache valueCache = new ValueCache();
using (var stream = loader.StreamManager.CreateStream(CachePath, FileMode.Open, true))
{
int version = PrimitiveArrays.ReadOneValue<int>(stream);
if (version != FORMAT_VERSION_CACHE)
{
return false;
}
int fileCount = PrimitiveArrays.ReadOneValue<int>(stream);
List<String> sourceFiles = new List<string>(fileCount);
while (sourceFiles.Count < fileCount)
{
int byteCount = PrimitiveArrays.ReadOneValue<int>(stream);
byte[] bytes = new byte[byteCount];
stream.Read(bytes, 0, bytes.Length);
sourceFiles.Add(Encoding.UTF8.GetString(bytes));
}
int spectrumInfoCount = PrimitiveArrays.ReadOneValue<int>(stream);
_sourceFiles = ImmutableList.ValueOf(sourceFiles);
List<ElibSpectrumInfo> spectrumInfos = new List<ElibSpectrumInfo>();
while (spectrumInfos.Count < spectrumInfoCount)
{
spectrumInfos.Add(ElibSpectrumInfo.Read(valueCache, stream));
}
SetLibraryEntries(spectrumInfos);
return true;
}
}
catch (Exception exception)
{
Trace.TraceWarning(@"Exception loading cache: {0}", exception);
return false;
}
}
protected override SpectrumPeaksInfo.MI[] ReadSpectrum(ElibSpectrumInfo info)
{
return ReadSpectrum(info, info.BestFileId);
}
private SpectrumPeaksInfo.MI[] ReadSpectrum(ElibSpectrumInfo info, int sourceFileId)
{
if (sourceFileId < 0)
{
return null;
}
return _pooledSqliteConnection.ExecuteWithConnection(connection =>
{
HashSet<double> mzs = new HashSet<double>();
List<SpectrumPeaksInfo.MI> spectrum = new List<SpectrumPeaksInfo.MI>();
// First read all of the quantifiable transitions from the PeptideQuants table.
var peptideQuantSpectrum = ReadSpectrumFromPeptideQuants(connection, info);
if (peptideQuantSpectrum != null)
{
foreach (var mi in peptideQuantSpectrum)
{
if (mzs.Add(mi.Mz))
{
spectrum.Add(mi);
}
}
}
// Then read the spectrum for the specific file
var entriesSpectrum = ReadSpectrumFromEntriesTable(connection, info, sourceFileId);
foreach (var mi in entriesSpectrum)
{
if (mzs.Add(mi.Mz))
{
var miToAdd = mi;
if (peptideQuantSpectrum != null)
{
// If we successfully read from the PeptideQuants table, then the
// rest of the mzs we find in the entries table are non-quantitative.
miToAdd.Quantitative = false;
}
else
{
// If we were unable to read from the PeptideQuants table, then
// the non-quantitative transitions are the ones with really low intensity.
miToAdd.Quantitative = miToAdd.Intensity >= MIN_QUANTITATIVE_INTENSITY;
}
spectrum.Add(miToAdd);
}
}
return spectrum.ToArray();
});
}
private IEnumerable<SpectrumPeaksInfo.MI> ReadSpectrumFromPeptideQuants(SQLiteConnection connection, ElibSpectrumInfo info)
{
using (var cmd = new SQLiteCommand(connection))
{
cmd.CommandText = @"SELECT QuantIonMassLength, QuantIonMassArray, QuantIonIntensityLength, QuantIonIntensityArray FROM peptidequants WHERE PrecursorCharge = ? AND PeptideModSeq = ?";
cmd.Parameters.Add(new SQLiteParameter(DbType.Int32) { Value = info.Key.Charge });
cmd.Parameters.Add(new SQLiteParameter(DbType.String) { Value = info.PeptideModSeq });
SQLiteDataReader reader;
try
{
reader = cmd.ExecuteReader();
}
catch (DbException)
{
// Older .elib files do not have these columns, so just return null
return null;
}
using (reader)
{
if (!reader.Read())
{
// None of the transitions are considered Quantifiable.
return new SpectrumPeaksInfo.MI[0];
}
double[] mzs = PrimitiveArrays.FromBytes<double>(
PrimitiveArrays.ReverseBytesInBlocks(
UncompressEncyclopeDiaData((byte[])reader.GetValue(1), reader.GetInt32(0)),
sizeof(double)));
float[] intensities =
PrimitiveArrays.FromBytes<float>(PrimitiveArrays.ReverseBytesInBlocks(
UncompressEncyclopeDiaData((byte[])reader.GetValue(3), reader.GetInt32(2)), sizeof(float)));
return mzs.Select(
(mz, index) => new SpectrumPeaksInfo.MI { Mz = mz, Intensity = intensities[index]});
}
}
}
private IEnumerable<SpectrumPeaksInfo.MI> ReadSpectrumFromEntriesTable(SQLiteConnection connection, ElibSpectrumInfo info,
int sourceFileId)
{
using (var cmd = new SQLiteCommand(connection))
{
cmd.CommandText =
@"SELECT MassEncodedLength, MassArray, IntensityEncodedLength, IntensityArray FROM entries WHERE PrecursorCharge = ? AND PeptideModSeq = ? AND SourceFile = ?";
cmd.Parameters.Add(new SQLiteParameter(DbType.Int32) {Value = info.Key.Charge});
cmd.Parameters.Add(new SQLiteParameter(DbType.String) {Value = info.PeptideModSeq});
cmd.Parameters.Add(new SQLiteParameter(DbType.String) {Value = _sourceFiles[sourceFileId]});
using (var reader = cmd.ExecuteReader())
{
if (reader.Read())
{
double[] mzs = PrimitiveArrays.FromBytes<double>(
PrimitiveArrays.ReverseBytesInBlocks(
UncompressEncyclopeDiaData((byte[]) reader.GetValue(1), reader.GetInt32(0)),
sizeof(double)));
float[] intensities =
PrimitiveArrays.FromBytes<float>(PrimitiveArrays.ReverseBytesInBlocks(
UncompressEncyclopeDiaData((byte[]) reader.GetValue(3), reader.GetInt32(2)),
sizeof(float)));
return mzs.Select((mz, index) => new SpectrumPeaksInfo.MI
{
Mz = mz,
Intensity = intensities[index],
}) // CONSIDER(bspratt): annotation?
.ToArray();
}
return null;
}
}
}
/// <summary>
/// Uncompress a block of data found in an EncyclopeDIA library.
/// </summary>
private byte[] UncompressEncyclopeDiaData(byte[] compressedBytes, int uncompressedSize)
{
// Pass -1 in for uncompressed length since EnclyclopeDIA always compresses
// the bytes even if the uncompressed size is equal to the compresssed size.
byte[] uncompressedBytes = UtilDB.Uncompress(compressedBytes, -1, false);
if (uncompressedBytes.Length != uncompressedSize)
{
throw new IOException(Resources.UtilDB_Uncompress_Failure_uncompressing_data);
}
return uncompressedBytes;
}
protected override SpectrumHeaderInfo CreateSpectrumHeaderInfo(ElibSpectrumInfo info)
{
return new ChromLibSpectrumHeaderInfo(Name, 0);
}
public override LibraryFiles LibraryFiles
{
get
{
return new LibraryFiles{FilePaths = _sourceFiles};
}
}
public override ExplicitPeakBounds GetExplicitPeakBounds(MsDataFileUri filePath, IEnumerable<Target> peptideSequences)
{
int fileId = FindFileInList(filePath, _sourceFiles);
if (fileId < 0)
{
return null;
}
bool anyMatch = false;
foreach (var entry in LibraryEntriesWithSequences(peptideSequences))
{
FileData fileData;
if (entry.FileDatas.TryGetValue(fileId, out fileData))
{
return fileData.PeakBounds;
}
if (entry.FileDatas.Any())
{
anyMatch = true;
}
}
if (anyMatch)
{
return ExplicitPeakBounds.EMPTY;
}
return null;
}
public override IEnumerable<SpectrumInfoLibrary> GetSpectra(LibKey key, IsotopeLabelType labelType, LibraryRedundancy redundancy)
{
int iEntry = FindEntry(key);
if (iEntry < 0)
{
return new SpectrumInfoLibrary[0];
}
var entry = _libraryEntries[iEntry];
return entry.FileDatas.Where(kvp =>
{
if (!kvp.Value.ApexTime.HasValue)
{
return false;
}
if (redundancy == LibraryRedundancy.best && kvp.Key != entry.BestFileId)
{
return false;
}
return true;
})
.Select(kvp =>
new SpectrumInfoLibrary(this, labelType, _sourceFiles[kvp.Key], kvp.Value.ApexTime, null,
kvp.Key == entry.BestFileId, new ElibSpectrumKey(iEntry, kvp.Key))
{
SpectrumHeaderInfo = CreateSpectrumHeaderInfo(entry)
});
}
public override SpectrumPeaksInfo LoadSpectrum(object spectrumKey)
{
var elibSpectrumKey = spectrumKey as ElibSpectrumKey;
if (null != elibSpectrumKey)
{
return new SpectrumPeaksInfo(ReadSpectrum(_libraryEntries[elibSpectrumKey.EntryIndex], elibSpectrumKey.FileId));
}
return base.LoadSpectrum(spectrumKey);
}
public override LibraryChromGroup LoadChromatogramData(object spectrumKey)
{
return null;
}
public override bool TryGetRetentionTimes(int fileId, out LibraryRetentionTimes retentionTimes)
{
return TryGetRetentionTimes(fileId, _sourceFiles[fileId], out retentionTimes);
}
public override bool TryGetRetentionTimes(MsDataFileUri filePath, out LibraryRetentionTimes retentionTimes)
{
return TryGetRetentionTimes(FindFileInList(filePath, _sourceFiles), filePath.ToString(), out retentionTimes);
}
private bool TryGetRetentionTimes(int fileId, string filePath, out LibraryRetentionTimes retentionTimes)
{
if (fileId < 0)
{
retentionTimes = null;
return false;
}
ILookup<Target, double?> timesLookup = _libraryEntries.ToLookup(
entry => entry.Key.Target,
entry =>
{
FileData fileData;
if (!entry.FileDatas.TryGetValue(fileId, out fileData))
{
return null;
}
return fileData.ApexTime;
});
var nonEmptyTimesDict = timesLookup
.Where(grouping=>grouping.Any(value=>value.HasValue))
.ToDictionary(grouping=>grouping.Key, grouping=>Tuple.Create(TimeSource.peak, grouping.OfType<double>().ToArray()));
retentionTimes = new LibraryRetentionTimes(filePath, nonEmptyTimesDict);
return true;
}
public override bool TryGetRetentionTimes(LibKey key, MsDataFileUri filePath, out double[] retentionTimes)
{
retentionTimes = null;
int i = FindEntry(key);
if (i < 0)
{
return false;
}
int fileId = FindFileInList(filePath, _sourceFiles);
if (fileId < 0)
{
return false;
}
var entry = _libraryEntries[i];
FileData fileData;
if (!entry.FileDatas.TryGetValue(fileId, out fileData))
{
return false;
}
if (!fileData.ApexTime.HasValue)
{
return false;
}
retentionTimes = new[] {fileData.ApexTime.Value};
return true;
}
public override IEnumerable<double> GetRetentionTimesWithSequences(string filePath, IEnumerable<Target> peptideSequences, ref int? iFile)
{
if (!iFile.HasValue)
iFile = FindFileInList(MsDataFileUri.Parse(filePath), _sourceFiles);
if (iFile.Value < 0)
{
return new double[0];
}
var times = new List<double>();
foreach (var entry in LibraryEntriesWithSequences(peptideSequences))
{
FileData fileData;
if (entry.FileDatas.TryGetValue(iFile.Value, out fileData))
{
if (fileData.ApexTime.HasValue)
{
times.Add(fileData.ApexTime.Value);
}
}
}
return times;
}
public override IList<RetentionTimeSource> ListRetentionTimeSources()
{
List<RetentionTimeSource> sources = new List<RetentionTimeSource>();
foreach (var sourceFile in _sourceFiles)
{
try
{
sources.Add(new RetentionTimeSource(Path.GetFileNameWithoutExtension(sourceFile), Name));
}
catch (Exception)
{
// ignore
}
}
return sources;
}
public static EncyclopeDiaLibrary Deserialize(XmlReader reader)
{
EncyclopeDiaLibrary encyclopeDiaLibrary = new EncyclopeDiaLibrary();
encyclopeDiaLibrary.ReadXml(reader);
return encyclopeDiaLibrary;
}
private static ElibSpectrumInfo MakeSpectrumInfo(string peptideModSeq, int charge,
IDictionary<string, Tuple<double?, FileData>> fileDatas, IDictionary<string, int> sourceFileIds)
{
double bestScore = double.MaxValue;
string bestFileName = null;
foreach (var entry in fileDatas)
{
if (!entry.Value.Item1.HasValue)
{
continue;
}
if (bestFileName == null || entry.Value.Item1 < bestScore)
{
bestFileName = entry.Key;
bestScore = entry.Value.Item1.Value;
}
}
return new ElibSpectrumInfo(peptideModSeq, charge, bestFileName == null ? -1 : sourceFileIds[bestFileName],
fileDatas.Select(
entry => new KeyValuePair<int, FileData>(sourceFileIds[entry.Key], entry.Value.Item2)));
}
public class ElibSpectrumInfo : ICachedSpectrumInfo
{
public ElibSpectrumInfo(String peptideModSeq, int charge, int bestFileId, IEnumerable<KeyValuePair<int, FileData>> fileDatas)
{
PeptideModSeq = peptideModSeq;
Key = new LibKey(SequenceMassCalc.NormalizeModifiedSequence(peptideModSeq), charge);
BestFileId = bestFileId;
FileDatas = ImmutableSortedList.FromValues(fileDatas);
}
public string PeptideModSeq { get; private set; }
public LibKey Key { get; private set; }
public int BestFileId { get; private set;}
public ImmutableSortedList<int, FileData> FileDatas { get; private set; }
public void Write(Stream stream)
{
PrimitiveArrays.WriteOneValue(stream, PeptideModSeq.Length);
PrimitiveArrays.Write(stream, Encoding.UTF8.GetBytes(PeptideModSeq));
PrimitiveArrays.WriteOneValue(stream, Key.Charge);
PrimitiveArrays.WriteOneValue(stream, BestFileId);
PrimitiveArrays.WriteOneValue(stream, FileDatas.Count);
foreach (var peakBoundEntry in FileDatas)
{
PrimitiveArrays.WriteOneValue(stream, peakBoundEntry.Key);
PrimitiveArrays.WriteOneValue(stream, peakBoundEntry.Value.PeakBounds.StartTime);
PrimitiveArrays.WriteOneValue(stream, peakBoundEntry.Value.PeakBounds.EndTime);
PrimitiveArrays.WriteOneValue(stream, peakBoundEntry.Value.PeakBounds.Score);
if (peakBoundEntry.Value.ApexTime.HasValue)
{
PrimitiveArrays.WriteOneValue<byte>(stream, 1);
PrimitiveArrays.WriteOneValue(stream, peakBoundEntry.Value.ApexTime.Value);
}
else
{
PrimitiveArrays.WriteOneValue<byte>(stream, 0);
}
}
}
public static ElibSpectrumInfo Read(ValueCache valueCache, Stream stream)
{
byte[] peptideModSeqBytes = new byte[PrimitiveArrays.ReadOneValue<int>(stream)];
stream.Read(peptideModSeqBytes, 0, peptideModSeqBytes.Length);
var peptideModSeq = valueCache.CacheValue(Encoding.UTF8.GetString(peptideModSeqBytes));
int charge = PrimitiveArrays.ReadOneValue<int>(stream);
int bestFileId = PrimitiveArrays.ReadOneValue<int>(stream);
int peakBoundCount = PrimitiveArrays.ReadOneValue<int>(stream);
var peakBounds = new List<KeyValuePair<int, FileData>>();
while (peakBounds.Count < peakBoundCount)
{
var fileId = PrimitiveArrays.ReadOneValue<int>(stream);
var startTime = PrimitiveArrays.ReadOneValue<double>(stream);
var endTime = PrimitiveArrays.ReadOneValue<double>(stream);
var score = PrimitiveArrays.ReadOneValue<double>(stream);
byte bHasApexTime = PrimitiveArrays.ReadOneValue<byte>(stream);
double? apexTime;
if (bHasApexTime == 0)
{
apexTime = null;
}
else
{
apexTime = PrimitiveArrays.ReadOneValue<double>(stream);
}
peakBounds.Add(new KeyValuePair<int, FileData>(fileId, new FileData(apexTime, new ExplicitPeakBounds(startTime, endTime, score))));
}
return new ElibSpectrumInfo(peptideModSeq, charge, bestFileId, peakBounds);
}
}
public class FileData
{
public FileData(double? apexTime, ExplicitPeakBounds peakBounds)
{
ApexTime = apexTime;
PeakBounds = peakBounds;
}
public double? ApexTime { get; private set; }
public ExplicitPeakBounds PeakBounds { get; private set; }
}
private class ElibSpectrumKey
{
public ElibSpectrumKey(int entryIndex, int fileId)
{
EntryIndex = entryIndex;
FileId = fileId;
}
public int EntryIndex { get; private set; }
public int FileId { get; private set; }
}
private class EncyclopeDiaScores
{
/// <summary>
/// Mapping from (peptideModSeq,precursorCharge) to Map of filename to (qValue, posteriorErrorProbability).
/// Only one file should have a qValue, but just in case there are multiple files, we store it in a dictionary.
/// </summary>
private Dictionary<Tuple<string, int>, IDictionary<string, Tuple<double, double>>> _dictionary
= new Dictionary<Tuple<string, int>, IDictionary<string, Tuple<double, double>>>();
public double? GetQValue(string peptideModSeq, int precursorCharge, string file)
{
IDictionary<string, Tuple<double, double>> values;
if (!_dictionary.TryGetValue(Tuple.Create(peptideModSeq, precursorCharge), out values))
{
return null;
}
Tuple<double, double> result;
values.TryGetValue(file, out result);
if (result == null)
{
// If there is not an exact match of the file, just use the first qValue for that peptide&charge
result = values.Values.FirstOrDefault();
}
if (result == null)
{
return null;
}
return result.Item1;
}
public void ReadScores(SQLiteConnection connection)
{
if (!SqliteOperations.TableExists(connection, @"peptidescores"))
{
return;
}
using (var cmd = new SQLiteCommand(connection))
{
cmd.CommandText =
@"select PeptideModSeq, PrecursorCharge, SourceFile, QValue, PosteriorErrorProbability from peptidescores";
using (var reader = cmd.ExecuteReader())
{
while (reader.Read())
{
var key = Tuple.Create(reader.GetString(0), Convert.ToInt32(reader.GetValue(1)));
var value = Tuple.Create(reader.GetDouble(3), reader.GetDouble(3));
string filename = reader.GetString(2);
IDictionary<string, Tuple<double, double>> values;
if (!_dictionary.TryGetValue(key, out values))
{
values = new Dictionary<string, Tuple<double, double>>();
_dictionary.Add(key, values);
}
values[filename] = value;
}
}
}
}
}
}
}
| 1 | 13,282 | Just curious - why not var? | ProteoWizard-pwiz | .cs |
@@ -26,6 +26,9 @@ import {
clearWebStorage,
} from './util/standalone';
+// Load image for the parent menu item.
+import '../images/logo-g_white_small.png';
+
if ( 'toplevel_page_googlesitekit-dashboard' !== global.pagenow && 'site-kit_page_googlesitekit-splash' !== global.pagenow && 'admin_page_googlesitekit-splash' !== global.pagenow && global.localStorage ) {
// The total notifications count should always rely on local storage
// directly for external availability. | 1 | /**
* Admin utilities.
*
* This JavaScript loads on every admin page. Reserved for later.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import {
appendNotificationsCount,
clearWebStorage,
} from './util/standalone';
if ( 'toplevel_page_googlesitekit-dashboard' !== global.pagenow && 'site-kit_page_googlesitekit-splash' !== global.pagenow && 'admin_page_googlesitekit-splash' !== global.pagenow && global.localStorage ) {
// The total notifications count should always rely on local storage
// directly for external availability.
const count = global.localStorage.getItem( 'googlesitekit::total-notifications' ) || 0;
appendNotificationsCount( count );
}
let wpLogout = document.querySelector( '#wp-admin-bar-logout a' );
// Support for WordPress.com signout button.
if ( ! wpLogout ) {
wpLogout = document.querySelector( '.sidebar__me-signout button' );
}
if ( wpLogout ) {
wpLogout.addEventListener( 'click', () => {
clearWebStorage();
} );
}
| 1 | 32,886 | Why would we import this here and not use it? Is there a side-effect of this import? | google-site-kit-wp | js |
@@ -281,8 +281,10 @@ module RSpec::Core
# @attr pending_examples [Array<RSpec::Core::Example>] the pending examples
# @attr load_time [Float] the number of seconds taken to boot RSpec
# and load the spec files
+ # @attr errors [Integer] the number of errors that have occurred processing
+ # the spec suite
SummaryNotification = Struct.new(:duration, :examples, :failed_examples,
- :pending_examples, :load_time)
+ :pending_examples, :load_time, :errors)
class SummaryNotification
# @api
# @return [Fixnum] the number of examples run | 1 | RSpec::Support.require_rspec_core "formatters/console_codes"
RSpec::Support.require_rspec_core "formatters/exception_presenter"
RSpec::Support.require_rspec_core "formatters/helpers"
RSpec::Support.require_rspec_core "shell_escape"
module RSpec::Core
# Notifications are value objects passed to formatters to provide them
# with information about a particular event of interest.
module Notifications
# @private
module NullColorizer
module_function
def wrap(line, _code_or_symbol)
line
end
end
# The `StartNotification` represents a notification sent by the reporter
# when the suite is started. It contains the expected amount of examples
# to be executed, and the load time of RSpec.
#
# @attr count [Fixnum] the number counted
# @attr load_time [Float] the number of seconds taken to boot RSpec
# and load the spec files
StartNotification = Struct.new(:count, :load_time)
# The `ExampleNotification` represents notifications sent by the reporter
# which contain information about the current (or soon to be) example.
# It is used by formatters to access information about that example.
#
# @example
# def example_started(notification)
# puts "Hey I started #{notification.example.description}"
# end
#
# @attr example [RSpec::Core::Example] the current example
ExampleNotification = Struct.new(:example)
class ExampleNotification
# @private
def self.for(example)
execution_result = example.execution_result
return SkippedExampleNotification.new(example) if execution_result.example_skipped?
return new(example) unless execution_result.status == :pending || execution_result.status == :failed
klass = if execution_result.pending_fixed?
PendingExampleFixedNotification
elsif execution_result.status == :pending
PendingExampleFailedAsExpectedNotification
else
FailedExampleNotification
end
klass.new(example)
end
private_class_method :new
end
# The `ExamplesNotification` represents notifications sent by the reporter
# which contain information about the suites examples.
#
# @example
# def stop(notification)
# puts "Hey I ran #{notification.examples.size}"
# end
#
class ExamplesNotification
def initialize(reporter)
@reporter = reporter
end
# @return [Array<RSpec::Core::Example>] list of examples
def examples
@reporter.examples
end
# @return [Array<RSpec::Core::Example>] list of failed examples
def failed_examples
@reporter.failed_examples
end
# @return [Array<RSpec::Core::Example>] list of pending examples
def pending_examples
@reporter.pending_examples
end
# @return [Array<RSpec::Core::Notifications::ExampleNotification>]
# returns examples as notifications
def notifications
@notifications ||= format_examples(examples)
end
# @return [Array<RSpec::Core::Notifications::FailedExampleNotification>]
# returns failed examples as notifications
def failure_notifications
@failed_notifications ||= format_examples(failed_examples)
end
# @return [Array<RSpec::Core::Notifications::SkippedExampleNotification,
# RSpec::Core::Notifications::PendingExampleFailedAsExpectedNotification>]
# returns pending examples as notifications
def pending_notifications
@pending_notifications ||= format_examples(pending_examples)
end
# @return [String] The list of failed examples, fully formatted in the way
# that RSpec's built-in formatters emit.
def fully_formatted_failed_examples(colorizer=::RSpec::Core::Formatters::ConsoleCodes)
formatted = "\nFailures:\n"
failure_notifications.each_with_index do |failure, index|
formatted << failure.fully_formatted(index.next, colorizer)
end
formatted
end
# @return [String] The list of pending examples, fully formatted in the
# way that RSpec's built-in formatters emit.
def fully_formatted_pending_examples(colorizer=::RSpec::Core::Formatters::ConsoleCodes)
formatted = "\nPending: (Failures listed here are expected and do not affect your suite's status)\n"
pending_notifications.each_with_index do |notification, index|
formatted << notification.fully_formatted(index.next, colorizer)
end
formatted
end
private
def format_examples(examples)
examples.map do |example|
ExampleNotification.for(example)
end
end
end
# The `FailedExampleNotification` extends `ExampleNotification` with
# things useful for examples that have failure info -- typically a
# failed or pending spec.
#
# @example
# def example_failed(notification)
# puts "Hey I failed :("
# puts "Here's my stack trace"
# puts notification.exception.backtrace.join("\n")
# end
#
# @attr [RSpec::Core::Example] example the current example
# @see ExampleNotification
class FailedExampleNotification < ExampleNotification
public_class_method :new
# @return [Exception] The example failure
def exception
@exception_presenter.exception
end
# @return [String] The example description
def description
@exception_presenter.description
end
# Returns the message generated for this failure line by line.
#
# @return [Array<String>] The example failure message
def message_lines
@exception_presenter.message_lines
end
# Returns the message generated for this failure colorized line by line.
#
# @param colorizer [#wrap] An object to colorize the message_lines by
# @return [Array<String>] The example failure message colorized
def colorized_message_lines(colorizer=::RSpec::Core::Formatters::ConsoleCodes)
@exception_presenter.colorized_message_lines(colorizer)
end
# Returns the failures formatted backtrace.
#
# @return [Array<String>] the examples backtrace lines
def formatted_backtrace
@exception_presenter.formatted_backtrace
end
# Returns the failures colorized formatted backtrace.
#
# @param colorizer [#wrap] An object to colorize the message_lines by
# @return [Array<String>] the examples colorized backtrace lines
def colorized_formatted_backtrace(colorizer=::RSpec::Core::Formatters::ConsoleCodes)
@exception_presenter.colorized_formatted_backtrace(colorizer)
end
# @return [String] The failure information fully formatted in the way that
# RSpec's built-in formatters emit.
def fully_formatted(failure_number, colorizer=::RSpec::Core::Formatters::ConsoleCodes)
@exception_presenter.fully_formatted(failure_number, colorizer)
end
private
def initialize(example, exception_presenter=Formatters::ExceptionPresenter::Factory.new(example).build)
@exception_presenter = exception_presenter
super(example)
end
end
# @deprecated Use {FailedExampleNotification} instead.
class PendingExampleFixedNotification < FailedExampleNotification; end
# @deprecated Use {FailedExampleNotification} instead.
class PendingExampleFailedAsExpectedNotification < FailedExampleNotification; end
# The `SkippedExampleNotification` extends `ExampleNotification` with
# things useful for specs that are skipped.
#
# @attr [RSpec::Core::Example] example the current example
# @see ExampleNotification
class SkippedExampleNotification < ExampleNotification
public_class_method :new
# @return [String] The pending detail fully formatted in the way that
# RSpec's built-in formatters emit.
def fully_formatted(pending_number, colorizer=::RSpec::Core::Formatters::ConsoleCodes)
formatted_caller = RSpec.configuration.backtrace_formatter.backtrace_line(example.location)
colorizer.wrap("\n #{pending_number}) #{example.full_description}", :pending) << "\n " <<
Formatters::ExceptionPresenter::PENDING_DETAIL_FORMATTER.call(example, colorizer) <<
"\n" << colorizer.wrap(" # #{formatted_caller}\n", :detail)
end
end
# The `GroupNotification` represents notifications sent by the reporter
# which contain information about the currently running (or soon to be)
# example group. It is used by formatters to access information about that
# group.
#
# @example
# def example_group_started(notification)
# puts "Hey I started #{notification.group.description}"
# end
# @attr group [RSpec::Core::ExampleGroup] the current group
GroupNotification = Struct.new(:group)
# The `MessageNotification` encapsulates generic messages that the reporter
# sends to formatters.
#
# @attr message [String] the message
MessageNotification = Struct.new(:message)
# The `SeedNotification` holds the seed used to randomize examples and
# whether that seed has been used or not.
#
# @attr seed [Fixnum] the seed used to randomize ordering
# @attr used [Boolean] whether the seed has been used or not
SeedNotification = Struct.new(:seed, :used)
class SeedNotification
# @api
# @return [Boolean] has the seed been used?
def seed_used?
!!used
end
private :used
# @return [String] The seed information fully formatted in the way that
# RSpec's built-in formatters emit.
def fully_formatted
"\nRandomized with seed #{seed}\n"
end
end
# The `SummaryNotification` holds information about the results of running
# a test suite. It is used by formatters to provide information at the end
# of the test run.
#
# @attr duration [Float] the time taken (in seconds) to run the suite
# @attr examples [Array<RSpec::Core::Example>] the examples run
# @attr failed_examples [Array<RSpec::Core::Example>] the failed examples
# @attr pending_examples [Array<RSpec::Core::Example>] the pending examples
# @attr load_time [Float] the number of seconds taken to boot RSpec
# and load the spec files
SummaryNotification = Struct.new(:duration, :examples, :failed_examples,
:pending_examples, :load_time)
class SummaryNotification
# @api
# @return [Fixnum] the number of examples run
def example_count
@example_count ||= examples.size
end
# @api
# @return [Fixnum] the number of failed examples
def failure_count
@failure_count ||= failed_examples.size
end
# @api
# @return [Fixnum] the number of pending examples
def pending_count
@pending_count ||= pending_examples.size
end
# @api
# @return [String] A line summarising the result totals of the spec run.
def totals_line
summary = Formatters::Helpers.pluralize(example_count, "example")
summary << ", " << Formatters::Helpers.pluralize(failure_count, "failure")
summary << ", #{pending_count} pending" if pending_count > 0
summary
end
# @api public
#
# Wraps the results line with colors based on the configured
# colors for failure, pending, and success. Defaults to red,
# yellow, green accordingly.
#
# @param colorizer [#wrap] An object which supports wrapping text with
# specific colors.
# @return [String] A colorized results line.
def colorized_totals_line(colorizer=::RSpec::Core::Formatters::ConsoleCodes)
if failure_count > 0
colorizer.wrap(totals_line, RSpec.configuration.failure_color)
elsif pending_count > 0
colorizer.wrap(totals_line, RSpec.configuration.pending_color)
else
colorizer.wrap(totals_line, RSpec.configuration.success_color)
end
end
# @api public
#
# Formats failures into a rerunable command format.
#
# @param colorizer [#wrap] An object which supports wrapping text with
# specific colors.
# @return [String] A colorized summary line.
def colorized_rerun_commands(colorizer=::RSpec::Core::Formatters::ConsoleCodes)
"\nFailed examples:\n\n" +
failed_examples.map do |example|
colorizer.wrap("rspec #{rerun_argument_for(example)}", RSpec.configuration.failure_color) + " " +
colorizer.wrap("# #{example.full_description}", RSpec.configuration.detail_color)
end.join("\n")
end
# @return [String] a formatted version of the time it took to run the
# suite
def formatted_duration
Formatters::Helpers.format_duration(duration)
end
# @return [String] a formatted version of the time it took to boot RSpec
# and load the spec files
def formatted_load_time
Formatters::Helpers.format_duration(load_time)
end
# @return [String] The summary information fully formatted in the way that
# RSpec's built-in formatters emit.
def fully_formatted(colorizer=::RSpec::Core::Formatters::ConsoleCodes)
formatted = "\nFinished in #{formatted_duration} " \
"(files took #{formatted_load_time} to load)\n" \
"#{colorized_totals_line(colorizer)}\n"
unless failed_examples.empty?
formatted << colorized_rerun_commands(colorizer) << "\n"
end
formatted
end
private
include RSpec::Core::ShellEscape
def rerun_argument_for(example)
location = example.location_rerun_argument
return location unless duplicate_rerun_locations.include?(location)
conditionally_quote(example.id)
end
def duplicate_rerun_locations
@duplicate_rerun_locations ||= begin
locations = RSpec.world.all_examples.map(&:location_rerun_argument)
Set.new.tap do |s|
locations.group_by { |l| l }.each do |l, ls|
s << l if ls.count > 1
end
end
end
end
end
# The `ProfileNotification` holds information about the results of running a
# test suite when profiling is enabled. It is used by formatters to provide
# information at the end of the test run for profiling information.
#
# @attr duration [Float] the time taken (in seconds) to run the suite
# @attr examples [Array<RSpec::Core::Example>] the examples run
# @attr number_of_examples [Fixnum] the number of examples to profile
# @attr example_groups [Array<RSpec::Core::Profiler>] example groups run
class ProfileNotification
def initialize(duration, examples, number_of_examples, example_groups)
@duration = duration
@examples = examples
@number_of_examples = number_of_examples
@example_groups = example_groups
end
attr_reader :duration, :examples, :number_of_examples
# @return [Array<RSpec::Core::Example>] the slowest examples
def slowest_examples
@slowest_examples ||=
examples.sort_by do |example|
-example.execution_result.run_time
end.first(number_of_examples)
end
# @return [Float] the time taken (in seconds) to run the slowest examples
def slow_duration
@slow_duration ||=
slowest_examples.inject(0.0) do |i, e|
i + e.execution_result.run_time
end
end
# @return [String] the percentage of total time taken
def percentage
@percentage ||=
begin
time_taken = slow_duration / duration
'%.1f' % ((time_taken.nan? ? 0.0 : time_taken) * 100)
end
end
# @return [Array<RSpec::Core::Example>] the slowest example groups
def slowest_groups
@slowest_groups ||= calculate_slowest_groups
end
private
def calculate_slowest_groups
# stop if we've only one example group
return {} if @example_groups.keys.length <= 1
@example_groups.each_value do |hash|
hash[:average] = hash[:total_time].to_f / hash[:count]
end
groups = @example_groups.sort_by { |_, hash| -hash[:average] }.first(number_of_examples)
groups.map { |group, data| [group.location, data] }
end
end
# The `DeprecationNotification` is issued by the reporter when a deprecated
# part of RSpec is encountered. It represents information about the
# deprecated call site.
#
# @attr message [String] A custom message about the deprecation
# @attr deprecated [String] A custom message about the deprecation (alias of
# message)
# @attr replacement [String] An optional replacement for the deprecation
# @attr call_site [String] An optional call site from which the deprecation
# was issued
DeprecationNotification = Struct.new(:deprecated, :message, :replacement, :call_site)
class DeprecationNotification
private_class_method :new
# @api
# Convenience way to initialize the notification
def self.from_hash(data)
new data[:deprecated], data[:message], data[:replacement], data[:call_site]
end
end
# `NullNotification` represents a placeholder value for notifications that
# currently require no information, but we may wish to extend in future.
class NullNotification
end
# `CustomNotification` is used when sending custom events to formatters /
# other registered listeners, it creates attributes based on supplied hash
# of options.
class CustomNotification < Struct
# @param options [Hash] A hash of method / value pairs to create on this notification
# @return [CustomNotification]
#
# Build a custom notification based on the supplied option key / values.
def self.for(options={})
return NullNotification if options.keys.empty?
new(*options.keys).new(*options.values)
end
end
end
end
| 1 | 16,547 | The name `errors` is a bit mis-leading -- usually I'd expect a field called `errors` to be an array of errors but here it's just a count. And it's not a count of _all_ errors -- it's a count only of some errors. | rspec-rspec-core | rb |
@@ -24,7 +24,6 @@ class AppKernel extends Kernel
new JMS\TranslationBundle\JMSTranslationBundle(),
new Presta\SitemapBundle\PrestaSitemapBundle(),
new Prezent\Doctrine\TranslatableBundle\PrezentDoctrineTranslatableBundle(),
- new RaulFraile\Bundle\LadybugBundle\RaulFraileLadybugBundle(),
new Sensio\Bundle\FrameworkExtraBundle\SensioFrameworkExtraBundle(),
new Shopsys\FormTypesBundle\ShopsysFormTypesBundle(),
new ShopSys\MigrationBundle\ShopSysMigrationBundle(), | 1 | <?php
use Shopsys\Environment;
use Symfony\Component\Config\Loader\LoaderInterface;
use Symfony\Component\HttpKernel\Kernel;
class AppKernel extends Kernel
{
/**
* @{inheritdoc}
*/
public function registerBundles()
{
$bundles = [
new Bmatzner\JQueryBundle\BmatznerJQueryBundle(),
new Bmatzner\JQueryUIBundle\BmatznerJQueryUIBundle(),
new Craue\FormFlowBundle\CraueFormFlowBundle(),
new Doctrine\Bundle\DoctrineBundle\DoctrineBundle(),
new Doctrine\Bundle\FixturesBundle\DoctrineFixturesBundle(),
new Doctrine\Bundle\MigrationsBundle\DoctrineMigrationsBundle(),
new FM\ElfinderBundle\FMElfinderBundle(),
new Fp\JsFormValidatorBundle\FpJsFormValidatorBundle(),
new Intaro\PostgresSearchBundle\IntaroPostgresSearchBundle(),
new JMS\TranslationBundle\JMSTranslationBundle(),
new Presta\SitemapBundle\PrestaSitemapBundle(),
new Prezent\Doctrine\TranslatableBundle\PrezentDoctrineTranslatableBundle(),
new RaulFraile\Bundle\LadybugBundle\RaulFraileLadybugBundle(),
new Sensio\Bundle\FrameworkExtraBundle\SensioFrameworkExtraBundle(),
new Shopsys\FormTypesBundle\ShopsysFormTypesBundle(),
new ShopSys\MigrationBundle\ShopSysMigrationBundle(),
new Shopsys\ProductFeed\HeurekaBundle\ShopsysProductFeedHeurekaBundle(),
new Shopsys\ProductFeed\HeurekaDeliveryBundle\ShopsysProductFeedHeurekaDeliveryBundle(),
new Shopsys\ProductFeed\ZboziBundle\ShopsysProductFeedZboziBundle(),
new Shopsys\ProductFeed\GoogleBundle\ShopsysProductFeedGoogleBundle(),
new Stof\DoctrineExtensionsBundle\StofDoctrineExtensionsBundle(),
new Symfony\Bundle\AsseticBundle\AsseticBundle(),
new Symfony\Bundle\FrameworkBundle\FrameworkBundle(),
new Symfony\Bundle\MonologBundle\MonologBundle(),
new Symfony\Bundle\SecurityBundle\SecurityBundle(),
new Symfony\Bundle\SwiftmailerBundle\SwiftmailerBundle(),
new Symfony\Bundle\TwigBundle\TwigBundle(),
new Symfony\Cmf\Bundle\RoutingBundle\CmfRoutingBundle(),
new Symplify\ControllerAutowire\SymplifyControllerAutowireBundle(),
new Symplify\DefaultAutowire\SymplifyDefaultAutowireBundle(),
new VasekPurchart\ConsoleErrorsBundle\ConsoleErrorsBundle(),
new Ivory\CKEditorBundle\IvoryCKEditorBundle(), // has to be loaded after FrameworkBundle and TwigBundle
new Shopsys\ShopBundle\ShopsysShopBundle(), // must be loaded as last, because translations must overwrite other bundles
];
if ($this->getEnvironment() === Environment::ENVIRONMENT_DEVELOPMENT) {
$bundles[] = new Sensio\Bundle\DistributionBundle\SensioDistributionBundle();
$bundles[] = new Sensio\Bundle\GeneratorBundle\SensioGeneratorBundle();
$bundles[] = new Symfony\Bundle\WebProfilerBundle\WebProfilerBundle();
$bundles[] = new Symfony\Bundle\DebugBundle\DebugBundle();
}
if ($this->getEnvironment() === Environment::ENVIRONMENT_TEST) {
$bundles[] = new Shopsys\IntegrationTestingBundle\ShopsysIntegrationTestingBundle();
}
return $bundles;
}
/**
* @{inheritdoc}
*/
public function registerContainerConfiguration(LoaderInterface $loader)
{
foreach ($this->getConfigs() as $filename) {
if (file_exists($filename) && is_readable($filename)) {
$loader->load($filename);
}
}
}
/**
* @return string[]
*/
private function getConfigs()
{
$configs = [
__DIR__ . '/config/parameters_common.yml',
__DIR__ . '/config/parameters.yml',
__DIR__ . '/config/paths.yml',
__DIR__ . '/config/config.yml',
__DIR__ . '/config/security.yml',
];
switch ($this->getEnvironment()) {
case Environment::ENVIRONMENT_DEVELOPMENT:
$configs[] = __DIR__ . '/config/config_dev.yml';
break;
case Environment::ENVIRONMENT_TEST:
$configs[] = __DIR__ . '/config/parameters_test.yml';
$configs[] = __DIR__ . '/config/config_test.yml';
break;
}
return $configs;
}
/**
* @{inheritdoc}
*/
public function getRootDir()
{
return __DIR__;
}
/**
* @{inheritdoc}
*/
public function getCacheDir()
{
return dirname(__DIR__) . '/var/cache/' . $this->getEnvironment();
}
/**
* @{inheritdoc}
*/
public function getLogDir()
{
return dirname(__DIR__) . '/var/logs';
}
}
| 1 | 8,763 | there was no usage of the bundle in the framework? everything works exactly the same after the removal? | shopsys-shopsys | php |
@@ -327,6 +327,14 @@ var _ = infrastructure.DatastoreDescribe("_BPF-SAFE_ IPIP topology before adding
BeforeEach(func() {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
+ if bpfEnabled {
+ infra.RemoveNodeAddresses(felixes[0])
+ } else {
+ for _, f := range felixes {
+ infra.RemoveNodeAddresses(f)
+ }
+ }
+
listOptions := options.ListOptions{}
if bpfEnabled {
listOptions.Name = felixes[0].Hostname | 1 | // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build fvtests
package fv_test
import (
"context"
"errors"
"fmt"
"os"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/projectcalico/felix/fv/connectivity"
"github.com/projectcalico/felix/fv/utils"
log "github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
api "github.com/projectcalico/libcalico-go/lib/apis/v3"
client "github.com/projectcalico/libcalico-go/lib/clientv3"
"github.com/projectcalico/libcalico-go/lib/numorstring"
"github.com/projectcalico/libcalico-go/lib/options"
"github.com/projectcalico/felix/fv/containers"
"github.com/projectcalico/felix/fv/infrastructure"
"github.com/projectcalico/felix/fv/workload"
)
var _ = infrastructure.DatastoreDescribe("_BPF-SAFE_ IPIP topology before adding host IPs to IP sets", []apiconfig.DatastoreType{apiconfig.EtcdV3, apiconfig.Kubernetes}, func(getInfra infrastructure.InfraFactory) {
var (
bpfEnabled = os.Getenv("FELIX_FV_ENABLE_BPF") == "true"
infra infrastructure.DatastoreInfra
felixes []*infrastructure.Felix
client client.Interface
w [2]*workload.Workload
hostW [2]*workload.Workload
cc *connectivity.Checker
)
BeforeEach(func() {
infra = getInfra()
felixes, client = infrastructure.StartNNodeTopology(2, infrastructure.DefaultTopologyOptions(), infra)
// Install a default profile that allows all ingress and egress, in the absence of any Policy.
infra.AddDefaultAllow()
// Wait until the tunl0 device appears; it is created when felix inserts the ipip module
// into the kernel.
Eventually(func() error {
links, err := netlink.LinkList()
if err != nil {
return err
}
for _, link := range links {
if link.Attrs().Name == "tunl0" {
return nil
}
}
return errors.New("tunl0 wasn't auto-created")
}).Should(BeNil())
// Create workloads, using that profile. One on each "host".
for ii := range w {
wIP := fmt.Sprintf("10.65.%d.2", ii)
wName := fmt.Sprintf("w%d", ii)
w[ii] = workload.Run(felixes[ii], wName, "default", wIP, "8055", "tcp")
w[ii].ConfigureInInfra(infra)
hostW[ii] = workload.Run(felixes[ii], fmt.Sprintf("host%d", ii), "", felixes[ii].IP, "8055", "tcp")
}
if bpfEnabled {
for _, f := range felixes {
Eventually(f.NumTCBPFProgsEth0, "5s", "200ms").Should(Equal(2))
}
}
cc = &connectivity.Checker{}
})
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
for _, felix := range felixes {
felix.Exec("iptables-save", "-c")
felix.Exec("ipset", "list")
felix.Exec("ip", "r")
felix.Exec("ip", "a")
}
}
for _, wl := range w {
wl.Stop()
}
for _, wl := range hostW {
wl.Stop()
}
for _, felix := range felixes {
felix.Stop()
}
if CurrentGinkgoTestDescription().Failed {
infra.DumpErrorData()
}
infra.Stop()
})
It("should use the --random-fully flag in the MASQUERADE rules", func() {
for _, felix := range felixes {
Eventually(func() string {
out, _ := felix.ExecOutput("iptables-save", "-c")
return out
}, "10s", "100ms").Should(ContainSubstring("--random-fully"))
}
})
It("should have workload to workload connectivity", func() {
cc.ExpectSome(w[0], w[1])
cc.ExpectSome(w[1], w[0])
cc.CheckConnectivity()
})
It("should have host to workload connectivity", func() {
cc.ExpectSome(felixes[0], w[1])
cc.ExpectSome(felixes[0], w[0])
cc.CheckConnectivity()
})
It("should have host to host connectivity", func() {
cc.ExpectSome(felixes[0], hostW[1])
cc.ExpectSome(felixes[1], hostW[0])
cc.CheckConnectivity()
})
Context("with host protection policy in place", func() {
BeforeEach(func() {
// Make sure our new host endpoints don't cut felix off from the datastore.
err := infra.AddAllowToDatastore("host-endpoint=='true'")
Expect(err).NotTo(HaveOccurred())
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
for _, f := range felixes {
hep := api.NewHostEndpoint()
hep.Name = "eth0-" + f.Name
hep.Labels = map[string]string{
"host-endpoint": "true",
}
hep.Spec.Node = f.Hostname
hep.Spec.ExpectedIPs = []string{f.IP}
_, err := client.HostEndpoints().Create(ctx, hep, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
}
})
It("should have workload connectivity but not host connectivity", func() {
// Host endpoints (with no policies) block host-host traffic due to default drop.
cc.ExpectNone(felixes[0], hostW[1])
cc.ExpectNone(felixes[1], hostW[0])
// But the rules to allow IPIP between our hosts let the workload traffic through.
cc.ExpectSome(w[0], w[1])
cc.ExpectSome(w[1], w[0])
cc.CheckConnectivity()
})
})
Context("with all-interfaces host protection policy in place", func() {
BeforeEach(func() {
// Make sure our new host endpoints don't cut felix off from the datastore.
err := infra.AddAllowToDatastore("host-endpoint=='true'")
Expect(err).NotTo(HaveOccurred())
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
// Create host endpoints for each node.
for _, f := range felixes {
hep := api.NewHostEndpoint()
hep.Name = "all-interfaces-" + f.Name
hep.Labels = map[string]string{
"host-endpoint": "true",
"hostname": f.Hostname,
}
hep.Spec.Node = f.Hostname
hep.Spec.ExpectedIPs = []string{f.IP}
hep.Spec.InterfaceName = "*"
_, err := client.HostEndpoints().Create(ctx, hep, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
}
})
It("should block host-to-host traffic in the absence of policy allowing it", func() {
cc.ExpectNone(felixes[0], hostW[1])
cc.ExpectNone(felixes[1], hostW[0])
cc.ExpectSome(w[0], w[1])
cc.ExpectSome(w[1], w[0])
cc.CheckConnectivity()
})
It("should allow host-to-own-pod traffic in the absence of policy allowing it but not host to other-pods", func() {
cc.ExpectSome(felixes[0], w[0])
cc.ExpectSome(felixes[1], w[1])
cc.ExpectNone(felixes[0], w[1])
cc.ExpectNone(felixes[1], w[0])
cc.CheckConnectivity()
})
It("should allow felixes[0] to reach felixes[1] if ingress and egress policies are in place", func() {
// Create a policy selecting felix[1] that allows egress.
policy := api.NewGlobalNetworkPolicy()
policy.Name = "f0-egress"
policy.Spec.Egress = []api.Rule{{Action: api.Allow}}
policy.Spec.Selector = fmt.Sprintf("hostname == '%s'", felixes[0].Hostname)
_, err := client.GlobalNetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions)
Expect(err).NotTo(HaveOccurred())
// But there is no policy allowing ingress into felix[1].
cc.ExpectNone(felixes[0], hostW[1])
cc.ExpectNone(felixes[1], hostW[0])
// Workload connectivity is unchanged.
cc.ExpectSome(w[0], w[1])
cc.ExpectSome(w[1], w[0])
cc.CheckConnectivity()
cc.ResetExpectations()
// Now add a policy selecting felix[1] that allows ingress.
policy = api.NewGlobalNetworkPolicy()
policy.Name = "f1-ingress"
policy.Spec.Ingress = []api.Rule{{Action: api.Allow}}
policy.Spec.Selector = fmt.Sprintf("hostname == '%s'", felixes[1].Hostname)
_, err = client.GlobalNetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions)
Expect(err).NotTo(HaveOccurred())
// Now felixes[0] can reach felixes[1].
cc.ExpectSome(felixes[0], hostW[1])
cc.ExpectNone(felixes[1], hostW[0])
// Workload connectivity is unchanged.
cc.ExpectSome(w[0], w[1])
cc.ExpectSome(w[1], w[0])
cc.CheckConnectivity()
})
Context("with policy allowing port 8055", func() {
BeforeEach(func() {
tcp := numorstring.ProtocolFromString("tcp")
udp := numorstring.ProtocolFromString("udp")
p8055 := numorstring.SinglePort(8055)
policy := api.NewGlobalNetworkPolicy()
policy.Name = "allow-8055"
policy.Spec.Ingress = []api.Rule{
{
Protocol: &udp,
Destination: api.EntityRule{
Ports: []numorstring.Port{p8055},
},
Action: api.Allow,
},
{
Protocol: &tcp,
Destination: api.EntityRule{
Ports: []numorstring.Port{p8055},
},
Action: api.Allow,
},
}
policy.Spec.Egress = []api.Rule{
{
Protocol: &udp,
Destination: api.EntityRule{
Ports: []numorstring.Port{p8055},
},
Action: api.Allow,
},
{
Protocol: &tcp,
Destination: api.EntityRule{
Ports: []numorstring.Port{p8055},
},
Action: api.Allow,
},
}
policy.Spec.Selector = fmt.Sprintf("has(host-endpoint)")
_, err := client.GlobalNetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions)
Expect(err).NotTo(HaveOccurred())
})
// Please take care if adding other connectivity checks into this case, to
// avoid those other checks setting up conntrack state that allows the
// existing case to pass for a different reason.
It("allows host0 to remote Calico-networked workload via service IP", func() {
// Allocate a service IP.
serviceIP := "10.96.10.1"
// Add a NAT rule for the service IP.
felixes[0].ProgramIptablesDNAT(serviceIP, w[1].IP, "OUTPUT")
// Expect to connect to the service IP.
cc.ExpectSome(felixes[0], connectivity.TargetIP(serviceIP), 8055)
cc.CheckConnectivity()
})
})
})
Context("after removing BGP address from nodes", func() {
// Simulate having a host send IPIP traffic from an unknown source, should get blocked.
BeforeEach(func() {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
listOptions := options.ListOptions{}
if bpfEnabled {
listOptions.Name = felixes[0].Hostname
}
l, err := client.Nodes().List(ctx, listOptions)
Expect(err).NotTo(HaveOccurred())
for _, node := range l.Items {
node.Spec.BGP = nil
_, err := client.Nodes().Update(ctx, &node, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
}
if bpfEnabled {
Eventually(felixes[1].NumTCBPFProgsEth0, "5s", "200ms").Should(Equal(2))
} else {
for _, f := range felixes {
// Removing the BGP config triggers a Felix restart and Felix has a 2s timer during
// a config restart to ensure that it doesn't tight loop. Wait for the ipset to be
// updated as a signal that Felix has restarted.
Eventually(func() int {
return getNumIPSetMembers(f.Container, "cali40all-hosts-net")
}, "5s", "200ms").Should(BeZero())
}
}
})
It("should have no workload to workload connectivity", func() {
cc.ExpectNone(w[0], w[1])
cc.ExpectNone(w[1], w[0])
cc.CheckConnectivity()
})
})
Context("external nodes configured", func() {
BeforeEach(func() {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
l, err := client.Nodes().List(ctx, options.ListOptions{})
Expect(err).NotTo(HaveOccurred())
// Now remove the BGP configuration for felixes[0]
var prevBGPSpec api.NodeBGPSpec
for _, node := range l.Items {
log.Infof("node: %v", node)
if node.Name == felixes[0].Name {
// save the old spec
prevBGPSpec = *node.Spec.BGP
node.Spec.BGP = nil
_, err = client.Nodes().Update(ctx, &node, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
}
}
// Removing the BGP config triggers a Felix restart. Wait for the ipset to be updated as a signal that Felix
// has restarted.
if !bpfEnabled {
for _, f := range felixes {
Eventually(func() int {
return getNumIPSetMembers(f.Container, "cali40all-hosts-net")
}, "5s", "200ms").Should(Equal(1))
}
}
updateConfig := func(addr string) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
c, err := client.FelixConfigurations().Get(ctx, "default", options.GetOptions{})
Expect(err).NotTo(HaveOccurred())
c.Spec.ExternalNodesCIDRList = &[]string{addr, "1.1.1.1"}
log.WithFields(log.Fields{"felixconfiguration": c, "adding Addr": addr}).Info("Updating FelixConfiguration ")
_, err = client.FelixConfigurations().Update(ctx, c, options.SetOptions{})
Expect(err).NotTo(HaveOccurred())
}
updateConfig(prevBGPSpec.IPv4Address)
// Wait for the config to take
for _, f := range felixes {
if bpfEnabled {
Eventually(f.BPFRoutes, "5s", "200ms").Should(ContainSubstring("1.1.1.1/32"))
} else {
Eventually(func() int {
return getNumIPSetMembers(f.Container, "cali40all-hosts-net")
}, "5s", "200ms").Should(Equal(3))
}
}
})
It("should have all-hosts-net ipset configured with the external hosts and workloads connect", func() {
f := felixes[0]
// Add back the ip route via the tunnel on the Felix whose route was nuked when we removed its BGP spec.
f.Exec("ip", "route", "add", w[1].IP, "via", felixes[1].IP, "dev", "tunl0", "onlink")
cc.ExpectSome(w[0], w[1])
cc.CheckConnectivity()
})
})
})
func getNumIPSetMembers(c *containers.Container, ipSetName string) int {
return getIPSetCounts(c)[ipSetName]
}
func getIPSetCounts(c *containers.Container) map[string]int {
ipsetsOutput, err := c.ExecOutput("ipset", "list")
Expect(err).NotTo(HaveOccurred())
numMembers := map[string]int{}
currentName := ""
membersSeen := false
log.WithField("ipsets", ipsetsOutput).Info("IP sets state")
for _, line := range strings.Split(ipsetsOutput, "\n") {
log.WithField("line", line).Debug("Parsing line")
if strings.HasPrefix(line, "Name:") {
currentName = strings.Split(line, " ")[1]
membersSeen = false
} else if strings.HasPrefix(line, "Members:") {
membersSeen = true
} else if membersSeen && len(strings.TrimSpace(line)) > 0 {
log.Debugf("IP set %s has member %s", currentName, line)
numMembers[currentName]++
}
}
return numMembers
}
| 1 | 18,804 | Why this difference? | projectcalico-felix | go |
@@ -6533,6 +6533,8 @@ odbc_SQLSrvr_ExtractLob_sme_(
if (retcode == SQL_ERROR)
{
ERROR_DESC_def *p_buffer = QryLobExtractSrvrStmt->sqlError.errorList._buffer;
+ char errNumStr[128];
+ sprintf(errNumStr, "%d", p_buffer->sqlcode);
strncpy(RequestError, p_buffer->errorText, sizeof(RequestError) - 1);
SendEventMsg(MSG_SQL_ERROR, | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
********************************************************************/
//
// MODULE: SrvrOther.cpp
//
// PURPOSE: Implements the following methods
// odbc_SQLSvc_Prepare_sme_
// odbc_SQLSvc_ExecuteN_sme_
// odbc_SQLSvc_Close_sme_
// odbc_SQLSvc_FetchN_sme_
// odbc_SQLSvc_EndTransaction_sme_
// odbc_SQLSvc_ExecuteCall_sme_
//
#include <platform_ndcs.h>
#include <platform_utils.h>
#include <stdio.h>
#include <stdlib.h>
#include <map>
#include <string>
// Used to disable and enable dumps via setrlimit in a release env.
#ifndef _DEBUG
#include <sys/resource.h>
#endif
#include <sql.h>
#include <sqlext.h>
#include "odbcCommon.h"
#include "odbc_sv.h"
#include "srvrcommon.h"
#include "sqlinterface.h"
#include "srvrkds.h"
#include "SQLWrapper.h"
#include "CommonDiags.h"
#include "tdm_odbcSrvrMsg.h"
#include "ResStatistics.h"
#include "ResStatisticsSession.h"
#include "ResStatisticsStatement.h"
#include "NskUtil.h"
// reserved names for seabase metadata where SQL table information is kept
#ifndef SEABASE_MD_SCHEMA
#define SEABASE_MD_SCHEMA "\"_MD_\""
#define SEABASE_MD_CATALOG "TRAFODION"
#define SEABASE_COLUMNS "COLUMNS"
#define SEABASE_DEFAULTS "DEFAULTS"
#define SEABASE_INDEXES "INDEXES"
#define SEABASE_KEYS "KEYS"
#define SEABASE_OBJECTS "OBJECTS"
#define SEABASE_OBJECTUID "OBJECTUID"
#define SEABASE_TABLES "TABLES"
#define SEABASE_VIEWS "VIEWS"
#define SEABASE_VIEWS_USAGE "VIEWS_USAGE"
#define SEABASE_VERSIONS "VERSIONS"
#endif
//#include "zmxocfg.h" //aruna
#include <dlfcn.h>
#include "sqlcli.h"
#include "TransportBase.h"
// #ifdef _TMP_SQ_SECURITY
#include "secsrvrmxo.h"
#include "dbUserAuth.h"
// #endif
#define SQL_API_TBLSYNONYM 1917
#define SQL_API_TBLMVS 1918
using namespace SRVR;
bool rePrepare2WouldLikeToExecute(Long stmtHandle
, Int32 *returnCode
, Int32 *sqlWarningOrErrorLength
, char *&sqlWarningOrError);
short qrysrvcExecuteFinished(
const char *stmtLabel
, const Long stmtHandle
, const bool bCheckSqlQueryType
, const short error_code
, const bool bFetch
, const bool bException
, const bool bErase);
void AllocateAdaptiveSegment(SRVR_STMT_HDL *pSrvrStmt);
void DeallocateAdaptiveSegment(SRVR_STMT_HDL *pSrvrStmt);
void ClearAdaptiveSegment(short adapiveSeg = -1);
static void setAuthenticationError(
bool & bSQLMessageSet,
odbc_SQLSvc_SQLError * SQLError,
const char * externalUsername,
bool isInternalError);
// Internal calls - Defined in libcli.so
int SQL_EXEC_AssignParserFlagsForExSqlComp_Internal( /*IN*/ unsigned int flagbits);
int SQL_EXEC_GetParserFlagsForExSqlComp_Internal( /*IN*/ unsigned int &flagbits);
void SQL_EXEC_SetParserFlagsForExSqlComp_Internal( /*IN*/ unsigned int flagbits);
#define INTERNAL_QUERY_FROM_EXEUTIL 0x20000
//#define SRVR_PERFORMANCE
SMD_SELECT_TABLE SQLCommitStmt[] = {
{ STRING_TYPE, "COMMIT WORK"},
{ END_OF_TABLE}
};
SMD_SELECT_TABLE SQLRollbackStmt[] = {
{ STRING_TYPE, "ROLLBACK WORK"},
{ END_OF_TABLE}
};
SMD_QUERY_TABLE tranQueryTable[] = {
{"SQL_COMMIT", SQLCommitStmt, TYPE_UNKNOWN, FALSE, FALSE},
{"SQL_ROLLBACK", SQLRollbackStmt, TYPE_UNKNOWN, FALSE, FALSE},
{NULL}
};
#define SQL_API_JDBC 9999
#define SQL_API_SQLTABLES_JDBC SQL_API_SQLTABLES + SQL_API_JDBC
#define SQL_API_SQLGETTYPEINFO_JDBC SQL_API_SQLGETTYPEINFO + SQL_API_JDBC
#define SQL_API_SQLCOLUMNS_JDBC SQL_API_SQLCOLUMNS + SQL_API_JDBC
#define SQL_API_SQLSPECIALCOLUMNS_JDBC SQL_API_SQLSPECIALCOLUMNS + SQL_API_JDBC
#define SQL_API_SQLPROCEDURES_JDBC SQL_API_SQLPROCEDURES + SQL_API_JDBC
#define SQL_API_SQLPROCEDURECOLUMNS_JDBC SQL_API_SQLPROCEDURECOLUMNS + SQL_API_JDBC
// The value represents SQL version, MXCS module major version and MXCS module minor version.
#define MODULE_RELEASE_VERSION 200
#define MODULE_MAJOR_VERSION 400
#define MODULE_MINOR_VERSION 000
#define SQL_INVALID_USER_CODE -8837
// ResStatistics
ResStatisticsSession *resStatSession;
ResStatisticsStatement *resStatStatement;
BOOL resStatCollectorError = false;
struct collect_info setinit;
Int32 inState = STMTSTAT_NONE;
short inSqlStmtType = TYPE_UNKNOWN;
double inEstimatedCost = 0;
char *inQueryId = NULL;
char *inSqlString = NULL;
Int32 inErrorStatement = 0;
Int32 inWarningStatement = 0;
int64 inRowCount = 0;
Int32 inErrorCode = 0;
Int32 inSqlQueryType = 0;
Int32 inSqlNewQueryType = 0;
char *inSqlError = NULL;
Int32 inSqlErrorLength = 0;
bool setStatisticsFlag = FALSE;
// end ResStatistics
char b[317];
bool securitySetup = false;
//char QueryQueue[1024];
//int64 queryId;
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_Prepare'
*/
extern "C" void
odbc_SQLSvc_Prepare_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_Prepare_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ const IDL_char *stmtLabel
, /* In */ const IDL_char *stmtExplainLabel
, /* In */ IDL_short stmtType
, /* In */ IDL_string sqlString
, /* In */ IDL_short sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* Out */ Int32 *estimatedCost
, /* Out */ SQLItemDescList_def *inputDesc
, /* Out */ SQLItemDescList_def *outputDesc
, /* Out */ ERROR_DESC_LIST_def *sqlWarning)
{
SRVRTRACE_ENTER(FILE_SME+1)
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQL_QUERY_COST_INFO cost_info;
SQL_QUERY_COMPILER_STATS_INFO comp_stats_info;
SQLRETURN rc = PROGRAM_ERROR;
Int32 holdestimatedCost;
bool flag_21036 = false;
if (sqlString == NULL)
{
exception_->exception_nr = odbc_SQLSvc_Prepare_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_NULL_SQL_STMT;
}
// resource statistics
if (resStatStatement != NULL && stmtType == EXTERNAL_STMT)
{
inState = STMTSTAT_PREPARE;
inSqlStmtType = TYPE_UNKNOWN;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlQueryType = SQL_UNKNOWN;
inSqlNewQueryType = SQL_UNKNOWN;
inSqlError = NULL;
inSqlErrorLength = 0;
bzero(&cost_info, sizeof(cost_info));
resStatStatement->start(inState,
inSqlQueryType,
stmtLabel,
NULL,
inEstimatedCost,
&flag_21036,
sqlString);
}
if (exception_->exception_nr == 0)
{
if (stmtType != TYPE_SMD)
{
if ((pSrvrStmt = getSrvrStmt(stmtLabel, FALSE)) != NULL)
{
pSrvrStmt->cleanupAll();
pSrvrStmt->currentMethod = odbc_SQLSvc_Close_ldx_;
pSrvrStmt->freeResourceOpt = SQL_DROP;
FREESTATEMENT(pSrvrStmt);
}
// Need to validate the stmtLabel
// Given a label find out the SRVR_STMT_HDL
if ((pSrvrStmt = getSrvrStmt(stmtLabel, TRUE)) == NULL)
{
exception_->exception_nr = odbc_SQLSvc_Prepare_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
}
if (exception_->exception_nr == 0)
{
if (resStatStatement != NULL && stmtType == EXTERNAL_STMT)
pSrvrStmt->inState = inState;
rc = SQL_SUCCESS;
if (!pSrvrStmt->isReadFromModule)
{
// cleanup all memory allocated in the previous operations
pSrvrStmt->cleanupAll();
pSrvrStmt->sqlStringLen = strlen(sqlString);
pSrvrStmt->sqlString = new char[pSrvrStmt->sqlStringLen+1];
if (pSrvrStmt->sqlString == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "Prepare");
exit(0);
}
strcpy(pSrvrStmt->sqlString, sqlString);
pSrvrStmt->stmtType = stmtType;
pSrvrStmt->currentMethod = odbc_SQLSvc_Prepare_ldx_;
rc = PREPARE(pSrvrStmt);
switch (rc)
{
case SQL_SUCCESS:
break;
case SQL_SUCCESS_WITH_INFO:
GETSQLWARNING(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlWarning);
break;
case SQL_ERROR:
GETSQLERROR(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError);
break;
case ODBC_RG_WARNING:
// if there is RG_WARNING, we don't pass SQL Warning to the application
// Hence, we need to clear any warnings
// call SQL_EXEC_ClearDiagnostics
// CLEARDIAGNOSTICS(pSrvrStmt);
rc = SQL_SUCCESS_WITH_INFO;
case ODBC_SERVER_ERROR:
case ODBC_RG_ERROR:
default:
break;
}
}
}
}
/* else // added for user module support
{
char *sqltoken;
char sqlseps[] = " \t\n{(";
int ch = '.';
int TknLen = 0;
int SStrLen = 0;
char VariableValue[1000];
VariableValue[0] = '\0';
strcpy(VariableValue,sqlString);
sqltoken = strtok(VariableValue, sqlseps);
if (_stricmp(sqltoken, "SMD") == 0)
{
sqltoken = strtok(NULL, sqlseps);
if (sqltoken != NULL)
{
UserModuleName[0] = '\0';
UserStatementName[0] = '\0';
TknLen = strrchr(sqltoken, ch) - sqltoken + 1;
SStrLen = strlen(sqltoken);
strncpy(UserModuleName,sqltoken, TknLen-1);
UserModuleName[TknLen-1] = '\0';
strncpy(UserStatementName,sqltoken + TknLen, SStrLen);
UserStatementName[SStrLen-TknLen+1] = '\0';
// Need to validate the stmtLabel
// Given a label find out the SRVR_STMT_HDL
pSrvrStmt = getSrvrStmt(UserStatementName, TRUE);
strcpy(pSrvrStmt->stmtName, UserStatementName);
strcpy(pSrvrStmt->cursorName, UserStatementName);
pSrvrStmt->stmtType = stmtType;
user_module.module_name = UserModuleName;
user_module.module_name_len = strlen(UserModuleName);
user_module.charset = "ISO88591";
user_module.creation_timestamp = 1234567890;
rc = pSrvrStmt->PrepareUserModule();
}
else
{
// Return Error Invalid Module Name.
}
sqltoken = strtok(NULL, sqlseps);
if (sqltoken != NULL)
{
// Return Error Invalid Module Name.
}
}
else
{
// Return Error Invalid Call.
}
}
*/
if (exception_->exception_nr == 0)
{
switch (rc)
{
case SQL_SUCCESS:
case SQL_SUCCESS_WITH_INFO:
exception_->exception_nr = 0;
// Copy all the output parameters
// Vijay - Changes to support not to parse tokens for statement type SELECT
holdestimatedCost = (Int32)pSrvrStmt->cost_info.totalTime; // SQL returns cost in a structure - cost.totalTime
if ((pSrvrStmt->sqlQueryType == SQL_SELECT_NON_UNIQUE) || (pSrvrStmt->sqlQueryType == SQL_SELECT_UNIQUE))
pSrvrStmt->sqlStmtType = TYPE_SELECT;
*estimatedCost = pSrvrStmt->sqlQueryType;
inputDesc->_length = pSrvrStmt->inputDescList._length;
inputDesc->_buffer = pSrvrStmt->inputDescList._buffer;
outputDesc->_length = pSrvrStmt->outputDescList._length;
outputDesc->_buffer = pSrvrStmt->outputDescList._buffer;
sqlWarning->_length = pSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = pSrvrStmt->sqlWarning._buffer;
break;
case SQL_STILL_EXECUTING:
exception_->exception_nr = odbc_SQLSvc_Prepare_SQLStillExecuting_exn_;
break;
case ODBC_RG_ERROR:
case SQL_ERROR:
ERROR_DESC_def *error_desc_def;
error_desc_def = pSrvrStmt->sqlError.errorList._buffer;
if (pSrvrStmt->sqlError.errorList._length != 0 && error_desc_def->sqlcode == -8007)
{
exception_->exception_nr = odbc_SQLSvc_Prepare_SQLQueryCancelled_exn_;
exception_->u.SQLQueryCancelled.sqlcode = error_desc_def->sqlcode;
}
else
{
exception_->exception_nr = odbc_SQLSvc_Prepare_SQLError_exn_;
exception_->u.SQLError.errorList._length = pSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = pSrvrStmt->sqlError.errorList._buffer;
}
break;
case PROGRAM_ERROR:
exception_->exception_nr = odbc_SQLSvc_Prepare_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_PREPARE_FAILED;
default:
break;
}
}
}
// resource statistics
if (resStatStatement != NULL && stmtType == EXTERNAL_STMT)
{
if (exception_->exception_nr != 0 && exception_->u.SQLError.errorList._buffer != NULL)
{
inErrorStatement ++;
ERROR_DESC_def *p_buffer = exception_->u.SQLError.errorList._buffer;
inErrorCode = p_buffer->sqlcode;
inSqlError = p_buffer->errorText;
inSqlErrorLength = strlen(p_buffer->errorText);
}
if (sqlWarning->_length != 0)
inWarningStatement ++;
if (sqlString == NULL)
sqlString = "";
inSqlString = new char[strlen(sqlString) + 1];
if (inSqlString == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "inSqlString");
exit(0);
}
strcpy(inSqlString,sqlString);
if (pSrvrStmt != NULL)
{
inEstimatedCost = pSrvrStmt->cost_info.totalTime; // res stat reports estimated cost as double
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
inSqlNewQueryType = pSrvrStmt->sqlNewQueryType;
}
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&flag_21036,
inSqlNewQueryType);
delete inSqlString;
}
//end rs
pSrvrStmt->m_need_21036_end_msg = flag_21036;
SRVRTRACE_EXIT(FILE_SME+1);
return;
}
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_ExecuteN'
*/
extern "C" void
odbc_SQLSvc_ExecuteN_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_ExecuteN_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ const IDL_char *stmtLabel
, /* In */ IDL_string cursorName
, /* In */ IDL_short sqlStmtType
, /* In */ Int32 inputRowCnt
, /* In */ const SQLValueList_def *inputValueList
, /* In */ IDL_short sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* Out */ Int32 *rowsAffected
, /* Out */ ERROR_DESC_LIST_def *sqlWarning)
{
SRVRTRACE_ENTER(FILE_SME+2);
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQLRETURN rc = SQL_SUCCESS;
bool bWMAutoCommitOff = false;
if (inputRowCnt < 0)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_ROW_COUNT;
}
else
{
if (sqlStmtType == TYPE_SELECT && inputRowCnt > 1)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_ROW_COUNT_AND_SELECT;
}
else
{
if ((pSrvrStmt = getSrvrStmt(stmtLabel, FALSE)) == NULL)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
}
}
}
if (exception_->exception_nr == 0)
{
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->inState = inState = STMTSTAT_EXECUTE;
inSqlStmtType = sqlStmtType;
inEstimatedCost = pSrvrStmt->cost_info.totalTime;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlError = NULL;
inSqlErrorLength = 0;
/*resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt->sqlUniqueQueryID,
pSrvrStmt->cost_info,
pSrvrStmt->comp_stats_info,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg,
false,
pSrvrStmt->sqlString); */
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlString);
}
//end rs
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
if(pSrvrStmt->bSQLValueListSet)
pSrvrStmt->cleanupSQLValueList();
pSrvrStmt->inputRowCnt = inputRowCnt;
pSrvrStmt->sqlStmtType = sqlStmtType;
if (cursorName != NULL && cursorName[0] != '\0')
{
pSrvrStmt->cursorNameLen = strlen(cursorName);
pSrvrStmt->cursorNameLen = pSrvrStmt->cursorNameLen < sizeof(pSrvrStmt->cursorName)? pSrvrStmt->cursorNameLen : sizeof(pSrvrStmt->cursorName);
strncpy(pSrvrStmt->cursorName, cursorName, sizeof(pSrvrStmt->cursorName));
pSrvrStmt->cursorName[sizeof(pSrvrStmt->cursorName)-1] = 0;
}
else
pSrvrStmt->cursorName[0] = '\0';
pSrvrStmt->inputValueList._buffer = inputValueList->_buffer;
pSrvrStmt->inputValueList._length = inputValueList->_length;
pSrvrStmt->currentMethod = odbc_SQLSvc_ExecuteN_ldx_;
// batch job support for T4
// Fix for transaction issue 20/09/06
if ((WSQL_EXEC_Xact(SQLTRANS_STATUS,NULL) != 0) && srvrGlobal->bAutoCommitOn == TRUE && sqlStmtType != TYPE_SELECT)
{
bWMAutoCommitOff = true;
SRVR_STMT_HDL *TranOffSrvrStmt;
if ((TranOffSrvrStmt = getSrvrStmt("STMT_TRANS_OFF_1", FALSE)) == NULL)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
}
else
{
SQLValueList_def *inValueList;
markNewOperator,inValueList = new SQLValueList_def();
if (inValueList == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "odbc_SQLSvc_ExecuteN_sme_");
exit(0);
}
inValueList->_buffer = NULL;
inValueList->_length = 0;
rc = TranOffSrvrStmt->Execute(NULL,1,TYPE_UNKNOWN,inValueList,SQL_ASYNC_ENABLE_OFF,0);
if (rc == SQL_ERROR)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_SQLError_exn_;
exception_->u.SQLError.errorList._length = TranOffSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = TranOffSrvrStmt->sqlError.errorList._buffer;
}
else if (rc != SQL_SUCCESS)
{
exception_->exception_nr = 0;
sqlWarning->_length = TranOffSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = TranOffSrvrStmt->sqlWarning._buffer;
}
delete inValueList;
}
}
if (exception_->exception_nr == 0)
{
rc = EXECUTE(pSrvrStmt);
switch (rc)
{
case ODBC_RG_WARNING:
// if there is RG_WARNING, we don't pass SQL Warning to the application
// Hence, we need to clear any warnings
// call SQL_EXEC_ClearDiagnostics
// CLEARDIAGNOSTICS(pSrvrStmt);
rc = SQL_SUCCESS_WITH_INFO;
case SQL_SUCCESS_WITH_INFO:
GETSQLWARNING(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlWarning);
case SQL_SUCCESS:
exception_->exception_nr = 0;
// Copy the output values
*rowsAffected = pSrvrStmt->rowsAffected;
sqlWarning->_length = pSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = pSrvrStmt->sqlWarning._buffer;
break;
case SQL_STILL_EXECUTING:
exception_->exception_nr = odbc_SQLSvc_ExecuteN_SQLStillExecuting_exn_;
break;
case SQL_INVALID_HANDLE:
exception_->exception_nr = odbc_SQLSvc_ExecuteN_SQLInvalidHandle_exn_;
break;
case SQL_NEED_DATA:
exception_->exception_nr = odbc_SQLSvc_ExecuteN_SQLNeedData_exn_;
break;
case SQL_ERROR:
GETSQLERROR(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError);
case ODBC_SERVER_ERROR:
if (rc == ODBC_SERVER_ERROR)
{
// Allocate Error Desc
kdsCreateSQLErrorException(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError, 1);
// Add SQL Error
kdsCopySQLErrorException(&pSrvrStmt->sqlError, NULL_VALUE_ERROR, NULL_VALUE_ERROR_SQLCODE,
NULL_VALUE_ERROR_SQLSTATE);
}
ERROR_DESC_def *error_desc_def;
error_desc_def = pSrvrStmt->sqlError.errorList._buffer;
if (pSrvrStmt->sqlError.errorList._length != 0 && error_desc_def->sqlcode == -8007)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_SQLQueryCancelled_exn_;
exception_->u.SQLQueryCancelled.sqlcode = error_desc_def->sqlcode;
}
else
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_SQLError_exn_;
exception_->u.SQLError.errorList._length = pSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = pSrvrStmt->sqlError.errorList._buffer;
}
break;
case -8814:
case 8814:
rc = SQL_RETRY_COMPILE_AGAIN;
exception_->exception_nr = odbc_SQLSvc_ExecuteN_SQLRetryCompile_exn_;
break;
case PROGRAM_ERROR:
exception_->exception_nr = odbc_SQLSvc_ExecuteN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECUTE_FAILED;
default:
break;
}
if (resStatStatement != NULL)
{
resStatStatement->setStatistics(pSrvrStmt);
}
// batch job support for T4
// Fix for transaction issue 20/09/06
if ( bWMAutoCommitOff )
{
SQLValueList_def *inValueList;
markNewOperator,inValueList = new SQLValueList_def();
if (inValueList == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "odbc_SQLSvc_ExecuteN_sme_");
exit(0);
}
inValueList->_buffer = NULL;
inValueList->_length = 0;
if(WSQL_EXEC_Xact(SQLTRANS_STATUS,NULL) == 0)
{
SRVR_STMT_HDL *RbwSrvrStmt;
SRVR_STMT_HDL *CmwSrvrStmt;
if (exception_->exception_nr != 0)
{
RbwSrvrStmt = getSrvrStmt("STMT_ROLLBACK_1", FALSE);
rc = RbwSrvrStmt->Execute(NULL,1,TYPE_UNKNOWN,inValueList,SQL_ASYNC_ENABLE_OFF,0);
}
else
{
CmwSrvrStmt = getSrvrStmt("STMT_COMMIT_1", FALSE);
rc = CmwSrvrStmt->Execute(NULL,1,TYPE_UNKNOWN,inValueList,SQL_ASYNC_ENABLE_OFF,0);
if (rc == SQL_ERROR)
{
ERROR_DESC_def *error_desc_def = CmwSrvrStmt->sqlError.errorList._buffer;
if (CmwSrvrStmt->sqlError.errorList._length != 0 )
{
if(error_desc_def->sqlcode != -8605 )
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_ParamError_exn_;
if (CEE_TMP_ALLOCATE(call_id_, 50, (void **)&(exception_->u.ParamError.ParamDesc)) == CEE_SUCCESS)
sprintf(exception_->u.ParamError.ParamDesc, "%s (%d)",SQLSVC_EXCEPTION_EXECUTE_FAILED, error_desc_def->sqlcode);
else
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECUTE_FAILED;
}
else
rc = SQL_SUCCESS;
}
else
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECUTE_FAILED;
}
}
else if (rc != SQL_SUCCESS)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECUTE_FAILED;
}
}
}
// reset back to original setting
SRVR_STMT_HDL *TranOnSrvrStmt;
if ((TranOnSrvrStmt = getSrvrStmt("STMT_TRANS_ON_1", FALSE)) == NULL)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
}
rc = TranOnSrvrStmt->Execute(NULL,1,TYPE_UNKNOWN,inValueList,SQL_ASYNC_ENABLE_OFF,0);
if (rc == SQL_ERROR)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteN_SQLError_exn_;
exception_->u.SQLError.errorList._length = TranOnSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = TranOnSrvrStmt->sqlError.errorList._buffer;
}
else if (rc != SQL_SUCCESS)
{
exception_->exception_nr = 0;
sqlWarning->_length = TranOnSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = TranOnSrvrStmt->sqlWarning._buffer;
}
delete inValueList;
}
}
}
// resource statistics
if (resStatStatement != NULL && pSrvrStmt != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
if (exception_->exception_nr != 0 && exception_->u.SQLError.errorList._buffer != NULL)
{
inErrorStatement ++;
ERROR_DESC_def *p_buffer = exception_->u.SQLError.errorList._buffer;
inErrorCode = p_buffer->sqlcode;
inSqlError = p_buffer->errorText;
inSqlErrorLength = strlen(p_buffer->errorText);
}
if (sqlWarning->_length != 0)
inWarningStatement ++;
inRowCount = *rowsAffected;
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlNewQueryType,
pSrvrStmt->isClosed);
}
//end rs
SRVRTRACE_EXIT(FILE_SME+2);
return;
}
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_Prepare2'
*/
extern "C" void
odbc_SQLSvc_Prepare2_sme_(
/* In */ Int32 inputRowCnt
, /* In */ Int32 sqlStmtType
, /* In */ const IDL_char *stmtLabel
, /* In */ IDL_string sqlString
, /* In */ Int32 holdableCursor
, /* Out */ Int32 *returnCode
, /* Out */ Int32 *sqlWarningOrErrorLength
, /* Out */ BYTE *&sqlWarningOrError
, /* Out */ Int32 *sqlQueryType
, /* Out */ Long *stmtHandle
, /* Out */ Int32 *estimatedCost
, /* Out */ Int32 *inputDescLength
, /* Out */ BYTE *&inputDesc
, /* Out */ Int32 *outputDescLength
, /* Out */ BYTE *&outputDesc
, /* In */ bool isFromExecDirect = false)
{
SRVRTRACE_ENTER(FILE_SME+18);
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQL_QUERY_COST_INFO cost_info;
SQL_QUERY_COMPILER_STATS_INFO comp_stats_info;
SQLRETURN rc = SQL_SUCCESS;
bool bSkipWouldLikeToExecute = false; // some queries have to skip Would Like To Execute
bool flag_21036 = false;
if (sqlString == NULL)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY090", "Invalid SQL String.", sqlWarningOrErrorLength, sqlWarningOrError);
}
else
{
// resource statistics
if (resStatStatement != NULL)
{
inState = STMTSTAT_PREPARE;
inSqlStmtType = TYPE_UNKNOWN;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlQueryType = SQL_UNKNOWN;
inSqlNewQueryType = SQL_UNKNOWN;
inSqlError = NULL;
inSqlErrorLength = 0;
bzero(&cost_info, sizeof(cost_info));
resStatStatement->start(inState,
inSqlQueryType,
stmtLabel,
NULL,
inEstimatedCost,
&flag_21036,
sqlString);
}
if ((pSrvrStmt = getSrvrStmt(stmtLabel, FALSE)) != NULL)
{
bSkipWouldLikeToExecute = pSrvrStmt->m_bSkipWouldLikeToExecute;
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
if(pSrvrStmt->bSQLValueListSet)
pSrvrStmt->cleanupSQLValueList();
pSrvrStmt->currentMethod = odbc_SQLSvc_Close_ldx_;
pSrvrStmt->freeResourceOpt = SQL_DROP;
FREESTATEMENT(pSrvrStmt);
}
// Need to validate the stmtLabel
// Given a label find out the SRVR_STMT_HDL
if ((pSrvrStmt = getSrvrStmt(stmtLabel, TRUE)) == NULL)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Statement Label could not be allocated.", sqlWarningOrErrorLength, sqlWarningOrError);
}
}
if (*returnCode == 0)
{
pSrvrStmt->m_bSkipWouldLikeToExecute = bSkipWouldLikeToExecute;
*stmtHandle = (Long)pSrvrStmt;
// cleanup all memory allocated in the previous operations
pSrvrStmt->cleanupAll();
if (resStatStatement != NULL)
pSrvrStmt->inState = inState;
pSrvrStmt->sqlStringLen = strlen(sqlString) + 1;
pSrvrStmt->sqlString = new char[pSrvrStmt->sqlStringLen];
if (pSrvrStmt->sqlString == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "Prepare2");
exit(0);
}
strncpy(pSrvrStmt->sqlString, sqlString, pSrvrStmt->sqlStringLen);
pSrvrStmt->sqlStmtType = (short)sqlStmtType;
pSrvrStmt->maxRowsetSize = inputRowCnt;
if (pSrvrStmt->maxRowsetSize == ROWSET_NOT_DEFINED) pSrvrStmt->maxRowsetSize = DEFAULT_ROWSET_SIZE;
if (srvrGlobal->srvrType == CORE_SRVR)
AllocateAdaptiveSegment(pSrvrStmt);
pSrvrStmt->currentMethod = odbc_SQLSvc_PrepareRowset_ldx_;
pSrvrStmt->holdableCursor = holdableCursor;
rc = PREPARE2(pSrvrStmt,isFromExecDirect);
if (srvrGlobal->srvrType == CORE_SRVR && rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO)
DeallocateAdaptiveSegment(pSrvrStmt);
switch (rc)
{
case ODBC_RG_WARNING:
case SQL_SHAPE_WARNING:
case SQL_SUCCESS_WITH_INFO:
*returnCode = SQL_SUCCESS_WITH_INFO;
*estimatedCost = (Int32)pSrvrStmt->cost_info.totalTime; // SQL returns cost in a structure - cost.totalTime == estimatedCost
*sqlQueryType = pSrvrStmt->sqlQueryType;
*inputDescLength = pSrvrStmt->inputDescBufferLength;
inputDesc = pSrvrStmt->inputDescBuffer;
*outputDescLength = pSrvrStmt->outputDescBufferLength;
outputDesc = pSrvrStmt->outputDescBuffer;
if (rc == SQL_SUCCESS_WITH_INFO)
{
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
else if (rc == SQL_SHAPE_WARNING)
{
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
else
{
char *RGWarningOrError;
RGWarningOrError = new char[256];
sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
GETMXCSWARNINGORERROR(1, "01000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
delete RGWarningOrError;
}
break;
case SQL_SUCCESS:
*estimatedCost = (Int32)pSrvrStmt->cost_info.totalTime; // SQL returns cost in a structure - cost.totalTime == estimatedCost
*sqlQueryType = pSrvrStmt->sqlQueryType;
*inputDescLength = pSrvrStmt->inputDescBufferLength;
inputDesc = pSrvrStmt->inputDescBuffer;
*outputDescLength = pSrvrStmt->outputDescBufferLength;
outputDesc = pSrvrStmt->outputDescBuffer;
break;
case SQL_ERROR:
case ODBC_RG_ERROR:
*returnCode = SQL_ERROR;
if (rc == SQL_ERROR)
{
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
else
{
char *RGWarningOrError;
RGWarningOrError = new char[256];
sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
GETMXCSWARNINGORERROR(-1, "HY000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
delete RGWarningOrError;
}
break;
case PROGRAM_ERROR:
GETMXCSWARNINGORERROR(-1, "HY000", SQLSVC_EXCEPTION_PREPARE_FAILED, sqlWarningOrErrorLength, sqlWarningOrError);
break;
case INFOSTATS_SYNTAX_ERROR:
case INFOSTATS_STMT_NOT_FOUND:
*returnCode = SQL_ERROR;
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
break;
default:
break;
}
}
// resource statistics
if (resStatStatement != NULL)
{
if (*returnCode == SQL_ERROR && pSrvrStmt != NULL && pSrvrStmt->sqlWarningOrError != NULL)
{
inErrorCode = *(Int32 *)(pSrvrStmt->sqlWarningOrError+8);
inErrorStatement ++;
inSqlError = (char*)pSrvrStmt->sqlWarningOrError + 16;
inSqlErrorLength =*(Int32 *)(pSrvrStmt->sqlWarningOrError + 12);
}
if (*returnCode == SQL_SUCCESS_WITH_INFO)
inWarningStatement ++;
if (sqlString == NULL)
sqlString = "";
if (pSrvrStmt != NULL)
{
inSqlString = new char[pSrvrStmt->sqlStringLen];
if (inSqlString == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "inSqlString");
exit(0);
}
strncpy(inSqlString,sqlString,pSrvrStmt->sqlStringLen);
inEstimatedCost = pSrvrStmt->cost_info.totalTime; // res stat reports estimated cost as double
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
inSqlNewQueryType = pSrvrStmt->sqlNewQueryType;
}
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&flag_21036,
inSqlNewQueryType);
if(inSqlString != NULL)
delete inSqlString;
}
//end rs
if(pSrvrStmt != NULL)
pSrvrStmt->m_need_21036_end_msg = flag_21036;
SRVRTRACE_EXIT(FILE_SME+18);
return;
}
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_Prepare2withRowsets'
*/
extern "C" void
odbc_SQLSvc_Prepare2withRowsets_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ Int32 sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* In */ Int32 inputRowCnt
, /* In */ Int32 sqlStmtType
, /* In */ Int32 stmtLength
, /* In */ const IDL_char *stmtLabel
, /* In */ Int32 stmtLabelCharset
, /* In */ Int32 cursorLength
, /* In */ IDL_string cursorName
, /* In */ Int32 cursorCharset
, /* In */ Int32 moduleNameLength
, /* In */ const IDL_char *moduleName
, /* In */ Int32 moduleCharset
, /* In */ int64 moduleTimestamp
, /* In */ Int32 sqlStringLength
, /* In */ IDL_string sqlString
, /* In */ Int32 sqlStringCharset
, /* In */ Int32 setStmtOptionsLength
, /* In */ IDL_string setStmtOptions
, /* In */ Int32 holdableCursor
, /* Out */ Int32 *returnCode
, /* Out */ Int32 *sqlWarningOrErrorLength
, /* Out */ BYTE *&sqlWarningOrError
, /* Out */ Int32 *sqlQueryType
, /* Out */ Long *stmtHandle
, /* Out */ Int32 *estimatedCost
, /* Out */ Int32 *inputDescLength
, /* Out */ BYTE *&inputDesc
, /* Out */ Int32 *outputDescLength
, /* Out */ BYTE *&outputDesc)
{
SRVRTRACE_ENTER(FILE_SME+18);
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQL_QUERY_COST_INFO cost_info;
SQL_QUERY_COMPILER_STATS_INFO comp_stats_info;
SQLRETURN rc = SQL_SUCCESS;
bool flag_21036 = false;
if (sqlString == NULL)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY090", "Invalid SQL String.", sqlWarningOrErrorLength, sqlWarningOrError);
}
else
{
// resource statistics
if (resStatStatement != NULL)
{
inState = STMTSTAT_PREPARE;
inSqlStmtType = TYPE_UNKNOWN;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlQueryType = SQL_UNKNOWN;
inSqlNewQueryType = SQL_UNKNOWN;
inSqlError = NULL;
inSqlErrorLength = 0;
bzero(&cost_info, sizeof(cost_info));
resStatStatement->start(inState,
inSqlQueryType,
stmtLabel,
NULL,
inEstimatedCost,
&flag_21036,
sqlString);
}
if ((pSrvrStmt = getSrvrStmt(stmtLabel, FALSE)) != NULL)
{
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
if(pSrvrStmt->bSQLValueListSet)
pSrvrStmt->cleanupSQLValueList();
pSrvrStmt->currentMethod = odbc_SQLSvc_Close_ldx_;
pSrvrStmt->freeResourceOpt = SQL_DROP;
FREESTATEMENT(pSrvrStmt);
}
// Need to validate the stmtLabel
// Given a label find out the SRVR_STMT_HDL
if ((pSrvrStmt = getSrvrStmt(stmtLabel, TRUE)) == NULL)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Statement Label not found.", sqlWarningOrErrorLength, sqlWarningOrError);
}
}
if (*returnCode == 0)
{
*stmtHandle = (Long)pSrvrStmt;
// cleanup all memory allocated in the previous operations
pSrvrStmt->cleanupAll();
if (resStatStatement != NULL)
pSrvrStmt->inState = inState;
pSrvrStmt->sqlStringLen = sqlStringLength;
pSrvrStmt->sqlString = new char[sqlStringLength];
if (pSrvrStmt->sqlString == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "Prepare2");
exit(0);
}
strncpy(pSrvrStmt->sqlString, sqlString, sqlStringLength);
pSrvrStmt->sqlStmtType = (short)sqlStmtType;
pSrvrStmt->maxRowsetSize = inputRowCnt;
if (pSrvrStmt->maxRowsetSize == ROWSET_NOT_DEFINED) pSrvrStmt->maxRowsetSize = DEFAULT_ROWSET_SIZE;
// this part is for NAR (not Atomic Rowset Recovery)
if (pSrvrStmt->maxRowsetSize > 1
&& (pSrvrStmt->sqlStmtType == TYPE_INSERT_PARAM))
// || pSrvrStmt->sqlStmtType == TYPE_UPDATE
// || pSrvrStmt->sqlStmtType == TYPE_DELETE))
{
if (srvrGlobal->EnvironmentType & MXO_ROWSET_ERROR_RECOVERY)
rc = WSQL_EXEC_SetStmtAttr(&pSrvrStmt->stmt, SQL_ATTR_ROWSET_ATOMICITY, SQL_NOT_ATOMIC,0);
else
rc = WSQL_EXEC_SetStmtAttr(&pSrvrStmt->stmt, SQL_ATTR_ROWSET_ATOMICITY, SQL_ATOMIC, 0);
if (rc < 0)
{
GETSQLERROR(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError);
return;
}
}
if (srvrGlobal->srvrType == CORE_SRVR)
AllocateAdaptiveSegment(pSrvrStmt);
pSrvrStmt->currentMethod = odbc_SQLSvc_PrepareRowset_ldx_;
pSrvrStmt->holdableCursor = holdableCursor;
rc = PREPARE2withRowsets(pSrvrStmt);
if (srvrGlobal->srvrType == CORE_SRVR && rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO)
DeallocateAdaptiveSegment(pSrvrStmt);
switch (rc)
{
case ODBC_RG_WARNING:
case SQL_SHAPE_WARNING:
case SQL_SUCCESS_WITH_INFO:
*returnCode = SQL_SUCCESS_WITH_INFO;
*estimatedCost = (Int32)pSrvrStmt->cost_info.totalTime; // SQL returns cost in a structure - cost.totalTime == estimatedCost
if (pSrvrStmt->sqlBulkFetchPossible && pSrvrStmt->sqlQueryType == SQL_SELECT_NON_UNIQUE)
*sqlQueryType = 10000;
else
*sqlQueryType = pSrvrStmt->sqlQueryType;
*inputDescLength = pSrvrStmt->inputDescBufferLength;
inputDesc = pSrvrStmt->inputDescBuffer;
*outputDescLength = pSrvrStmt->outputDescBufferLength;
outputDesc = pSrvrStmt->outputDescBuffer;
if (rc == SQL_SUCCESS_WITH_INFO)
{
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
else if (rc == SQL_SHAPE_WARNING)
{
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
else
{
char RGWarningOrError[256];
sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
GETMXCSWARNINGORERROR(1, "01000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
}
break;
case SQL_SUCCESS:
*estimatedCost = (Int32)pSrvrStmt->cost_info.totalTime; // SQL returns cost in a structure - cost.totalTime == estimatedCost
if (pSrvrStmt->sqlBulkFetchPossible && pSrvrStmt->sqlQueryType == SQL_SELECT_NON_UNIQUE)
*sqlQueryType = 10000;
else
*sqlQueryType = pSrvrStmt->sqlQueryType;
*inputDescLength = pSrvrStmt->inputDescBufferLength;
inputDesc = pSrvrStmt->inputDescBuffer;
*outputDescLength = pSrvrStmt->outputDescBufferLength;
outputDesc = pSrvrStmt->outputDescBuffer;
break;
case SQL_ERROR:
case ODBC_RG_ERROR:
*returnCode = SQL_ERROR;
if (rc == SQL_ERROR)
{
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
else
{
char *RGWarningOrError;
RGWarningOrError = new char[256];
sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
GETMXCSWARNINGORERROR(-1, "HY000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
delete RGWarningOrError;
}
break;
case PROGRAM_ERROR:
GETMXCSWARNINGORERROR(-1, "HY000", SQLSVC_EXCEPTION_PREPARE_FAILED, sqlWarningOrErrorLength, sqlWarningOrError);
break;
default:
break;
}
}
// resource statistics
if (resStatStatement != NULL)
{
if (*returnCode == SQL_ERROR && pSrvrStmt != NULL && pSrvrStmt->sqlWarningOrError != NULL)
{
inErrorCode = *(Int32 *)(pSrvrStmt->sqlWarningOrError+8);
inErrorStatement ++;
inSqlError = (char*)pSrvrStmt->sqlWarningOrError + 16;
inSqlErrorLength =*(Int32 *)(pSrvrStmt->sqlWarningOrError + 12);
}
if (*returnCode == SQL_SUCCESS_WITH_INFO)
inWarningStatement ++;
if (sqlString == NULL)
sqlString = "";
inSqlString = new char[sqlStringLength];
if (inSqlString == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "inSqlString");
exit(0);
}
strncpy(inSqlString,sqlString,sqlStringLength);
if (pSrvrStmt != NULL)
{
inEstimatedCost = pSrvrStmt->cost_info.totalTime; // res stat reports estimated cost as double
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
inSqlNewQueryType = pSrvrStmt->sqlNewQueryType;
}
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&flag_21036,
inSqlNewQueryType);
delete inSqlString;
}
//end rs
pSrvrStmt->m_need_21036_end_msg = flag_21036;
SRVRTRACE_EXIT(FILE_SME+18);
return;
} // end SQLSvc_Prepare2withRowsets_sme
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_Execute2'
*/
extern "C" void
odbc_SQLSvc_Execute2_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ Int32 sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* In */ Int32 inputRowCnt
, /* In */ Int32 sqlStmtType
, /* In */ Long stmtHandle
, /* In */ Int32 cursorLength
, /* In */ IDL_string cursorName
, /* In */ Int32 cursorCharset
, /* In */ Int32 holdableCursor
, /* In */ Int32 inValuesLength
, /* In */ BYTE *inValues
, /* Out */ Int32 *returnCode
, /* Out */ Int32 *sqlWarningOrErrorLength
, /* Out */ BYTE *&sqlWarningOrError
, /* Out */ Int32 *rowsAffected
, /* Out */ Int32 *outValuesLength
, /* Out */ BYTE *&outValues)
{
SRVRTRACE_ENTER(FILE_SME+19);
bool bRePrepare2 = false;
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQLRETURN rc = SQL_SUCCESS;
if ((pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle) == NULL)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Statement Label not found.", sqlWarningOrErrorLength, sqlWarningOrError);
}
else
{
if (pSrvrStmt->current_holdableCursor != holdableCursor)
{
rePrepare2( pSrvrStmt
, sqlStmtType
, inputRowCnt
, holdableCursor
, &rc
, returnCode
, sqlWarningOrErrorLength
, sqlWarningOrError
);
bRePrepare2 = true;
}
if (*returnCode == 0 || *returnCode == 1)
{
// resource statistics
// generate the actual start message after reprepare, if any.
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->inState = inState = STMTSTAT_EXECUTE;
inSqlStmtType = sqlStmtType;
inEstimatedCost = pSrvrStmt->cost_info.totalTime;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlError = NULL;
inSqlErrorLength = 0;
// For UPSERT statements force it as INSERT if the driver has sent an unknown type.
if( inSqlStmtType == TYPE_UNKNOWN && (pSrvrStmt->sqlQueryType == SQL_INSERT_UNIQUE || pSrvrStmt->sqlQueryType == SQL_INSERT_NON_UNIQUE) )
inSqlStmtType = TYPE_INSERT;
}
//end rs
if (inputRowCnt < 0)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Row Count.", sqlWarningOrErrorLength, sqlWarningOrError);
}
else
{
if (sqlStmtType == TYPE_SELECT && inputRowCnt > 1)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Row Count.", sqlWarningOrErrorLength, sqlWarningOrError);
}
}
if (*returnCode == 0 || *returnCode == 1)
{
if ((*returnCode == 0) && (pSrvrStmt->sqlWarningOrErrorLength > 0)) // To preserve warning returned at prepare time
{
if (pSrvrStmt->sqlWarningOrError != NULL)
delete pSrvrStmt->sqlWarningOrError;
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
pSrvrStmt->inputRowCnt = inputRowCnt;
pSrvrStmt->sqlStmtType = (short)sqlStmtType;
if (cursorLength > 0)
{
pSrvrStmt->cursorNameLen = cursorLength;
memcpy(pSrvrStmt->cursorName, cursorName, cursorLength);
pSrvrStmt->cursorName[cursorLength] = '\0';
}
else
pSrvrStmt->cursorName[0] = '\0';
if (pSrvrStmt->sqlQueryType == SQL_RWRS_SPECIAL_INSERT)
{
//memcpy(pSrvrStmt->inputDescVarBuffer, (void *)&pSrvrStmt->inputRowCnt , sizeof(pSrvrStmt->inputRowCnt) );
//memcpy(pSrvrStmt->inputDescVarBuffer+4, (void *)&pSrvrStmt->maxRowLen, sizeof(pSrvrStmt->maxRowLen) );
*((Int32 *)pSrvrStmt->inputDescVarBuffer) = pSrvrStmt->inputRowCnt;
*((Int32 *)(pSrvrStmt->inputDescVarBuffer+4)) = pSrvrStmt->maxRowLen;
//*((Int32)pSrvrStmt->inputDescVarBuffer+8) = inValues ;
*((BYTE **)(pSrvrStmt->inputDescVarBuffer+8)) = inValues ;
}
else
{
if (pSrvrStmt->inputDescVarBufferLen == inValuesLength)
memcpy(pSrvrStmt->inputDescVarBuffer, inValues, inValuesLength);
else
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY090", "Invalid param Values.", sqlWarningOrErrorLength, sqlWarningOrError);
}
}
if (bRePrepare2)
{
rc = rePrepare2WouldLikeToExecute((Long)pSrvrStmt, (Int32*)returnCode, (Int32*)sqlWarningOrErrorLength, (char*&)sqlWarningOrError);
if (rc == false)
{
*rowsAffected = -1;
if ((resStatStatement != NULL) && (pSrvrStmt->stmtType == EXTERNAL_STMT))
{
// generate 21036 start message
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
pSrvrStmt->stmtName,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlString);
}
goto out0;
}
}
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
pSrvrStmt->stmtName,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlString);
}
pSrvrStmt->currentMethod = odbc_SQLSvc_ExecuteN_ldx_;
rc = EXECUTE2(pSrvrStmt);
// char tmpString[32];
// tmpString[0] = '\0';
// sprintf(tmpString, "e: %Ld", pSrvrStmt->cliElapseTime);
// SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
// srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
// srvrGlobal->srvrObjRef, 1, tmpString);
switch (rc)
{
case SQL_SUCCESS_WITH_INFO:
*returnCode = SQL_SUCCESS_WITH_INFO;
*rowsAffected = pSrvrStmt->rowsAffected;
if ( pSrvrStmt->sqlQueryType == SQL_SELECT_UNIQUE
|| pSrvrStmt->sqlStmtType == TYPE_CALL)
{
*outValuesLength = pSrvrStmt->outputDescVarBufferLen;
outValues = pSrvrStmt->outputDescVarBuffer;
}
else
{
if (pSrvrStmt->sqlQueryType == SQL_RWRS_SPECIAL_INSERT)
{
if (inValues != NULL)
*pSrvrStmt->inputDescVarBuffer = NULL;
}
*outValuesLength = 0;
outValues = 0;
}
if (pSrvrStmt->sqlWarningOrErrorLength > 0) // overwriting warning returned at prepare time
{
if (pSrvrStmt->sqlWarningOrError != NULL)
delete pSrvrStmt->sqlWarningOrError;
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
break;
case SQL_SUCCESS:
*returnCode = SQL_SUCCESS;
*rowsAffected = pSrvrStmt->rowsAffected;
if ( pSrvrStmt->sqlQueryType == SQL_SELECT_UNIQUE
|| pSrvrStmt->sqlStmtType == TYPE_CALL)
{
*outValuesLength = pSrvrStmt->outputDescVarBufferLen;
outValues = pSrvrStmt->outputDescVarBuffer;
}
else
{
if (pSrvrStmt->sqlQueryType == SQL_RWRS_SPECIAL_INSERT)
{
if (inValues != NULL)
*pSrvrStmt->inputDescVarBuffer = NULL;
}
*outValuesLength = 0;
outValues = 0;
}
break;
case SQL_NO_DATA_FOUND:
*returnCode = SQL_NO_DATA_FOUND;
break;
case SQL_INVALID_HANDLE:
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Statement Handle.", sqlWarningOrErrorLength, sqlWarningOrError);
break;
case SQL_ERROR:
if (pSrvrStmt->sqlWarningOrErrorLength > 0) // Overwriting warning returned at prepare time
{
if (pSrvrStmt->sqlWarningOrError != NULL)
delete pSrvrStmt->sqlWarningOrError;
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
GETSQLWARNINGORERROR2(pSrvrStmt);
*returnCode = SQL_ERROR;
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
break;
default:
break;
}
if (resStatStatement != NULL)
{
// Avoid collecting statistics here for statement types (non-unique SELECTs and
// CALLs with result sets) that do not reach end of data or get closed during
// execute, since collecting them at this point causes issues in RMS.
if( pSrvrStmt->sqlQueryType != SQL_SELECT_NON_UNIQUE && pSrvrStmt->sqlQueryType != SQL_CALL_WITH_RESULT_SETS)
resStatStatement->setStatistics(pSrvrStmt);
}
}
out0:
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
if (*returnCode == SQL_ERROR && pSrvrStmt->sqlWarningOrError != NULL)
{
inErrorCode = *(Int32 *)(pSrvrStmt->sqlWarningOrError+8);
inErrorStatement ++;
inSqlError = (char*)pSrvrStmt->sqlWarningOrError + 16;
inSqlErrorLength =*(Int32 *)(pSrvrStmt->sqlWarningOrError + 12);
}
if (*returnCode == SQL_SUCCESS_WITH_INFO)
inWarningStatement ++;
if (pSrvrStmt->rowsAffectedHigherBytes != 0)
inRowCount = -1;
else
inRowCount = *rowsAffected;
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlNewQueryType,
pSrvrStmt->isClosed);
}
//end rs
}
}
SRVRTRACE_EXIT(FILE_SME+19);
return;
}
//--------------------------------------------------------------------------------
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_Execute2withRowsets'
*/
extern "C" void
odbc_SQLSvc_Execute2withRowsets_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ Int32 sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* In */ Int32 inputRowCnt
, /* In */ Int32 sqlStmtType
, /* In */ Long stmtHandle
, /* In */ Int32 cursorLength
, /* In */ IDL_string cursorName
, /* In */ Int32 cursorCharset
, /* In */ Int32 holdableCursor
, /* In */ Int32 inValuesLength
, /* In */ BYTE *inValues
, /* Out */ Int32 *returnCode
, /* Out */ Int32 *sqlWarningOrErrorLength
, /* Out */ BYTE *&sqlWarningOrError
, /* Out */ Int32 *rowsAffected
, /* Out */ Int32 *outValuesLength
, /* Out */ BYTE *&outValues)
{
SRVRTRACE_ENTER(FILE_SME+19);
bool bRePrepare2 = false;
/*
* The performance team wanted to be able to stub out the actual inserts
* to measure the contributions of individual components to the overall
* load times. If the env variable mxosrvr-stubout-EXECUTE2withRowsets
* is set to true in ms.env, we skip the call to EXECUTE2withRowsets
* and return SQL_SUCCESS with rowsAffected set to the input row count.
*/
static bool bCheckStubExecute2WithRowsets = true;
static bool bStubExecute2WithRowsets = false;
if(bCheckStubExecute2WithRowsets)
{
char *env = getenv("mxosrvr-stubout-EXECUTE2withRowsets");
if (env != NULL && strcmp(env,"true") == 0)
bStubExecute2WithRowsets = true;
bCheckStubExecute2WithRowsets = false;
}
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQLRETURN rc = SQL_SUCCESS;
if ((pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle) == NULL)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Statement Label not found.", sqlWarningOrErrorLength, sqlWarningOrError);
}
else
{
*returnCode = SQL_SUCCESS;
if (inputRowCnt < 0)
{
}
else if (sqlStmtType == TYPE_SELECT && inputRowCnt > 1)
{
}
else if ((pSrvrStmt->maxRowsetSize < inputRowCnt) || (pSrvrStmt->current_holdableCursor != holdableCursor))
{
rePrepare2( pSrvrStmt
, sqlStmtType
, inputRowCnt
, holdableCursor
,&rc
, returnCode
, sqlWarningOrErrorLength
, sqlWarningOrError
);
bRePrepare2 = true;
}
if (*returnCode == 0 || *returnCode == 1)
{
// resource statistics
// generate the actual start message after reprepare, if any.
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->inState = inState = STMTSTAT_EXECUTE;
inSqlStmtType = sqlStmtType;
inEstimatedCost = pSrvrStmt->cost_info.totalTime;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlError = NULL;
inSqlErrorLength = 0;
}
//end rs
if (inputRowCnt < 0)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Row Count.", sqlWarningOrErrorLength, sqlWarningOrError);
}
else
{
if (sqlStmtType == TYPE_SELECT && inputRowCnt > 1)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Row Count.", sqlWarningOrErrorLength, sqlWarningOrError);
}
}
if (*returnCode == 0 || *returnCode == 1)
{
// Fix for CR 5763/6389 - added additional checks to make sure the warnings, if any, are not lost from the
// rePrepare2() call in SrvrConnect.cpp (returnCode could be 0).
if ((*returnCode == 0) && (pSrvrStmt->sqlWarningOrErrorLength > 0) && pSrvrStmt->reprepareWarn == FALSE) // To preserve warning returned at prepare time
{
if (pSrvrStmt->sqlWarningOrError != NULL)
delete pSrvrStmt->sqlWarningOrError;
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
if(pSrvrStmt->bSQLValueListSet)
pSrvrStmt->cleanupSQLValueList();
if ( (*returnCode == 0 && rc == 0) || (*returnCode == 1 && rc == 1) )
{
pSrvrStmt->inputRowCnt = inputRowCnt;
pSrvrStmt->sqlStmtType = (short)sqlStmtType;
if (cursorLength > 0)
{
pSrvrStmt->cursorNameLen = cursorLength;
memcpy(pSrvrStmt->cursorName, cursorName, cursorLength);
}
else
pSrvrStmt->cursorName[0] = '\0';
if (pSrvrStmt->preparedWithRowsets == TRUE)
{
pSrvrStmt->transportBuffer = inValues;
pSrvrStmt->transportBufferLen = inValuesLength;
}
else if (pSrvrStmt->inputDescVarBufferLen == inValuesLength)
memcpy(pSrvrStmt->inputDescVarBuffer, inValues, inValuesLength);
else
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR( -1
, "HY090"
, "Invalid param Values."
, sqlWarningOrErrorLength
, sqlWarningOrError
);
goto out;
}
if (bRePrepare2)
{
// Note: The below method ends in a dummy call in CommonNSKFunctions.cpp. CR 5763 takes care of this.
rc = rePrepare2WouldLikeToExecute((Long)pSrvrStmt, (Int32*)returnCode, (Int32*)sqlWarningOrErrorLength, (char*&)sqlWarningOrError);
if (rc == false)
{
*rowsAffected = -1;
if ((resStatStatement != NULL) && (pSrvrStmt->stmtType == EXTERNAL_STMT))
{
// generate 21036 start message
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
pSrvrStmt->stmtName,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlString);
}
goto out;
}
}
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
pSrvrStmt->stmtName,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlString);
}
//end rs
pSrvrStmt->currentMethod = odbc_SQLSvc_ExecuteN_ldx_;
if(!bStubExecute2WithRowsets)
rc = EXECUTE2withRowsets(pSrvrStmt);
else {
rc = SQL_SUCCESS;
pSrvrStmt->rowsAffected = inputRowCnt;
}
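// Map the rowset execute result onto the ODBC return code, rows affected, and diagnostics.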
switch (rc)
{
case ROWSET_SQL_ERROR:
// Copy the output values
*rowsAffected = -1;
if (pSrvrStmt->sqlWarningOrErrorLength > 0) // Overwriting warning returned at prepare time
{
if (pSrvrStmt->sqlWarningOrError != NULL)
delete pSrvrStmt->sqlWarningOrError;
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
GETSQLWARNINGORERROR2forRowsets(pSrvrStmt);
*returnCode = SQL_ERROR;
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
break;
case SQL_SUCCESS_WITH_INFO:
*returnCode = SQL_SUCCESS_WITH_INFO;
*rowsAffected = pSrvrStmt->rowsAffected;
if ( pSrvrStmt->sqlQueryType == SQL_SELECT_UNIQUE
|| pSrvrStmt->sqlStmtType == TYPE_CALL)
{
*outValuesLength = pSrvrStmt->outputDescVarBufferLen;
outValues = pSrvrStmt->outputDescVarBuffer;
}
else
{
*outValuesLength = 0;
outValues = 0;
}
if (pSrvrStmt->sqlWarningOrErrorLength > 0) // Overwriting warning returned at prepare time
{
if (pSrvrStmt->sqlWarningOrError != NULL)
delete pSrvrStmt->sqlWarningOrError;
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
if (pSrvrStmt->sqlWarning._length > 0)
GETSQLWARNINGORERROR2forRowsets(pSrvrStmt);
else
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
break;
case SQL_SUCCESS:
*returnCode = SQL_SUCCESS;
*rowsAffected = pSrvrStmt->rowsAffected;
if (pSrvrStmt->sqlWarning._length > 0)
{
if (pSrvrStmt->sqlWarningOrErrorLength > 0) // Overwriting warning returned at prepare time
{
if (pSrvrStmt->sqlWarningOrError != NULL)
delete pSrvrStmt->sqlWarningOrError;
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
GETSQLWARNINGORERROR2forRowsets(pSrvrStmt);
*returnCode = SQL_SUCCESS_WITH_INFO; // We have warnings so return success with info.
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
if (pSrvrStmt->sqlQueryType == SQL_SELECT_UNIQUE
|| pSrvrStmt->sqlStmtType == TYPE_CALL)
{
*outValuesLength = pSrvrStmt->outputDescVarBufferLen;
outValues = pSrvrStmt->outputDescVarBuffer;
}
else
{
*outValuesLength = 0;
outValues = 0;
}
break;
case SQL_NO_DATA_FOUND:
*returnCode = SQL_NO_DATA_FOUND;
break;
case SQL_INVALID_HANDLE:
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Statement Handle.", sqlWarningOrErrorLength, sqlWarningOrError);
break;
case SQL_ERROR:
if (pSrvrStmt->sqlWarningOrErrorLength > 0) // Overwriting warning returned at prepare time
{
if (pSrvrStmt->sqlWarningOrError != NULL)
delete pSrvrStmt->sqlWarningOrError;
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
GETSQLWARNINGORERROR2(pSrvrStmt);
*returnCode = SQL_ERROR;
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
break;
default:
break;
}
if (resStatStatement != NULL)
{
// We don't need a check here similar to odbc_SQLSvc_Execute2_sme_()
// since for rowsets we don't call
// WSQL_EXEC_Exec().
resStatStatement->setStatistics(pSrvrStmt);
}
//
// The following transaction check was taken from Zbig.
//
if ( sqlStmtType == TYPE_INSERT_PARAM
&& (srvrGlobal->EnvironmentType & MXO_ROWSET_ERROR_RECOVERY)
&& pSrvrStmt->NA_supported == false
&& srvrGlobal->bAutoCommitOn == true)
{
if (SQL_EXEC_Xact(SQLTRANS_STATUS,NULL) == 0)
{
// transaction is running - do commit/rollback
SQLValueList_def inValueList;
inValueList._buffer = NULL;
inValueList._length = 0;
if (rc == ROWSET_SQL_ERROR)
{
SRVR_STMT_HDL *RbwSrvrStmt = getSrvrStmt("STMT_ROLLBACK_1", FALSE);
RbwSrvrStmt->Execute(NULL,1,TYPE_UNKNOWN,&inValueList,SQL_ASYNC_ENABLE_OFF,0);
}
else
{
SRVR_STMT_HDL *CmwSrvrStmt = getSrvrStmt("STMT_COMMIT_1", FALSE);
CmwSrvrStmt->Execute(NULL,1,TYPE_UNKNOWN,&inValueList,SQL_ASYNC_ENABLE_OFF,0);
}
}
}
}
} // end if (*returnCode == 0 && rc == 0)
// resource statistics
out:
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
if (*returnCode == SQL_ERROR && pSrvrStmt->sqlWarningOrError != NULL)
{
inErrorCode = *(Int32 *)(pSrvrStmt->sqlWarningOrError+8);
inErrorStatement ++;
inSqlError = (char*)pSrvrStmt->sqlWarningOrError + 16;
inSqlErrorLength =*(Int32 *)(pSrvrStmt->sqlWarningOrError + 12);
}
if (*returnCode == SQL_SUCCESS_WITH_INFO)
inWarningStatement ++;
if (pSrvrStmt->rowsAffectedHigherBytes != 0)
inRowCount = -1;
else
inRowCount = *rowsAffected;
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlNewQueryType,
pSrvrStmt->isClosed);
}
}
} // end else
outout:
if (pSrvrStmt != NULL) // pSrvrStmt can be NULL when the statement handle was not found
pSrvrStmt->returnCodeForDelayedError = *returnCode;
SRVRTRACE_EXIT(FILE_SME+19);
return;
} // end odbc_SQLSvc_Execute2withRowsets_sme_
//------------------------------------------------------------------------------
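// rePrepare2: re-prepares an existing statement when the incoming rowset size
// exceeds the size it was prepared with or the holdable-cursor attribute changes.
// The original SQL text and statement attributes are saved and restored across the cleanup.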
extern "C" void
rePrepare2( SRVR_STMT_HDL *pSrvrStmt
, Int32 sqlStmtType
, Int32 inputRowCnt
, Int32 holdableCursor
, SQLRETURN *rc
, Int32 *returnCode
, Int32 *sqlWarningOrErrorLength
, BYTE *&sqlWarningOrError
)
{
UInt32 tmpSqlStringLen = pSrvrStmt->sqlStringLen;
char *tmpSqlString;
short tmpStmtType = pSrvrStmt->stmtType;
short tmpSqlStmtType = sqlStmtType; // need to do this since PREPARE does not pass this from driver
Int32 tmpMaxRowsetSize = pSrvrStmt->maxRowsetSize;
Int32 sqlQueryType;
Int32 estimatedCost;
if (pSrvrStmt->sqlWarningOrErrorLength > 0) // To preserve warning returned at prepare time
{
if (pSrvrStmt->sqlWarningOrError != NULL)
delete pSrvrStmt->sqlWarningOrError;
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
if(pSrvrStmt->bSQLValueListSet)
pSrvrStmt->cleanupSQLValueList();
tmpSqlString = new char[tmpSqlStringLen+1];
if (tmpSqlString == NULL)
{
SendEventMsg( MSG_MEMORY_ALLOCATION_ERROR
, EVENTLOG_ERROR_TYPE
, srvrGlobal->nskProcessInfo.processId
, ODBCMX_SERVER
, srvrGlobal->srvrObjRef
, 1
, "Execute2"
);
exit(0);
}
strcpy(tmpSqlString, pSrvrStmt->sqlString);
// cleanup all memory allocated in the previous operations
pSrvrStmt->cleanupAll();
pSrvrStmt->sqlStringLen = tmpSqlStringLen;
pSrvrStmt->sqlString = new char[pSrvrStmt->sqlStringLen+1];
if (pSrvrStmt->sqlString == NULL)
{
SendEventMsg( MSG_MEMORY_ALLOCATION_ERROR
, EVENTLOG_ERROR_TYPE
, srvrGlobal->nskProcessInfo.processId
, ODBCMX_SERVER
, srvrGlobal->srvrObjRef
, 1
, "Execute2"
);
exit(0);
}
strcpy(pSrvrStmt->sqlString, tmpSqlString);
pSrvrStmt->stmtType = tmpStmtType;
pSrvrStmt->sqlStmtType = tmpSqlStmtType;
pSrvrStmt->maxRowsetSize = inputRowCnt;
pSrvrStmt->holdableCursor= holdableCursor;
if (pSrvrStmt->maxRowsetSize == ROWSET_NOT_DEFINED)
pSrvrStmt->maxRowsetSize = DEFAULT_ROWSET_SIZE;
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->inState = inState = STMTSTAT_PREPARE;
inSqlStmtType = TYPE_UNKNOWN;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlQueryType = SQL_UNKNOWN;
inSqlNewQueryType = SQL_UNKNOWN;
inSqlError = NULL;
inSqlErrorLength = 0;
/*resStatStatement->start(inState,
inSqlQueryType,
pSrvrStmt->stmtName,
NULL,
pSrvrStmt->cost_info,
pSrvrStmt->comp_stats_info,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg,
false,
tmpSqlString);*/
resStatStatement->start(inState,
inSqlQueryType,
pSrvrStmt->stmtName,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg,
tmpSqlString);
}
*rc = REALLOCSQLMXHDLS(pSrvrStmt); // This is a workaround for executor when we switch between OLTP vs NON-OLTP
if (*rc < 0)
{
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
goto out;
}
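// For parameterized INSERT rowsets, pick atomic or non-atomic rowset semantics
// depending on whether rowset error recovery is enabled.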
if (pSrvrStmt->sqlStmtType == TYPE_INSERT_PARAM)
{
if (srvrGlobal->EnvironmentType & MXO_ROWSET_ERROR_RECOVERY)
*rc = WSQL_EXEC_SetStmtAttr(&pSrvrStmt->stmt, SQL_ATTR_ROWSET_ATOMICITY, SQL_NOT_ATOMIC,0);
else
*rc = WSQL_EXEC_SetStmtAttr(&pSrvrStmt->stmt, SQL_ATTR_ROWSET_ATOMICITY, SQL_ATOMIC, 0);
if (*rc < 0)
{
GETSQLWARNINGORERROR2(pSrvrStmt);
goto out;
}
WSQL_EXEC_ClearDiagnostics(&pSrvrStmt->stmt);
}
if (srvrGlobal->srvrType == CORE_SRVR)
AllocateAdaptiveSegment(pSrvrStmt);
pSrvrStmt->currentMethod = odbc_SQLSvc_PrepareRowset_ldx_; // KAS - bug. This should be Prepare2.
if(pSrvrStmt->maxRowsetSize > 1)
*rc = PREPARE2withRowsets(pSrvrStmt);
else
*rc = PREPARE2(pSrvrStmt);
if (srvrGlobal->srvrType == CORE_SRVR && *rc != SQL_SUCCESS && *rc != SQL_SUCCESS_WITH_INFO)
DeallocateAdaptiveSegment(pSrvrStmt);
switch (*rc)
{
case ODBC_RG_WARNING:
case SQL_SUCCESS_WITH_INFO:
*returnCode = SQL_SUCCESS_WITH_INFO;
estimatedCost = (Int32)pSrvrStmt->cost_info.totalTime; // change to double in future
sqlQueryType = pSrvrStmt->sqlQueryType;
if (*rc == SQL_SUCCESS_WITH_INFO)
{
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
else
{
char RGWarningOrError[256];
sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf( RGWarningOrError
, "The query's estimated cost: %.50s exceeded resource management attribute limit set."
, b
);
GETMXCSWARNINGORERROR(1, "01000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
}
break;
case SQL_SUCCESS:
WSQL_EXEC_ClearDiagnostics(&pSrvrStmt->stmt);
estimatedCost = (Int32)pSrvrStmt->cost_info.totalTime; // change to double in future
sqlQueryType = pSrvrStmt->sqlQueryType;
break;
case SQL_ERROR:
case ODBC_RG_ERROR:
*returnCode = SQL_ERROR;
if (*rc == SQL_ERROR)
{
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
else
{
char *RGWarningOrError;
RGWarningOrError = new char[256];
sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf( RGWarningOrError
, "The query's estimated cost: %.50s exceeded resource management attribute limit set."
, b
);
GETMXCSWARNINGORERROR( -1
, "HY000"
, RGWarningOrError
, sqlWarningOrErrorLength
, sqlWarningOrError
);
delete [] RGWarningOrError; // allocated with new char[], so use array delete
}
break;
case PROGRAM_ERROR:
GETMXCSWARNINGORERROR( -1
, "HY000"
, SQLSVC_EXCEPTION_PREPARE_FAILED
, sqlWarningOrErrorLength
, sqlWarningOrError
);
break;
default:
break;
} // end switch
out:
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
if (*returnCode == SQL_ERROR && pSrvrStmt != NULL && pSrvrStmt->sqlWarningOrError != NULL)
{
inErrorCode = *(Int32 *)(pSrvrStmt->sqlWarningOrError+8);
inErrorStatement ++;
inSqlError = (char*)pSrvrStmt->sqlWarningOrError + 16;
inSqlErrorLength =*(Int32 *)(pSrvrStmt->sqlWarningOrError + 12);
}
if (*returnCode == SQL_SUCCESS_WITH_INFO)
inWarningStatement ++;
if (tmpSqlString == NULL)
tmpSqlString = "";
inSqlString = new char[pSrvrStmt->sqlStringLen + 1];
if (inSqlString == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "inSqlString");
exit(0);
}
strncpy(inSqlString, tmpSqlString, pSrvrStmt->sqlStringLen);
inSqlString[pSrvrStmt->sqlStringLen] = '\0'; // ensure the copy is null-terminated
if (pSrvrStmt != NULL)
{
inEstimatedCost = pSrvrStmt->cost_info.totalTime; // res stat reports estimated cost as double
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
inSqlNewQueryType = pSrvrStmt->sqlNewQueryType;
}
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
inSqlNewQueryType);
delete [] inSqlString;
delete [] tmpSqlString;
}
//end rs
} // end rePrepare2
//------------------------------------------------------------------------------
//LCOV_EXCL_START
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_Fetch2'
*/
extern "C" void
odbc_SQLSvc_Fetch2_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ Int32 sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* In */ Long stmtHandle
, /* In */ Int32 maxRowCnt
, /* In */ Int32 cursorLength
, /* In */ IDL_string cursorName
, /* In */ Int32 cursorCharset
, /* Out */ Int32 *returnCode
, /* Out */ Int32 *sqlWarningOrErrorLength
, /* Out */ BYTE *&sqlWarningOrError
, /* Out */ Int32 *rowsAffected
, /* Out */ Int32 *outValuesFormat
, /* Out */ Int32 *outValuesLength
, /* Out */ BYTE *&outValues)
{
SRVRTRACE_ENTER(FILE_SME+20);
SRVR_STMT_HDL *pSrvrStmt;
SQLRETURN rc = SQL_SUCCESS;
if ((pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle) == NULL)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Statement Label not found.", sqlWarningOrErrorLength, sqlWarningOrError);
}
else if (maxRowCnt < 0)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Max row count < 0.", sqlWarningOrErrorLength, sqlWarningOrError);
}
else if (pSrvrStmt->isClosed)
*returnCode = SQL_NO_DATA_FOUND;
else
{
pSrvrStmt->maxRowCnt = maxRowCnt;
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->inState = inState = STMTSTAT_FETCH;
inSqlStmtType = TYPE_UNKNOWN;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlError = NULL;
inSqlErrorLength = 0;
/* resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
pSrvrStmt->stmtName,
pSrvrStmt->sqlUniqueQueryID,
pSrvrStmt->cost_info,
pSrvrStmt->comp_stats_info,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);*/
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
pSrvrStmt->stmtName,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);
} // end if resource statistics
if (pSrvrStmt->sqlWarningOrErrorLength > 0)
{
if (pSrvrStmt->sqlWarningOrError != NULL)
delete pSrvrStmt->sqlWarningOrError;
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
if (cursorLength > 0)
{
pSrvrStmt->cursorNameLen = cursorLength;
memcpy(pSrvrStmt->cursorName, cursorName, cursorLength);
}
else
pSrvrStmt->cursorName[0] = '\0';
pSrvrStmt->currentMethod = odbc_SQLSvc_FetchPerf_ldx_; // KAS - bug. Should be Fetch2
//
// We will either use fetch bulk (also known as fetch "row wise rowsets") or fetch rowsets
// (also known as fetch "column wise rowsets").
//
if (pSrvrStmt->sqlBulkFetchPossible && pSrvrStmt->sqlQueryType == SQL_SELECT_NON_UNIQUE)
{
if (pSrvrStmt->outputDataValue._buffer != NULL)
delete pSrvrStmt->outputDataValue._buffer;
pSrvrStmt->outputDataValue._buffer = NULL;
pSrvrStmt->outputDataValue._length = 0;
rc = FETCH2bulk(pSrvrStmt);
*outValuesFormat = ROWWISE_ROWSETS;
if (pSrvrStmt->rowsAffected > 0)
{
if (pSrvrStmt->outputDataValue._length == 0 && pSrvrStmt->outputDataValue._buffer == NULL)
{
outValues = pSrvrStmt->outputDescVarBuffer;
*outValuesLength = (Int32)(pSrvrStmt->outputDescVarBufferLen * pSrvrStmt->rowsAffected);
}
else
{
outValues = pSrvrStmt->outputDataValue._buffer;
*outValuesLength = (Int32)(pSrvrStmt->outputDataValue._length);
}
}
else
{
outValues = NULL;
*outValuesLength = 0;
}
}
else
rc = FETCH2(pSrvrStmt, outValuesFormat, outValuesLength, outValues);
switch (rc)
{
case SQL_SUCCESS_WITH_INFO:
*returnCode = SQL_SUCCESS_WITH_INFO;
*rowsAffected = pSrvrStmt->rowsAffected;
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
break;
case SQL_SUCCESS:
*returnCode = SQL_SUCCESS;
*rowsAffected = pSrvrStmt->rowsAffected;
break;
case SQL_NO_DATA_FOUND:
*returnCode = SQL_NO_DATA_FOUND;
break;
case SQL_INVALID_HANDLE:
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Statement Handle.", sqlWarningOrErrorLength, sqlWarningOrError);
break;
case SQL_ERROR:
GETSQLWARNINGORERROR2(pSrvrStmt);
*returnCode = SQL_ERROR;
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
break;
default:
break;
} // end switch
if (resStatStatement != NULL && (rc == SQL_NO_DATA_FOUND || (rc == SQL_SUCCESS && *rowsAffected < maxRowCnt)))
{
resStatStatement->setStatistics(pSrvrStmt);
}
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
if (*returnCode == SQL_ERROR && pSrvrStmt->sqlWarningOrError != NULL)
{
inErrorCode = *(Int32 *)(pSrvrStmt->sqlWarningOrError+8);
inErrorStatement ++;
inSqlError = (char*)pSrvrStmt->sqlWarningOrError + 16;
inSqlErrorLength =*(Int32 *)(pSrvrStmt->sqlWarningOrError + 12);
}
if (*returnCode == SQL_SUCCESS_WITH_INFO)
inWarningStatement ++;
inRowCount = *rowsAffected;
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlNewQueryType,
pSrvrStmt->isClosed);
} // end resStatStatement != NULL
} // end if ((pSrvrStmt = (SRVR_STMT_HDL *)stmtHandle) == NULL) else
SRVRTRACE_EXIT(FILE_SME+20);
return;
} // end odbc_SQLSvc_Fetch2_sme_
//LCOV_EXCL_STOP
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_Close'
*/
extern "C" void
odbc_SQLSvc_Close_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_Close_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ const IDL_char *stmtLabel
, /* In */ IDL_unsigned_short freeResourceOpt
, /* Out */ Int32 *rowsAffected
, /* Out */ ERROR_DESC_LIST_def *sqlWarning
)
{
SRVRTRACE_ENTER(FILE_SME+3);
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQLRETURN rc = SQL_SUCCESS;
if (freeResourceOpt != SQL_CLOSE && freeResourceOpt != SQL_DROP &&
freeResourceOpt != SQL_UNBIND && freeResourceOpt != SQL_RESET_PARAMS)
{
exception_->exception_nr = odbc_SQLSvc_Close_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_RESOURCE_OPT_CLOSE;
}
else
{
if ((pSrvrStmt = getSrvrStmt(stmtLabel, FALSE)) == NULL)
goto ret; // Statement was never allocated.
if (freeResourceOpt == SQL_CLOSE && pSrvrStmt->isClosed)
goto ret;
}
if (exception_->exception_nr == 0)
{
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->inState = inState = STMTSTAT_CLOSE;
inSqlStmtType = TYPE_UNKNOWN;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlError = NULL;
inSqlErrorLength = 0;
/*resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt->sqlUniqueQueryID,
pSrvrStmt->cost_info,
pSrvrStmt->comp_stats_info,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);*/
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);
// if SQLClose is called after SQLExecute/SQLExecdirect of select stmt,
// END:SQLFetch/END:QueryExecution still needs to be generated
// if (pSrvrStmt->isClosed == FALSE) pSrvrStmt->bFetchStarted = TRUE;
if (pSrvrStmt->isClosed == FALSE)
{
pSrvrStmt->bFetchStarted = FALSE;
if (exception_->exception_nr != 0 && exception_->u.SQLError.errorList._buffer != NULL)
{
inErrorStatement ++;
ERROR_DESC_def *p_buffer = exception_->u.SQLError.errorList._buffer;
inErrorCode = p_buffer->sqlcode;
inSqlError = p_buffer->errorText;
inSqlErrorLength = strlen(p_buffer->errorText);
}
inQueryId=pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlNewQueryType,
pSrvrStmt->isClosed);
}
}
//end rs
rc = SQL_SUCCESS;
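// Only non-internal statements are actually closed/freed here.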
if (pSrvrStmt->stmtType != INTERNAL_STMT)
{
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
if(pSrvrStmt->bSQLValueListSet)
pSrvrStmt->cleanupSQLValueList();
pSrvrStmt->freeResourceOpt = freeResourceOpt;
pSrvrStmt->currentMethod = odbc_SQLSvc_Close_ldx_;
if (pSrvrStmt->sqlQueryType == SQL_SP_RESULT_SET)
{
// The result set can not be re-used, so remove it completely.
freeResourceOpt = SQL_DROP;
pSrvrStmt->freeResourceOpt = freeResourceOpt;
delete [] pSrvrStmt->SpjProxySyntaxString;
pSrvrStmt->SpjProxySyntaxString = NULL;
pSrvrStmt->SpjProxySyntaxStringLen = 0;
// remove the result set from the call statement
SRVR_STMT_HDL *prev = pSrvrStmt->previousSpjRs;
SRVR_STMT_HDL *next = pSrvrStmt->nextSpjRs;
prev->nextSpjRs = next;
if (next != NULL)
next->previousSpjRs = prev;
// If prev is the call statement itself, and the call statement has no more
// result sets, then close the call statement.
if (prev->sqlQueryType == SQL_CALL_WITH_RESULT_SETS && prev->nextSpjRs == NULL)
rc = FREESTATEMENT(prev);
}
rc = FREESTATEMENT(pSrvrStmt);
// For SQL_DROP the pSrvrStmt has been deleted, so return SQL_SUCCESS immediately.
if (freeResourceOpt == SQL_DROP)
{
rc = SQL_SUCCESS;
}
else
{
switch (rc)
{
case SQL_SUCCESS:
break;
case SQL_SUCCESS_WITH_INFO:
GETSQLWARNING(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlWarning);
break;
case SQL_ERROR:
GETSQLERROR(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError);
break;
case ODBC_RG_WARNING:
// if there is RG_WARNING, we don't pass SQL Warning to the application
// Hence, we need to clear any warnings
// call SQL_EXEC_ClearDiagnostics
// CLEARDIAGNOSTICS(pSrvrStmt);
rc = SQL_SUCCESS_WITH_INFO;
case ODBC_SERVER_ERROR:
case ODBC_RG_ERROR:
default:
break;
}
}
}
switch (rc)
{
case SQL_SUCCESS:
case SQL_SUCCESS_WITH_INFO:
exception_->exception_nr = 0;
if (freeResourceOpt != SQL_DROP)
{
*rowsAffected = pSrvrStmt->rowsAffected;
sqlWarning->_length = pSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = pSrvrStmt->sqlWarning._buffer;
}
else
{
*rowsAffected = 0;
sqlWarning->_length = 0;
sqlWarning->_buffer = NULL;
}
break;
case SQL_ERROR:
exception_->exception_nr = odbc_SQLSvc_Close_SQLError_exn_;
exception_->u.SQLError.errorList._length = pSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = pSrvrStmt->sqlError.errorList._buffer;
break;
case PROGRAM_ERROR:
exception_->exception_nr = odbc_SQLSvc_Close_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_CLOSE_FAILED;
default:
break;
}
}
ret:
/* This code is moved to the beginning of this method since pSrvrStmt is deleted in case of a
SQL_DROP.
// resource statistics
if (resStatStatement != NULL && pSrvrStmt != NULL && pSrvrStmt->isClosed == TRUE && pSrvrStmt->bFetchStarted == TRUE && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->bFetchStarted = FALSE;
if (exception_->exception_nr != 0 && exception_->u.SQLError.errorList._buffer != NULL)
{
inErrorStatement ++;
inErrorCode = exception_->u.SQLError.errorList._buffer->sqlcode;
}
inQueryId=pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
resStatStatement->end(inState,inSqlQueryType,inSqlStmtType,inQueryId,inEstimatedCost,inSqlString,inErrorStatement,inWarningStatement,inRowCount,inErrorCode,resStatSession,pSrvrStmt->isClosed);
}
*/
SRVRTRACE_EXIT(FILE_SME+3);
return;
}
//------------------------------------------------------------------------------
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_Close'
*/
extern "C" void
odbc_SQLSrvr_Close_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ const IDL_char *stmtLabel
, /* In */ IDL_unsigned_short freeResourceOpt
, /* Out */ Int32 *rowsAffected
, /* Out */ Int32 *returnCode
, /* Out */ Int32 *sqlWarningOrErrorLength
, /* Out */ BYTE *&sqlWarningOrError
)
{
SRVRTRACE_ENTER(FILE_SME+3);
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQLRETURN rc = SQL_SUCCESS;
if (freeResourceOpt != SQL_CLOSE && freeResourceOpt != SQL_DROP &&
freeResourceOpt != SQL_UNBIND && freeResourceOpt != SQL_RESET_PARAMS)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", SQLSVC_EXCEPTION_INVALID_RESOURCE_OPT_CLOSE, sqlWarningOrErrorLength, sqlWarningOrError);
}
else
{
pSrvrStmt = getSrvrStmt(stmtLabel, FALSE);
if(pSrvrStmt == NULL)
goto ret; // Statement was never allocated.
else
{
if (pSrvrStmt->sqlWarningOrErrorLength > 0 &&
pSrvrStmt->sqlWarningOrError != NULL)
{
delete pSrvrStmt->sqlWarningOrError;
}
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
if (freeResourceOpt == SQL_CLOSE && pSrvrStmt->isClosed)
goto ret;
}
if (*returnCode == SQL_SUCCESS)
{
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->inState = inState = STMTSTAT_CLOSE;
pSrvrStmt->m_bqueryFinish = true;
inSqlStmtType = TYPE_UNKNOWN;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlError = NULL;
inSqlErrorLength = 0;
/*resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt->sqlUniqueQueryID,
pSrvrStmt->cost_info,
pSrvrStmt->comp_stats_info,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);*/
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);
if (pSrvrStmt->isClosed == FALSE)
{
pSrvrStmt->bFetchStarted = FALSE;
if (*returnCode == SQL_ERROR && pSrvrStmt != NULL && pSrvrStmt->sqlWarningOrError != NULL)
{
inErrorStatement ++;
inErrorCode = *(Int32 *)(pSrvrStmt->sqlWarningOrError+8);
inSqlError = (char*)pSrvrStmt->sqlWarningOrError + 16;
inSqlErrorLength =*(Int32 *)(pSrvrStmt->sqlWarningOrError + 12);
}
inQueryId=pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
if (pSrvrStmt->m_need_21036_end_msg)
{
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlNewQueryType,
pSrvrStmt->isClosed);
}
}
}
//end rs
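// Notify the query services layer that this statement has finished, then publish
// the final repository record if statement statistics are enabled.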
qrysrvcExecuteFinished(NULL, (Long)pSrvrStmt, false, *returnCode, false, false, true);
if ((resStatStatement != NULL) && (pSrvrStmt->stmtType == EXTERNAL_STMT)) // if statement is on
{
resStatStatement->endRepository(pSrvrStmt,
inSqlErrorLength,
(BYTE*)inSqlError,
true);
}
rc = SQL_SUCCESS;
if (pSrvrStmt->stmtType != INTERNAL_STMT)
{
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
if(pSrvrStmt->bSQLValueListSet)
pSrvrStmt->cleanupSQLValueList();
pSrvrStmt->freeResourceOpt = freeResourceOpt;
pSrvrStmt->currentMethod = odbc_SQLSvc_Close_ldx_;
if (pSrvrStmt->sqlQueryType == SQL_SP_RESULT_SET)
{
// The result set can not be re-used, so remove it completely.
freeResourceOpt = SQL_DROP;
pSrvrStmt->freeResourceOpt = freeResourceOpt;
// Added deletion of proxy syntax
delete [] pSrvrStmt->SpjProxySyntaxString;
pSrvrStmt->SpjProxySyntaxString = NULL;
pSrvrStmt->SpjProxySyntaxStringLen = 0;
// remove the result set from the call statement
SRVR_STMT_HDL *prev = pSrvrStmt->previousSpjRs;
SRVR_STMT_HDL *next = pSrvrStmt->nextSpjRs;
prev->nextSpjRs = next;
if (next != NULL)
next->previousSpjRs = prev;
// If prev is the call statement itself, and the call statement has no more
// result sets, then close the call statement.
if (prev->sqlQueryType == SQL_CALL_WITH_RESULT_SETS && prev->nextSpjRs == NULL)
rc = FREESTATEMENT(prev);
}
rc = FREESTATEMENT(pSrvrStmt);
// For SQL_DROP the pSrvrStmt has been deleted, so return SQL_SUCCESS immediately.
if (freeResourceOpt == SQL_DROP)
{
rc = SQL_SUCCESS;
}
else
{
if(rc == SQL_SUCCESS_WITH_INFO ||
rc == SQL_ERROR )
{
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
}
}
switch (rc)
{
case SQL_SUCCESS:
*returnCode = SQL_SUCCESS;
if (freeResourceOpt != SQL_DROP)
*rowsAffected = pSrvrStmt->rowsAffected;
else
*rowsAffected = 0;
break;
case SQL_SUCCESS_WITH_INFO:
*returnCode = SQL_SUCCESS_WITH_INFO;
if (freeResourceOpt != SQL_DROP)
*rowsAffected = pSrvrStmt->rowsAffected;
else
*rowsAffected = 0;
break;
case SQL_ERROR:
*returnCode = SQL_ERROR;
break;
default:
break;
}
}
ret:
/* This code is moved to the beginning of this method since pSrvrStmt is deleted in case of a
SQL_DROP.
// resource statistics
if (resStatStatement != NULL && pSrvrStmt != NULL && pSrvrStmt->isClosed == TRUE && pSrvrStmt->bFetchStarted == TRUE && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->bFetchStarted = FALSE;
if (*returnCode == SQL_ERROR && pSrvrStmt != NULL && pSrvrStmt->sqlWarningOrError != NULL)
{
inErrorStatement ++;
inErrorCode = *(Int32 *)(pSrvrStmt->sqlWarningOrError+8);
}
inQueryId=pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
resStatStatement->end(inState,inSqlQueryType,inSqlStmtType,inQueryId,inEstimatedCost,inSqlString,inErrorStatement,inWarningStatement,inRowCount,inErrorCode,resStatSession,pSrvrStmt->isClosed);
}
*/
SRVRTRACE_EXIT(FILE_SME+3);
return;
} /* odbc_SQLSrvr_Close_sme_() */
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_FetchN'
*/
extern "C" void
odbc_SQLSvc_FetchN_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_FetchN_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ const IDL_char *stmtLabel
, /* In */ Int32 maxRowCnt
, /* In */ Int32 maxRowLen
, /* In */ IDL_short sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* Out */ Int32 *rowsAffected
, /* Out */ SQLValueList_def *outputValueList
, /* Out */ ERROR_DESC_LIST_def *sqlWarning
)
{
SRVRTRACE_ENTER(FILE_SME+4);
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQLRETURN rc = SQL_SUCCESS;
if (maxRowCnt < 0)
{
exception_->exception_nr = odbc_SQLSvc_FetchN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_ROW_COUNT;
}
else
{
if ((pSrvrStmt = getSrvrStmt(stmtLabel, FALSE)) == NULL)
{
exception_->exception_nr = odbc_SQLSvc_FetchN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
}
}
if (exception_->exception_nr == 0)
{
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->isClosed == FALSE && pSrvrStmt->bFetchStarted == FALSE && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->bFetchStarted = TRUE;
pSrvrStmt->inState = inState = STMTSTAT_FETCH;
inSqlStmtType = TYPE_UNKNOWN;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlError = NULL;
inSqlErrorLength = 0;
/*resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt->sqlUniqueQueryID,
pSrvrStmt->cost_info,
pSrvrStmt->comp_stats_info,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);*/
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);
}
//end rs
rc = SQL_SUCCESS;
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
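// Allocate (or grow) the output value buffer so it can hold up to maxRowCnt rows;
// otherwise reuse the existing buffer.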
if (pSrvrStmt->outputValueList._buffer == NULL || pSrvrStmt->maxRowCnt < maxRowCnt)
{
if(pSrvrStmt->bSQLValueListSet)
pSrvrStmt->cleanupSQLValueList();
rc = AllocAssignValueBuffer(pSrvrStmt->bSQLValueListSet,&pSrvrStmt->outputDescList,
&pSrvrStmt->outputValueList, pSrvrStmt->outputDescVarBufferLen,
maxRowCnt, pSrvrStmt->outputValueVarBuffer);
}
else
// Reset the length to 0; the _buffer still points to the array of required SQLValue_defs
pSrvrStmt->outputValueList._length = 0;
if (rc == SQL_SUCCESS)
{
pSrvrStmt->maxRowCnt = maxRowCnt;
pSrvrStmt->maxRowLen = maxRowLen;
pSrvrStmt->currentMethod = odbc_SQLSvc_FetchN_ldx_;
rc = FETCH(pSrvrStmt);
switch (rc)
{
case SQL_SUCCESS:
break;
case SQL_SUCCESS_WITH_INFO:
GETSQLWARNING(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlWarning);
break;
case SQL_ERROR:
GETSQLERROR(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError);
break;
case ODBC_RG_WARNING:
// if there is RG_WARNING, we don't pass SQL Warning to the application
// Hence, we need to clear any warnings
// call SQL_EXEC_ClearDiagnostics
// CLEARDIAGNOSTICS(pSrvrStmt);
rc = SQL_SUCCESS_WITH_INFO;
case ODBC_SERVER_ERROR:
case ODBC_RG_ERROR:
default:
break;
}
}
switch (rc)
{
case SQL_SUCCESS:
case SQL_SUCCESS_WITH_INFO:
exception_->exception_nr = 0;
*rowsAffected = pSrvrStmt->rowsAffected;
outputValueList->_length = pSrvrStmt->outputValueList._length;
outputValueList->_buffer = pSrvrStmt->outputValueList._buffer;
sqlWarning->_length = pSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = pSrvrStmt->sqlWarning._buffer;
break;
case SQL_STILL_EXECUTING:
exception_->exception_nr = odbc_SQLSvc_FetchN_SQLStillExecuting_exn_;
break;
case SQL_INVALID_HANDLE:
exception_->exception_nr = odbc_SQLSvc_FetchN_SQLInvalidHandle_exn_;
break;
case SQL_NO_DATA_FOUND:
exception_->exception_nr = odbc_SQLSvc_FetchN_SQLNoDataFound_exn_;
break;
case SQL_ERROR:
ERROR_DESC_def *error_desc_def;
error_desc_def = pSrvrStmt->sqlError.errorList._buffer;
if (pSrvrStmt->sqlError.errorList._length != 0 && error_desc_def->sqlcode == -8007)
{
exception_->exception_nr = odbc_SQLSvc_FetchN_SQLQueryCancelled_exn_;
exception_->u.SQLQueryCancelled.sqlcode = error_desc_def->sqlcode;
}
else
{
exception_->exception_nr = odbc_SQLSvc_FetchN_SQLError_exn_;
exception_->u.SQLError.errorList._length = pSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = pSrvrStmt->sqlError.errorList._buffer;
}
break;
case PROGRAM_ERROR:
exception_->exception_nr = odbc_SQLSvc_FetchN_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_FETCH_FAILED;
default:
break;
}
if (resStatStatement != NULL && (rc == SQL_NO_DATA_FOUND || rc == SQL_ERROR || ((rc == SQL_SUCCESS || rc == SQL_SUCCESS_WITH_INFO) && *rowsAffected < maxRowCnt)))
resStatStatement->setStatistics(pSrvrStmt);
}
//resource statistics
if (resStatStatement != NULL && pSrvrStmt != NULL && pSrvrStmt->isClosed == TRUE && pSrvrStmt->bFetchStarted == TRUE && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
if (rc == SQL_ERROR && exception_->u.SQLError.errorList._buffer != NULL)
{
ERROR_DESC_def *p_buffer = exception_->u.SQLError.errorList._buffer;
inErrorCode = p_buffer->sqlcode;
inSqlError = p_buffer->errorText;
inSqlErrorLength = strlen(p_buffer->errorText);
}
pSrvrStmt->bFetchStarted = FALSE;
Int32 inMaxRowCnt = 0;
Int32 inMaxRowLen = 0;
inMaxRowCnt = maxRowCnt;
inMaxRowLen = maxRowLen;
if (exception_->exception_nr != 0)
inErrorStatement ++;
else
setStatisticsFlag = FALSE;
if (sqlWarning->_length != 0)
inWarningStatement ++;
if (exception_->exception_nr == 5)
{
inErrorStatement = 0;
inWarningStatement = 0;
setStatisticsFlag = TRUE;
}
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
resStatStatement->setStatisticsFlag(setStatisticsFlag);
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlNewQueryType,
pSrvrStmt->isClosed);
}
// end rs
SRVRTRACE_EXIT(FILE_SME+4);
return;
}
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_EndTransaction'
*/
extern "C" void
odbc_SQLSvc_EndTransaction_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_EndTransaction_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ IDL_unsigned_short transactionOpt
, /* Out */ ERROR_DESC_LIST_def *sqlWarning
)
{
SRVRTRACE_ENTER(FILE_SME+5);
char stmtLabel[MAX_STMT_LABEL_LEN+1];
Int32 rc = SQL_SUCCESS;
SRVR_STMT_HDL *pSrvrStmt = NULL;
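// Route the request to the pre-allocated internal COMMIT or ROLLBACK statement.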
switch (transactionOpt) {
case SQL_COMMIT:
pSrvrStmt = getSrvrStmt("STMT_COMMIT_1", FALSE);
break;
case SQL_ROLLBACK:
pSrvrStmt = getSrvrStmt("STMT_ROLLBACK_1", FALSE);
break;
default:
exception_->exception_nr = odbc_SQLSvc_EndTransaction_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_TRANSACT_OPT;
return;
}
if (pSrvrStmt == NULL)
{
exception_->exception_nr = odbc_SQLSvc_EndTransaction_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
}
else
{
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
if(pSrvrStmt->bSQLValueListSet)
pSrvrStmt->cleanupSQLValueList();
pSrvrStmt->inputRowCnt = 1;
pSrvrStmt->sqlStmtType = TYPE_UNKNOWN;
pSrvrStmt->cursorName[0] = '\0';
pSrvrStmt->cursorNameLen = 0;
pSrvrStmt->inputValueList._buffer = NULL;
pSrvrStmt->inputValueList._length = 0;
pSrvrStmt->currentMethod = odbc_SQLSvc_ExecuteN_ldx_;
rc = EXECUTE(pSrvrStmt);
if (rc == SQL_ERROR)
{
GETSQLERROR(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError);
ERROR_DESC_def *error_desc_def = pSrvrStmt->sqlError.errorList._buffer;
if (pSrvrStmt->sqlError.errorList._length != 0 )
{
if (error_desc_def != NULL && (error_desc_def->sqlcode == -8605 || error_desc_def->sqlcode == -8607 || error_desc_def->sqlcode == -8609))
{
exception_->exception_nr = 0;
sqlWarning->_length = 0;
sqlWarning->_buffer = NULL;
}
else
{
exception_->exception_nr = odbc_SQLSvc_EndTransaction_SQLError_exn_;
exception_->u.SQLError.errorList._length = pSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = pSrvrStmt->sqlError.errorList._buffer;
}
}
else
{
exception_->exception_nr = odbc_SQLSvc_EndTransaction_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_TRANSACT_OPT;
}
}
else if (rc != SQL_SUCCESS)
{
GETSQLWARNING(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlWarning);
exception_->exception_nr = 0;
sqlWarning->_length = pSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = pSrvrStmt->sqlWarning._buffer;
}
}
SRVRTRACE_EXIT(FILE_SME+5);
return;
}
//LCOV_EXCL_START
/*
* Synchronous method function prototype for
* operation 'odbc_SQLSvc_ExecDirect'
*/
extern "C" void
odbc_SQLSvc_ExecDirect_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_ExecDirect_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ const IDL_char *stmtLabel
, /* In */ IDL_string cursorName
, /* In */ const IDL_char *stmtExplainLabel
, /* In */ IDL_short stmtType
, /* In */ IDL_short sqlStmtType
, /* In */ IDL_string sqlString
, /* In */ IDL_short sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* Out */ Int32 *estimatedCost
, /* Out */ SQLItemDescList_def *outputDesc
, /* Out */ Int32 *rowsAffected
, /* Out */ ERROR_DESC_LIST_def *sqlWarning)
{
SRVRTRACE_ENTER(FILE_SME+6);
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQLRETURN rc = SQL_SUCCESS;
Int32 holdestimatedCost;
if (sqlString == NULL)
{
exception_->exception_nr = odbc_SQLSvc_ExecDirect_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_NULL_SQL_STMT;
}
else
{
// resource statistics
if (resStatStatement != NULL && stmtType == EXTERNAL_STMT)
{
inState = STMTSTAT_EXECDIRECT;
inSqlStmtType = sqlStmtType;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlError = NULL;
inSqlErrorLength = 0;
// resStatStatement->start(inState, stmtLabel, NULL, inEstimatedCost, sqlString); called in EXECDIRECT
}
//end rs
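// If a statement with this label already exists, drop it before allocating a new one for this ExecDirect.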
if ((pSrvrStmt = getSrvrStmt(stmtLabel, FALSE)) != NULL)
{
pSrvrStmt->cleanupAll();
pSrvrStmt->currentMethod = odbc_SQLSvc_Close_ldx_;
pSrvrStmt->freeResourceOpt = SQL_DROP;
FREESTATEMENT(pSrvrStmt);
}
// Need to validate the stmtLabel
// Given a label find out the SRVR_STMT_HDL
if ((pSrvrStmt = getSrvrStmt(stmtLabel, TRUE)) == NULL)
{
exception_->exception_nr = odbc_SQLSvc_ExecDirect_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
}
}
if (exception_->exception_nr == 0)
{
pSrvrStmt->cleanupAll();
if (resStatStatement != NULL && stmtType == EXTERNAL_STMT)
pSrvrStmt->inState = inState;
pSrvrStmt->sqlStringLen = strlen(sqlString);
pSrvrStmt->sqlString = new char[pSrvrStmt->sqlStringLen+1];
if (pSrvrStmt->sqlString == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "ExecDirect");
exit(0);
}
strcpy(pSrvrStmt->sqlString, sqlString);
pSrvrStmt->sqlStmtType = sqlStmtType;
if (exception_->exception_nr == 0)
{
pSrvrStmt->stmtType = stmtType;
if (cursorName != NULL && cursorName[0] != '\0')
{
pSrvrStmt->cursorNameLen = strlen(cursorName);
pSrvrStmt->cursorNameLen = pSrvrStmt->cursorNameLen < sizeof(pSrvrStmt->cursorName)? pSrvrStmt->cursorNameLen : sizeof(pSrvrStmt->cursorName);
strncpy(pSrvrStmt->cursorName, cursorName, sizeof(pSrvrStmt->cursorName));
pSrvrStmt->cursorName[sizeof(pSrvrStmt->cursorName)-1] = 0;
}
else
pSrvrStmt->cursorName[0] = '\0';
pSrvrStmt->currentMethod = odbc_SQLSvc_ExecDirect_ldx_;
rc = EXECDIRECT(pSrvrStmt);
switch (rc)
{
case SQL_SUCCESS:
break;
case SQL_SUCCESS_WITH_INFO:
GETSQLWARNING(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlWarning);
break;
case SQL_ERROR:
GETSQLERROR(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError);
break;
case ODBC_RG_WARNING:
// if there is RG_WARNING, we don't pass SQL Warning to the application
// Hence, we need to clear any warnings
// call SQL_EXEC_ClearDiagnostics
// CLEARDIAGNOSTICS(pSrvrStmt);
rc = SQL_SUCCESS_WITH_INFO;
case ODBC_SERVER_ERROR:
case ODBC_RG_ERROR:
default:
break;
}
switch (rc)
{
case SQL_SUCCESS:
case SQL_SUCCESS_WITH_INFO:
break;
default:
break;
}
switch (rc)
{
case SQL_SUCCESS:
case SQL_SUCCESS_WITH_INFO:
exception_->exception_nr = 0;
// Vijay - Changes to avoid parsing tokens for statement type SELECT
holdestimatedCost = (Int32)pSrvrStmt->cost_info.totalTime; // SQL returns the cost in a structure - cost_info.totalTime
if ((pSrvrStmt->sqlQueryType == SQL_SELECT_NON_UNIQUE) || (pSrvrStmt->sqlQueryType == SQL_SELECT_UNIQUE))
pSrvrStmt->sqlStmtType = TYPE_SELECT;
*estimatedCost = pSrvrStmt->sqlQueryType;
*rowsAffected = pSrvrStmt->rowsAffected;
outputDesc->_length = pSrvrStmt->outputDescList._length;
outputDesc->_buffer = pSrvrStmt->outputDescList._buffer;
sqlWarning->_length = pSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = pSrvrStmt->sqlWarning._buffer;
break;
case SQL_STILL_EXECUTING:
exception_->exception_nr = odbc_SQLSvc_ExecDirect_SQLStillExecuting_exn_;
break;
case ODBC_RG_ERROR:
case SQL_ERROR:
case INFOSTATS_STMT_NOT_FOUND:
ERROR_DESC_def *error_desc_def;
error_desc_def = pSrvrStmt->sqlError.errorList._buffer;
if (pSrvrStmt->sqlError.errorList._length != 0 && error_desc_def->sqlcode == -8007)
{
exception_->exception_nr = odbc_SQLSvc_ExecDirect_SQLQueryCancelled_exn_;
exception_->u.SQLQueryCancelled.sqlcode = error_desc_def->sqlcode;
}
else
{
exception_->exception_nr = odbc_SQLSvc_ExecDirect_SQLError_exn_;
exception_->u.SQLError.errorList._length = pSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = pSrvrStmt->sqlError.errorList._buffer;
}
break;
case PROGRAM_ERROR:
exception_->exception_nr = odbc_SQLSvc_ExecDirect_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECDIRECT_FAILED;
break;
default:
break;
}
if (resStatStatement != NULL)
resStatStatement->setStatistics(pSrvrStmt);
}
}
// resource statistics
if (resStatStatement != NULL && stmtType == EXTERNAL_STMT)
{
if (exception_->exception_nr != 0 && exception_->u.SQLError.errorList._buffer != NULL)
{
inErrorStatement ++;
ERROR_DESC_def *p_buffer = exception_->u.SQLError.errorList._buffer;
inErrorCode = p_buffer->sqlcode;
inSqlError = p_buffer->errorText;
inSqlErrorLength = strlen(p_buffer->errorText);
}
if (sqlWarning->_length != 0)
inWarningStatement ++;
inRowCount = *rowsAffected;
if (sqlString == NULL)
sqlString = "";
inSqlString = new char[strlen(sqlString) + 1];
if (inSqlString == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "inSqlString");
exit(0);
}
strcpy(inSqlString,sqlString);
if (pSrvrStmt != NULL)
{
if (holdestimatedCost == -1)
inEstimatedCost = 0;
else
inEstimatedCost = pSrvrStmt->cost_info.totalTime; // res stat reports estimated cost as double
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
inSqlNewQueryType = pSrvrStmt->sqlNewQueryType;
}
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
inSqlNewQueryType,
pSrvrStmt->isClosed);
delete [] inSqlString;
}
//end rs
SRVRTRACE_EXIT(FILE_SME+6);
return;
}
//LCOV_EXCL_STOP
//LCOV_EXCL_START
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_FetchPerf'
*/
extern "C" void
odbc_SQLSvc_FetchPerf_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_FetchPerf_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ const IDL_char *stmtLabel
, /* In */ Int32 maxRowCnt
, /* In */ Int32 maxRowLen
, /* In */ IDL_short sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* Out */ Int32 *rowsAffected
, /* Out */ SQL_DataValue_def *outputDataValue
, /* Out */ ERROR_DESC_LIST_def *sqlWarning)
{
SRVRTRACE_ENTER(FILE_SME+8);
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQLRETURN rc = SQL_SUCCESS;
if (maxRowCnt < 0)
{
exception_->exception_nr = odbc_SQLSvc_FetchPerf_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_ROW_COUNT;
}
else
{
if ((pSrvrStmt = getSrvrStmt(stmtLabel, FALSE)) == NULL)
{
exception_->exception_nr = odbc_SQLSvc_FetchPerf_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
}
}
if (exception_->exception_nr == 0)
{
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->isClosed == FALSE && pSrvrStmt->bFetchStarted == FALSE && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->bFetchStarted = TRUE;
pSrvrStmt->inState = inState = STMTSTAT_FETCH;
inSqlStmtType = TYPE_UNKNOWN;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlError = NULL;
inSqlErrorLength = 0;
/*resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt->sqlUniqueQueryID,
pSrvrStmt->cost_info,
pSrvrStmt->comp_stats_info,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);*/
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);
}
// end rs
if (pSrvrStmt->sqlStmtType != TYPE_SELECT_CATALOG)
{
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
pSrvrStmt->outputDataValue._length = 0;
pSrvrStmt->outputDataValue._buffer = 0;
if (pSrvrStmt->isClosed)
{
exception_->exception_nr = odbc_SQLSvc_FetchPerf_SQLNoDataFound_exn_;
goto ret;
}
pSrvrStmt->currentMethod = odbc_SQLSvc_FetchPerf_ldx_;
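// Use row-wise bulk fetch when the plan supports it; otherwise fall back to FETCHPERF.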
if (pSrvrStmt->sqlBulkFetchPossible && pSrvrStmt->sqlQueryType == SQL_SELECT_NON_UNIQUE)
{
if (pSrvrStmt->outputDataValue._buffer != NULL)
delete pSrvrStmt->outputDataValue._buffer;
pSrvrStmt->outputDataValue._buffer = NULL;
pSrvrStmt->outputDataValue._length = 0;
rc = FETCH2bulk(pSrvrStmt);
if (pSrvrStmt->rowsAffected > 0)
{
if(pSrvrStmt->outputDataValue._length == 0 && pSrvrStmt->outputDataValue._buffer == NULL)
{
outputDataValue->_buffer = pSrvrStmt->outputDescVarBuffer;
outputDataValue->_length = pSrvrStmt->outputDescVarBufferLen*pSrvrStmt->rowsAffected;
}
else
{
outputDataValue->_buffer = pSrvrStmt->outputDataValue._buffer;
outputDataValue->_length = pSrvrStmt->outputDataValue._length;
}
}
else
{
outputDataValue->_buffer = NULL;
outputDataValue->_length = 0;
}
// if (pSrvrStmt->PerfFetchRetcode == SQL_NO_DATA_FOUND)
// {
// char tmpString[32];
// tmpString[0] = '\0';
// sprintf(tmpString, "f: %Ld %d", pSrvrStmt->cliElapseTime, pSrvrStmt->rowsAffected);
// SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
// srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
// srvrGlobal->srvrObjRef, 1, tmpString);
// }
}
else
{
pSrvrStmt->maxRowCnt = maxRowCnt;
pSrvrStmt->maxRowLen = maxRowLen;
rc = FETCHPERF(pSrvrStmt, outputDataValue);
}
switch (rc)
{
case ODBC_SERVER_ERROR:
case ODBC_RG_ERROR:
default:
break;
}
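			// Note: the cases below intentionally fall through -- a resource-governing
			// warning (ODBC_RG_WARNING) is downgraded to SQL_SUCCESS_WITH_INFO, which in
			// turn collects SQL warnings before the common SQL_SUCCESS handling.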
switch (rc)
{
case ODBC_RG_WARNING:
// if there is RG_WARNING, we don't pass SQL Warning to the application
// Hence, we need to clear any warnings
// call SQL_EXEC_ClearDiagnostics
// CLEARDIAGNOSTICS(pSrvrStmt);
rc = SQL_SUCCESS_WITH_INFO;
case SQL_SUCCESS_WITH_INFO:
GETSQLWARNING(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlWarning);
case SQL_SUCCESS:
exception_->exception_nr = 0;
*rowsAffected = pSrvrStmt->rowsAffected;
sqlWarning->_length = pSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = pSrvrStmt->sqlWarning._buffer;
break;
case SQL_STILL_EXECUTING:
exception_->exception_nr = odbc_SQLSvc_FetchPerf_SQLStillExecuting_exn_;
break;
case SQL_INVALID_HANDLE:
exception_->exception_nr = odbc_SQLSvc_FetchPerf_SQLInvalidHandle_exn_;
break;
case SQL_NO_DATA_FOUND:
exception_->exception_nr = odbc_SQLSvc_FetchPerf_SQLNoDataFound_exn_;
break;
case SQL_ERROR:
GETSQLERROR(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError);
ERROR_DESC_def *error_desc_def;
error_desc_def = pSrvrStmt->sqlError.errorList._buffer;
if (pSrvrStmt->sqlError.errorList._length != 0 && error_desc_def->sqlcode == -8007)
{
exception_->exception_nr = odbc_SQLSvc_FetchPerf_SQLQueryCancelled_exn_;
exception_->u.SQLQueryCancelled.sqlcode = error_desc_def->sqlcode;
}
else
{
exception_->exception_nr = odbc_SQLSvc_FetchPerf_SQLError_exn_;
exception_->u.SQLError.errorList._length = pSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = pSrvrStmt->sqlError.errorList._buffer;
}
break;
case PROGRAM_ERROR:
exception_->exception_nr = odbc_SQLSvc_FetchPerf_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_FETCH_FAILED;
default:
break;
}
}
else
{ // Catalog APIs
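			// Catalog API result sets are fetched row-at-a-time; errors are taken
			// directly from pSrvrStmt->sqlError, and a row count of 0 or -1 is treated
			// as end of data, closing the statement.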
pSrvrStmt->maxRowCnt = maxRowCnt;
pSrvrStmt->maxRowLen = maxRowLen;
rc = FETCHPERF(pSrvrStmt, outputDataValue);
if (pSrvrStmt->sqlError.errorList._buffer != NULL)
{
rc = SQL_ERROR;
ERROR_DESC_def *error_desc_def;
error_desc_def = pSrvrStmt->sqlError.errorList._buffer;
if (pSrvrStmt->sqlError.errorList._length != 0 && error_desc_def->sqlcode == -8007)
{
exception_->exception_nr = odbc_SQLSvc_FetchPerf_SQLQueryCancelled_exn_;
exception_->u.SQLQueryCancelled.sqlcode = error_desc_def->sqlcode;
}
else
{
exception_->exception_nr = odbc_SQLSvc_FetchPerf_SQLError_exn_;
exception_->u.SQLError.errorList._length = pSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = pSrvrStmt->sqlError.errorList._buffer;
}
}
else if (pSrvrStmt->rowsAffected == 0 || pSrvrStmt->rowsAffected == -1)
{
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
pSrvrStmt->outputDataValue._length = 0;
pSrvrStmt->outputDataValue._buffer = 0;
pSrvrStmt->InternalStmtClose(SQL_CLOSE);
rc = SQL_NO_DATA_FOUND;
exception_->exception_nr = odbc_SQLSvc_FetchPerf_SQLNoDataFound_exn_;
}
else
{
exception_->exception_nr = 0;
*rowsAffected = pSrvrStmt->rowsAffected;
outputDataValue->_length = pSrvrStmt->outputDataValue._length;
outputDataValue->_buffer = pSrvrStmt->outputDataValue._buffer;
sqlWarning->_length = pSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = pSrvrStmt->sqlWarning._buffer;
if (pSrvrStmt->sqlWarning._length != 0)
rc = SQL_SUCCESS_WITH_INFO;
else
rc = SQL_SUCCESS;
pSrvrStmt->rowsAffected = 0;
}
}
ret:
if (exception_->exception_nr != 0)
{
if (pSrvrStmt->outputDataValue._buffer != NULL)
delete pSrvrStmt->outputDataValue._buffer;
pSrvrStmt->outputDataValue._length = 0;
pSrvrStmt->outputDataValue._buffer = NULL;
}
if (resStatStatement != NULL && pSrvrStmt->bFetchStarted == TRUE &&
(rc == SQL_NO_DATA_FOUND || rc == SQL_ERROR ||
((rc == SQL_SUCCESS || rc == SQL_SUCCESS_WITH_INFO) && *rowsAffected < maxRowCnt)))
{
resStatStatement->setStatistics(pSrvrStmt);
}
}
// resource statistics
if (resStatStatement != NULL && pSrvrStmt != NULL && pSrvrStmt->isClosed == TRUE && pSrvrStmt->bFetchStarted == TRUE && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
if (rc == SQL_ERROR && exception_->u.SQLError.errorList._buffer != NULL)
{
ERROR_DESC_def *p_buffer = exception_->u.SQLError.errorList._buffer;
inErrorCode = p_buffer->sqlcode;
inSqlError = p_buffer->errorText;
inSqlErrorLength = strlen(p_buffer->errorText);
}
pSrvrStmt->bFetchStarted = FALSE;
Int32 inMaxRowCnt = 0;
Int32 inMaxRowLen = 0;
inMaxRowCnt = maxRowCnt;
inMaxRowLen = maxRowLen;
if (exception_->exception_nr != 0)
inErrorStatement ++;
else
setStatisticsFlag = FALSE;
if (sqlWarning->_length != 0)
inWarningStatement ++;
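		// exception_nr 5 appears to correspond to the no-data-found condition for this
		// operation; end of fetch is not counted as an error and statistics are still reported.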
if (exception_->exception_nr == 5)
{
inErrorStatement = 0;
inWarningStatement = 0;
setStatisticsFlag = TRUE;
}
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
resStatStatement->setStatisticsFlag(setStatisticsFlag);
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlNewQueryType,
pSrvrStmt->isClosed);
}
//end rs
SRVRTRACE_EXIT(FILE_SME+8);
return;
}
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_ExecuteCall'
*/
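//
// Executes a previously prepared CALL statement: the caller's input values are
// attached to the statement, an output value buffer is allocated if needed, and
// output parameter values are returned in outputValueList. The JDBC T4 driver
// also uses this entry point for statements that return at most one row.
//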
extern "C" void
odbc_SQLSvc_ExecuteCall_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_ExecuteCall_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ const IDL_char *stmtLabel
, /* In */ IDL_string cursorName
, /* In */ IDL_short sqlStmtType
, /* In */ Int32 inputRowCnt
, /* In */ const SQLValueList_def *inputValueList
, /* In */ IDL_short sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* Out */ SQLValueList_def *outputValueList
, /* Out */ Int32 *rowsAffected
, /* Out */ ERROR_DESC_LIST_def *sqlWarning)
{
SRVRTRACE_ENTER(FILE_SME+13);
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQLRETURN rc = SQL_SUCCESS;
/*
// resource statistics
inState = STMTSTAT_EXECUTE;
inSqlStmtType = sqlStmtType;
inEstimatedCost = 0;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
if (resStatStatement != NULL)
resStatStatement->start(inState, stmtLabel);
//end rs
*/
if ((pSrvrStmt = getSrvrStmt(stmtLabel, FALSE)) == NULL)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteCall_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
return;
}
if (exception_->exception_nr == 0)
{
rc = SQL_SUCCESS;
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
if(pSrvrStmt->bSQLValueListSet)
pSrvrStmt->cleanupSQLValueList();
pSrvrStmt->inputValueList._buffer = inputValueList->_buffer;
pSrvrStmt->inputValueList._length = inputValueList->_length;
if (pSrvrStmt->outputValueList._buffer == NULL)
{
rc = AllocAssignValueBuffer(pSrvrStmt->bSQLValueListSet,&pSrvrStmt->outputDescList,
&pSrvrStmt->outputValueList, pSrvrStmt->outputDescVarBufferLen,
1, pSrvrStmt->outputValueVarBuffer);
}
else
pSrvrStmt->outputValueList._length = 0;
if (rc == SQL_SUCCESS)
{
pSrvrStmt->currentMethod = odbc_SQLSvc_ExecuteCall_ldx_;
rc = EXECUTECALL(pSrvrStmt);
switch (rc)
{
case SQL_SUCCESS:
break;
case SQL_SUCCESS_WITH_INFO:
GETSQLWARNING(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlWarning);
break;
case SQL_ERROR:
GETSQLERROR(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError);
break;
case ODBC_RG_WARNING:
// if there is RG_WARNING, we don't pass SQL Warning to the application
// Hence, we need to clear any warnings
// call SQL_EXEC_ClearDiagnostics
// CLEARDIAGNOSTICS(pSrvrStmt);
rc = SQL_SUCCESS_WITH_INFO;
case ODBC_SERVER_ERROR:
case ODBC_RG_ERROR:
default:
break;
}
switch (rc)
{
case SQL_SUCCESS:
case SQL_SUCCESS_WITH_INFO:
break;
case ODBC_SERVER_ERROR:
// Allocate Error Desc
kdsCreateSQLErrorException(pSrvrStmt->bSQLMessageSet, &pSrvrStmt->sqlError, 1);
// Add SQL Error
kdsCopySQLErrorException(&pSrvrStmt->sqlError, NULL_VALUE_ERROR, NULL_VALUE_ERROR_SQLCODE,
NULL_VALUE_ERROR_SQLSTATE);
break;
case -8814:
case 8814:
rc = SQL_RETRY_COMPILE_AGAIN;
break;
default:
break;
}
}
switch (rc)
{
case SQL_SUCCESS:
case SQL_SUCCESS_WITH_INFO:
exception_->exception_nr = 0;
// Copy the output values
*rowsAffected = 0;
outputValueList->_length = pSrvrStmt->outputValueList._length;
outputValueList->_buffer = pSrvrStmt->outputValueList._buffer;
sqlWarning->_length = pSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = pSrvrStmt->sqlWarning._buffer;
break;
case SQL_STILL_EXECUTING:
exception_->exception_nr = odbc_SQLSvc_ExecuteCall_SQLStillExecuting_exn_;
break;
case SQL_INVALID_HANDLE:
exception_->exception_nr = odbc_SQLSvc_ExecuteCall_SQLInvalidHandle_exn_;
break;
case SQL_NO_DATA_FOUND:
			// Added for the JDBC T4 driver, since it uses this interface to fetch zero or
			// one row for SELECT statements in addition to CALL statements.
exception_->exception_nr = 100;
break;
case SQL_NEED_DATA:
exception_->exception_nr = odbc_SQLSvc_ExecuteCall_SQLNeedData_exn_;
break;
case ODBC_SERVER_ERROR:
case SQL_ERROR:
ERROR_DESC_def *error_desc_def;
error_desc_def = pSrvrStmt->sqlError.errorList._buffer;
if (pSrvrStmt->sqlError.errorList._length != 0 && error_desc_def->sqlcode == -8007)
{
exception_->exception_nr = odbc_SQLSvc_ExecuteCall_SQLQueryCancelled_exn_;
exception_->u.SQLQueryCancelled.sqlcode = error_desc_def->sqlcode;
}
else
{
exception_->exception_nr = odbc_SQLSvc_ExecuteCall_SQLError_exn_;
exception_->u.SQLError.errorList._length = pSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = pSrvrStmt->sqlError.errorList._buffer;
}
break;
case -8814:
case 8814:
exception_->exception_nr = odbc_SQLSvc_ExecuteCall_SQLRetryCompile_exn_;
break;
case PROGRAM_ERROR:
exception_->exception_nr = odbc_SQLSvc_ExecuteCall_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECUTE_FAILED;
default:
break;
}
/* SQL doesn't return statistics for Call statement?
if (resStatStatement != NULL && (srvrGlobal->resourceStatistics & STMTSTAT_EXECUTE))
resStatStatement->setStatistics(pSrvrStmt);
*/
}
/* SQL doesn't return statistics for Call statement?
// resource statistics
if (exception_.exception_nr != 0 && exception_.u.SQLError.errorList._buffer != NULL)
{
inErrorCode = exception_.u.SQLError.errorList._buffer->sqlcode;
inErrorStatement ++;
}
if (sqlWarning._length != 0)
inWarningStatement ++;
inRowCount = rowsAffected;
if (resStatStatement != NULL)
resStatStatement->end(inState,inSqlStmtType,inEstimatedCost,inSqlString,inErrorStatement,inWarningStatement,inRowCount,inErrorCode, resStatSession);
//end rs
*/
SRVRTRACE_EXIT(FILE_SME+13);
return;
}
//LCOV_EXCL_STOP
/*
 * Synchronous method function for
 * operation 'odbc_SQLSvc_GetSQLCatalogs'
 */
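//
// Builds a metadata query against the TRAFODION."_MD_" tables according to
// APIType (SQLTables, SQLColumns, SQLPrimaryKeys, ...), then prepares and
// executes it on a separate server statement; the result set description is
// returned to the client through outputDesc under catStmtLabel.
//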
extern "C" void
odbc_SQLSvc_GetSQLCatalogs_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_GetSQLCatalogs_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ const IDL_char *stmtLabel
, /* In */ IDL_short APIType
, /* In */ const IDL_char *catalogNm
, /* In */ const IDL_char *schemaNm
, /* In */ const IDL_char *tableNm
, /* In */ const IDL_char *tableTypeList
, /* In */ const IDL_char *columnNm
, /* In */ Int32 columnType
, /* In */ Int32 rowIdScope
, /* In */ Int32 nullable
, /* In */ Int32 uniqueness
, /* In */ Int32 accuracy
, /* In */ IDL_short sqlType
, /* In */ UInt32 metadataId
, /* In */ const IDL_char *fkcatalogNm
, /* In */ const IDL_char *fkschemaNm
, /* In */ const IDL_char *fktableNm
, /* Out */ IDL_char *catStmtLabel
, /* Out */ SQLItemDescList_def *outputDesc
, /* Out */ ERROR_DESC_LIST_def *sqlWarning
)
{
SRVRTRACE_ENTER(FILE_SME+14);
enum CATAPI_TABLE_INDEX {
COLUMNS = 0,
DEFAULTS,
INDEXES,
KEYS,
OBJECTS,
OBJECTUID,
TABLES,
VIEWS,
VIEWS_USAGE,
VERSIONS
};
char *smdCatAPITablesList[] = {
"COLUMNS",
"DEFAULTS",
"INDEXES",
"KEYS",
"OBJECTS",
"OBJECTUID",
"TABLES",
"VIEWS",
"VIEWS_USAGE",
"VERSIONS"
};
char CatalogQuery[20000];
SRVR_STMT_HDL *QryCatalogSrvrStmt = NULL;
odbc_SQLSvc_Prepare_exc_ prepareException;
odbc_SQLSvc_ExecuteN_exc_ executeException;
char *inputParam[16];
char *tableParam[20];
short retCode;
char RequestError[200 + 1];
char ConvertAPITypeToString[30];
Int32 curRowNo = 0;
Int32 numOfCols = 0;
Int32 curColNo = 0;
Int32 rowsAffected = 0;
Int32 rowsFetched;
char SQLObjType[2];
short EnvSetting = 0;
char MapDataType[2] = "0";
short retcode = 0;
char tmpBuf[20];
char odbcAppVersion[20];
char lc_tableTypeList[MAX_ANSI_NAME_LEN+1];
char *token;
char* saveptr;
odbc_SQLSvc_FetchN_exc_ fetchException;
odbc_SQLSvc_Close_exc_ CloseException;
CloseException.exception_nr=0;
fetchException.exception_nr=0;
if (resStatSession != NULL)
resStatSession->totalCatalogStatements++;
char catalogNmNoEsc[MAX_ANSI_NAME_LEN+1];
char schemaNmNoEsc[MAX_ANSI_NAME_LEN+1];
char tableNmNoEsc[MAX_ANSI_NAME_LEN+1];
char columnNmNoEsc[MAX_ANSI_NAME_LEN+1];
char expCatalogNm[MAX_ANSI_NAME_LEN+1];
char expSchemaNm[MAX_ANSI_NAME_LEN+1];
char expTableNm[MAX_ANSI_NAME_LEN+1];
char expColumnNm[MAX_ANSI_NAME_LEN+1];
char tableName1[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
char tableName2[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
char tableName3[MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+MAX_ANSI_NAME_LEN+3];
_itoa(srvrGlobal->appVersion.majorVersion, odbcAppVersion, 10);
if(diagnostic_flags)
{
switch (APIType)
{
case SQL_API_SQLTABLES:
sprintf(ConvertAPITypeToString, "SQLTables (%d)", SQL_API_SQLTABLES);
break;
case SQL_API_SQLCOLUMNS:
sprintf(ConvertAPITypeToString, "SQLColumns (%d)", SQL_API_SQLCOLUMNS);
break;
case SQL_API_SQLCOLUMNPRIVILEGES:
sprintf(ConvertAPITypeToString, "SQLColumnPrivileges (%d)", SQL_API_SQLCOLUMNPRIVILEGES);
break;
case SQL_API_SQLFOREIGNKEYS:
sprintf(ConvertAPITypeToString, "SQLForeignKeys (%d)", SQL_API_SQLFOREIGNKEYS);
break;
case SQL_API_SQLPRIMARYKEYS:
sprintf(ConvertAPITypeToString, "SQLPrimaryKeys (%d)", SQL_API_SQLPRIMARYKEYS);
break;
case SQL_API_SQLSPECIALCOLUMNS:
sprintf(ConvertAPITypeToString, "SQLSpecialColumns (%d)", SQL_API_SQLSPECIALCOLUMNS);
break;
case SQL_API_SQLSTATISTICS:
sprintf(ConvertAPITypeToString, "SQLStatistics (%d)", SQL_API_SQLSTATISTICS);
break;
case SQL_API_SQLTABLEPRIVILEGES:
sprintf(ConvertAPITypeToString, "SQLTablePrivileges (%d)", SQL_API_SQLTABLEPRIVILEGES);
break;
case SQL_API_SQLGETTYPEINFO:
sprintf(ConvertAPITypeToString, "SQLGetTypeInfo (%d)", SQL_API_SQLGETTYPEINFO);
break;
case SQL_API_SQLPROCEDURES:
sprintf(ConvertAPITypeToString, "SQLProcedures (%d)", SQL_API_SQLPROCEDURES);
break;
case SQL_API_SQLPROCEDURECOLUMNS:
sprintf(ConvertAPITypeToString, "SQLProcedureColumns (%d)", SQL_API_SQLPROCEDURECOLUMNS);
break;
case SQL_API_TBLSYNONYM:
sprintf(ConvertAPITypeToString, "SQLTblsynonym (%d)", SQL_API_TBLSYNONYM);
break;
case SQL_API_TBLMVS:
sprintf(ConvertAPITypeToString, "SQLTblMvs (%d)", SQL_API_TBLMVS);
break;
default:
sprintf(ConvertAPITypeToString, "Invalid Catalog API (%d)", APIType);
break;
}
TraceOut(TR_SRVR_KRYPTON_API,"odbc_SQLSvc_GetSQLCatalogs_sme_(%#x, %#x, %#x, %ld, %s, %s, %s, %s, %s, %s, %s, %ld, %ld, %ld, %ld, %ld, %d, %s, %#x, %#x)",
objtag_,
call_id_,
exception_,
dialogueId,
stmtLabel,
ConvertAPITypeToString,
catalogNm,
schemaNm,
tableNm,
tableTypeList,
columnNm,
columnType,
rowIdScope,
nullable,
uniqueness,
accuracy,
sqlType,
fkcatalogNm,
fkschemaNm,
fktableNm,
catStmtLabel,
outputDesc,
sqlWarning);
}
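	// Drop any existing statement that is still using stmtLabel so the label can be
	// reused for the catalog query built below.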
if (stmtLabel != NULL)
odbc_SQLSvc_Close_sme_(objtag_, call_id_, &CloseException, dialogueId, stmtLabel,
SQL_DROP, &rowsAffected, sqlWarning);
if ((tableNm[0] == '\0') && (columnNm[0] == '\0') && (metadataId == 1))
metadataId = 0;
if (APIType != SQL_API_SQLTABLES && APIType != SQL_API_SQLTABLES_JDBC)
{
if (tableNm[0] == '\0')
strcpy((char *)tableNm,"%");
if (columnNm[0] == '\0')
strcpy((char *)columnNm,"%");
}
exception_->exception_nr = 0;
CatalogQuery[0] = '\0';
strcpy(catStmtLabel, stmtLabel);
switch(APIType)
{
case SQL_API_SQLTABLES :
case SQL_API_SQLTABLES_JDBC :
if ((strcmp(catalogNm,"%") == 0) && (strcmp(schemaNm,"") == 0) && (strcmp(tableNm,"") == 0))
{
strcpy(catalogNmNoEsc, SEABASE_MD_CATALOG);
inputParam[0] = catalogNmNoEsc;
inputParam[1] = inputParam[0];
inputParam[2] = NULL;
if (APIType == SQL_API_SQLTABLES)
{
// strcpy((char *)catStmtLabel, "SQL_TABLES_ANSI_Q1");
snprintf(CatalogQuery, sizeof(CatalogQuery),
"select distinct(cast('%s' as varchar(128))) TABLE_CAT, "
"cast(NULL as varchar(128) ) TABLE_SCHEM, "
"cast(NULL as varchar(128) ) TABLE_NAME, "
"cast(NULL as varchar(128) ) TABLE_TYPE, "
"cast(NULL as varchar(128)) REMARKS "
"from TRAFODION.\"_MD_\".objects "
"where CATALOG_NAME = '%s' "
"FOR READ UNCOMMITTED ACCESS ORDER BY 4,1,2,3;",
inputParam[0], inputParam[1]);
}
else
{
// strcpy((char *)catStmtLabel, "SQL_JAVA_TABLES_ANSI_Q1");
snprintf(CatalogQuery, sizeof(CatalogQuery),
"select distinct(cast('%s' as varchar(128) )) TABLE_CAT "
"from TRAFODION.\"_MD_\".objects "
"where CATALOG_NAME = '%s' "
"FOR READ UNCOMMITTED ACCESS ORDER BY 1;",
inputParam[0], inputParam[1]);
}
}
else
if ((strcmp(catalogNm,"") == 0) && (strcmp(schemaNm,"%") == 0) && (strcmp(tableNm,"") == 0))
{
convertWildcard(metadataId, TRUE, schemaNm, expSchemaNm);
strcpy(catalogNmNoEsc, SEABASE_MD_CATALOG);
inputParam[0] = catalogNmNoEsc;
inputParam[1] = inputParam[0];
inputParam[2] = (char*) schemaNm;
inputParam[3] = expSchemaNm;
inputParam[4] = NULL;
if (APIType == SQL_API_SQLTABLES)
{
// strcpy((char *)catStmtLabel, "SQL_TABLES_ANSI_Q2");
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select distinct cast(NULL as varchar(128) ) TABLE_CAT, "
"cast(trim(SCHEMA_NAME) as varchar(128) ) TABLE_SCHEM, "
"cast(NULL as varchar(128) ) TABLE_NAME, "
"cast(NULL as varchar(128) ) TABLE_TYPE, "
"cast(NULL as varchar(128)) REMARKS "
"from TRAFODION.\"_MD_\".objects "
"where "
"(CATALOG_NAME = '%s' or "
" CATALOG_NAME LIKE '%s' ESCAPE '\\') "
"and (SCHEMA_NAME = '%s' or "
"SCHEMA_NAME LIKE '%s' ESCAPE '\\') "
"FOR READ UNCOMMITTED ACCESS ORDER BY 4,1,2,3;",
inputParam[0], inputParam[1], inputParam[2],
inputParam[3]);
}
else
{
// strcpy((char *)catStmtLabel, "SQL_JAVA_TABLES_ANSI_Q2");
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select distinct "
"cast(trim(SCHEMA_NAME) as varchar(128) ) TABLE_SCHEM, "
"cast(trim(CATALOG_NAME) as varchar(128) ) TABLE_CATALOG "
"from TRAFODION.\"_MD_\".objects "
"where "
"(CATALOG_NAME = '%s' or "
" CATALOG_NAME LIKE '%s' ESCAPE '\\') "
"and (SCHEMA_NAME = '%s' or "
"SCHEMA_NAME LIKE '%s' ESCAPE '\\') "
"FOR READ UNCOMMITTED ACCESS ORDER BY 2;",
inputParam[0], inputParam[1], inputParam[2],
inputParam[3]);
}
}
else
if ((strcmp(catalogNm,"") == 0) && (strcmp(schemaNm,"")
== 0) && (strcmp(tableNm,"") == 0) && (strcmp(tableTypeList,"%") == 0))
{
strcpy(catalogNmNoEsc, "%");
strcpy(schemaNmNoEsc, "%");
strcpy(tableNmNoEsc, "%");
// strcpy((char *)catStmtLabel, "SQL_TABLES_ANSI_Q4");
tableParam[0] = NULL;
inputParam[0] = NULL;
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select cast(NULL as varchar(128) ) TABLE_CAT,"
"cast(NULL as varchar(128) ) TABLE_SCHEM, "
"cast(NULL as varchar(128) ) TABLE_NAME,"
"trim(TABLE_TYPE) TABLE_TYPE,"
"cast(NULL as varchar(128)) REMARKS "
" from (VALUES "
"('TABLE'),"
"('SYSTEM TABLE'),"
"('VIEW'))"
" tp (\"TABLE_TYPE\")"
" FOR READ UNCOMMITTED ACCESS ORDER BY 4,1,2,3;");
}
else
{
if (tableNm[0] == '\0')
strcpy((char *)tableNmNoEsc,"%");
if (! checkIfWildCard(catalogNm, expCatalogNm))
{
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_WILDCARD_NOT_SUPPORTED;
goto MapException;
}
if (strcmp(catalogNm,"") == 0) // If catalog empty default to system catalog
strcpy(tableName1,SEABASE_MD_CATALOG);
else
{
strncpy(tableName1,catalogNm, sizeof(tableName1));
tableName1[sizeof(tableName1)-1] = 0;
}
tableParam[0] = tableName1;
tableParam[1] = NULL;
convertWildcardNoEsc(metadataId, TRUE, schemaNm,schemaNmNoEsc);
convertWildcard(metadataId, TRUE, schemaNm, expSchemaNm);
if (tableNm[0] == '\0')
{
convertWildcardNoEsc(metadataId, TRUE, tableNmNoEsc, tableNmNoEsc);
convertWildcard(metadataId, TRUE, tableNmNoEsc, expTableNm);
}
else
{
convertWildcardNoEsc(metadataId, TRUE, tableNm, tableNmNoEsc);
convertWildcard(metadataId, TRUE, tableNm, expTableNm);
}
inputParam[0] = schemaNmNoEsc;
inputParam[1] = expSchemaNm;
inputParam[2] = tableNmNoEsc;
inputParam[3] = expTableNm;
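			// inputParam[4..8] carry object-type selectors that the generated WHERE clause
			// matches literally: 'UT' with 'BT'/'VI' selects user tables and views, while
			// 'SM' with 'BT' selects tables in the _MD_ metadata schema.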
if (tableTypeList == NULL || strlen(tableTypeList) == 0 || strcmp(tableTypeList,"%") == 0)
{
inputParam[4] = "UT"; // User Table
inputParam[5] = "BT";
inputParam[6] = "VI";
inputParam[7] = "SM"; // System MetaData
inputParam[8] = "BT";
inputParam[9] = NULL;
}
else
{
inputParam[4] = "";
inputParam[5] = "";
inputParam[6] = "";
inputParam[7] = "";
inputParam[8] = "";
inputParam[9] = NULL;
strncpy(lc_tableTypeList, tableTypeList, sizeof(lc_tableTypeList));
lc_tableTypeList[sizeof(lc_tableTypeList)-1] = 0;
token = strtok_r(lc_tableTypeList, " ,'", &saveptr);
while (token != NULL)
{
if (strcmp(token, "SYSTEM") == 0
)
{
token = strtok_r(NULL, ",'", &saveptr);
if (token != NULL && strcmp(token, "TABLE") == 0)
{
inputParam[7] = "SM";
inputParam[8] = "BT";
}
else
continue;
}
else
if (strcmp(token, "TABLE") == 0) {
inputParam[4] = "UT";
inputParam[5] = "BT";
}
else
if (strcmp(token, "VIEW") == 0)
{
inputParam[4] = "UT";
inputParam[6] = "VI";
}
token = strtok_r(NULL, " ,'", &saveptr);
}
}
if (APIType == SQL_API_SQLTABLES)
{
// strcpy((char *)catStmtLabel, "SQL_TABLES_ANSI_Q7");
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select cast('%s' as varchar(128) ) TABLE_CAT,"
"cast(trim(SCHEMA_NAME) as varchar(128) ) TABLE_SCHEM,"
"cast(trim(OBJECT_NAME) as varchar(128) ) TABLE_NAME,"
"trim(case OBJECT_TYPE "
"when 'BT' then 'TABLE' "
"when 'VI' then 'VIEW' "
"end) TABLE_TYPE,"
"cast(NULL as varchar(128)) REMARKS "
" from TRAFODION.\"_MD_\".OBJECTS "
" where (SCHEMA_NAME = '%s' or "
" trim(SCHEMA_NAME) LIKE '%s' ESCAPE '\\')"
" and (OBJECT_NAME = '%s' or"
" trim(OBJECT_NAME) LIKE '%s' ESCAPE '\\')"
" and ((SCHEMA_NAME <> '_MD_' and '%s'='UT' and OBJECT_TYPE in ('%s', '%s'))"
" or (SCHEMA_NAME = '_MD_' and '%s'='SM' and OBJECT_TYPE in ('%s')))"
" FOR READ UNCOMMITTED ACCESS ORDER BY 4, 1, 2, 3 ;",
tableParam[0], inputParam[0], inputParam[1],
inputParam[2], inputParam[3], inputParam[4],
inputParam[5], inputParam[6], inputParam[7],
inputParam[8]);
}
else
{
// strcpy((char *)catStmtLabel, "SQL_JAVA_TABLES_ANSI_Q7");
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select cast('%s' as varchar(128) ) TABLE_CAT,"
"cast(trim(SCHEMA_NAME) as varchar(128) ) TABLE_SCHEM,"
"cast(trim(OBJECT_NAME) as varchar(128) ) TABLE_NAME,"
"trim(case OBJECT_TYPE "
"when 'BT' then 'TABLE' "
"when 'VI' then 'VIEW' "
"end) TABLE_TYPE,"
"cast(NULL as varchar(128)) REMARKS, "
"cast(NULL as varchar(128)) TYPE_CAT,"
"cast(NULL as varchar(128)) TYPE_SCHEM, "
"cast(NULL as varchar(128)) TYPE_NAME,"
"cast(NULL as varchar(128)) SELF_REFERENCING_COL_NAME, "
"cast(NULL as varchar(128)) REF_GENERATION"
" from TRAFODION.\"_MD_\".OBJECTS "
" where (SCHEMA_NAME = '%s' or "
" trim(SCHEMA_NAME) LIKE '%s' ESCAPE '\\')"
" and (OBJECT_NAME = '%s' or"
" trim(OBJECT_NAME) LIKE '%s' ESCAPE '\\')"
" and ((SCHEMA_NAME <> '_MD_' and '%s'='UT' and OBJECT_TYPE in ('%s', '%s'))"
" or (SCHEMA_NAME = '_MD_' and '%s'='SM' and OBJECT_TYPE in ('%s')))"
" FOR READ UNCOMMITTED ACCESS ORDER BY 4, 1, 2, 3 ;",
tableParam[0], inputParam[0], inputParam[1],
inputParam[2], inputParam[3], inputParam[4],
inputParam[5], inputParam[6], inputParam[7],
inputParam[8]);
}
}
break;
case SQL_API_SQLGETTYPEINFO :
{
char condExpression[20] = {0};
		// Map ODBC 2.x datetime type codes to the ODBC 3.x codes used in the
		// type-info rows below before building the DATA_TYPE filter.
		switch(sqlType) {
			case SQL_DATE:
				sqlType = SQL_TYPE_DATE;
				break;
			case SQL_TIME:
				sqlType = SQL_TYPE_TIME;
				break;
			case SQL_TIMESTAMP:
				sqlType = SQL_TYPE_TIMESTAMP;
				break;
default:
break;
}
if(sqlType == SQL_ALL_TYPES)
sprintf(condExpression, "1=1");
else
sprintf(condExpression, "DATA_TYPE=%d", sqlType);
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select distinct TYPE_NAME TYPE_NAME,"
"DATA_TYPE DATA_TYPE,PREC COLUMN_SIZE,"
"LITERAL_PREFIX LITERAL_PREFIX,"
"LITERAL_SUFFIX LITERAL_SUFFIX,"
"CREATE_PARAMS CREATE_PARAMS,"
"IS_NULLABLE NULLABLE,"
"CASE_SENSITIVE CASE_SENSITIVE,"
"SEARCHABLE SEARCHABLE,"
"UNSIGNED_ATTRIBUTE UNSIGNED_ATTRIBUTE,"
"FIXED_PREC_SCALE FIXED_PREC_SCALE,"
"AUTO_UNIQUE_VALUE AUTO_UNIQUE_VALUE,"
"LOCAL_TYPE_NAME LOCAL_TYPE_NAME,"
"MINIMUM_SCALE MINIMUM_SCALE,"
"MAXIMUM_SCALE MAXIMUM_SCALE,"
"SQL_DATA_TYPE SQL_DATA_TYPE,"
"SQL_DATETIME_SUB SQL_DATETIME_SUB,"
"NUM_PREC_RADIX NUM_PREC_RADIX,"
"INTERVAL_PRECISION INTERVAL_PRECISION "
" from "
" (VALUES "
"(cast('BIGINT' as varchar(128)),cast(-5 as smallint), cast(19 as integer), cast (NULL as varchar(128)), cast (NULL as varchar(128)),"
"cast (NULL as varchar(128)), cast(1 as smallint), cast(0 as smallint), cast(2 as smallint) , cast(0 as smallint), cast(0 as smallint),"
"cast(0 as smallint), cast('LARGEINT' as varchar(128)), cast(NULL as smallint), cast(NULL as smallint), cast('LARGEINT' as varchar(128)),"
"cast(10 as smallint), cast(19 as integer), cast(20 as integer), cast(-402 as smallint), cast(NULL as smallint), cast(NULL as smallint),"
"cast(0 as smallint), cast(0 as smallint), cast(3 as smallint), cast(0 as smallint)),"
"('BIGINT SIGNED', -5, 19, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'LARGEINT', NULL, NULL, 'SIGNED LARGEINT', 19, 19, -1, -5, NULL, NULL, 0, 0, 3, 0),"
"('BIGINT UNSIGNED', -5, 20, NULL, NULL, NULL, 1, 0, 2, 1, 0, 0, 'LARGEINT', NULL, NULL, 'UNSIGNED LARGEINT', 20, 20, -1, -405, NULL, NULL, 0, 0, 3, 0),"
"('CHAR', 1, 32000, '''', '''', 'max length', 1, 1, 3, NULL, 0, NULL, 'CHARACTER', NULL, NULL, 'CHARACTER', NULL, -1, -1, 1, NULL, NULL, 0, 0, 3, 0),"
"('NCHAR', -8, 32000, '''', '''', 'max length', 1, 1, 3, NULL, 0, NULL, 'WCHAR', NULL, NULL, 'WCHAR', NULL, -1, -1, -8, NULL, NULL, 0, 0, 3, 0),"
"('NCHAR VARYING', -9, 32000, '''', '''', 'max length', 1, 1, 3, NULL, 0, NULL, 'WCHAR VARYING', NULL, NULL, 'VARWCHAR', NULL, -1, -1, -9, NULL, NULL, 0, 0, 3, 0),"
"('DATE', 91, 10, '{d ''', '''}', NULL, 1, 0, 2, NULL, 0, NULL, 'DATE', NULL, NULL, 'DATE', NULL, 10, 6, 9, 1, NULL, 1, 3, 3, 0),"
"('DECIMAL', 3, 18, NULL, NULL, 'precision,scale', 1, 0, 2, 0, 0, 0, 'DECIMAL', 0, 18, 'DECIMAL', 10, -2, -3, 3, NULL, NULL, 0, 0, 3, 0),"
"('DECIMAL SIGNED', 3, 18, NULL, NULL, 'precision,scale', 1, 0, 2, 0, 0, 0, 'DECIMAL', 0, 18, 'SIGNED DECIMAL', 10, -2, -3, 3, NULL, NULL, 0, 0, 3, 0),"
"('DECIMAL UNSIGNED', 3, 18, NULL, NULL, 'precision,scale', 1, 0, 2, 1, 0, 0, 'DECIMAL', 0, 18, 'UNSIGNED DECIMAL', 10, -2, -3, -301, NULL, NULL, 0, 0, 3, 0),"
"('DOUBLE PRECISION', 8, 15, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'DOUBLE', NULL, NULL, 'DOUBLE PRECISION', 2, 54, -1, 8, NULL, NULL, 0, 0, 3, 0),"
"('DOUBLE PRECISION', 8, 15, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'DOUBLE', NULL, NULL, 'DOUBLE', 2, 54, -1, 8, NULL, NULL, 0, 0, 3, 0),"
"('FLOAT', 6, 15, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'FLOAT', NULL, NULL, 'FLOAT', 2, -2, -1, 6, NULL, NULL, 0, 0, 3, 0),"
"('INTEGER', 4, 9, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'INTEGER', NULL, NULL, 'INTEGER', 9, 9, -1, 4, NULL, NULL, 0, 0, 3, 0),"
"('INTEGER SIGNED', 4, 9, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'INTEGER', NULL, NULL, 'SIGNED INTEGER', 9, 9, -1, 4, NULL, NULL, 0, 0, 3, 0),"
"('INTEGER UNSIGNED', 4, 10, NULL, NULL, NULL, 1, 0, 2, 1, 0, 0, 'INTEGER', NULL, NULL, 'UNSIGNED INTEGER', 10, 10, -1, -401, NULL, NULL, 0, 0, 3, 0),"
"('INTERVAL', 113, 0, '{INTERVAL ''', ''' MINUTE TO SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 3, 34, 100, 13, 2, 5, 6, 3, 0),"
"('INTERVAL', 105, 0, '{INTERVAL ''', ''' MINUTE}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 0, 34, 100, 5, 2, 5, 5, 3, 0),"
"('INTERVAL', 101, 0, '{INTERVAL ''', ''' YEAR}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 0, 34, 100, 1, 2, 1, 1, 3, 0),"
"('INTERVAL', 106, 0, '{INTERVAL ''', ''' SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 0, 34, 100, 6, 2, 6, 6, 3, 0),"
"('INTERVAL', 104, 0, '{INTERVAL ''', ''' HOUR}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 0, 34, 100, 4, 2, 4, 4, 3, 0),"
"('INTERVAL', 107, 0, '{INTERVAL ''', ''' YEAR TO MONTH}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 3, 34, 100, 7, 2, 1, 2, 3, 0),"
"('INTERVAL', 108, 0, '{INTERVAL ''', ''' DAY TO HOUR}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 3, 34, 100, 8, 2, 3, 4, 3, 0),"
"('INTERVAL', 102, 0, '{INTERVAL ''', ''' MONTH}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 0, 34, 100, 2, 2, 2, 2, 3, 0),"
"('INTERVAL', 111, 0, '{INTERVAL ''', ''' HOUR TO MINUTE}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 3, 34, 100, 11, 2, 4, 5, 3, 0),"
"('INTERVAL', 112, 0, '{INTERVAL ''', ''' HOUR TO SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 6, 34, 100, 12, 2, 4, 6, 3, 0),"
"('INTERVAL', 110, 0, '{INTERVAL ''', ''' DAY TO SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 9, 34, 100, 10, 2, 3, 6, 3, 0),"
"('INTERVAL', 109, 0, '{INTERVAL ''', ''' DAY TO MINUTE}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 6, 34, 100, 9, 2, 3, 5, 3, 0),"
"('INTERVAL', 103, 0, '{INTERVAL ''', ''' DAY}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', NULL, 0, 34, 100, 3, 2, 3, 3, 3, 0),"
"('NUMERIC', 2, 128, NULL, NULL, 'precision,scale', 1, 0, 2, 0, 0, 0, 'NUMERIC', 0, 128, 'NUMERIC', 10, -2, -3, 2, NULL, NULL, 0, 0, 3, 0),"
"('NUMERIC SIGNED', 2, 128, NULL, NULL, 'precision,scale', 1, 0, 2, 0, 0, 0, 'NUMERIC', 0, 128, 'SIGNED NUMERIC', 10, -2, -3, 2, NULL, NULL, 0, 0, 3, 0),"
"('NUMERIC UNSIGNED', 2, 128, NULL, NULL, 'precision,scale', 1, 0, 2, 1, 0, 0, 'NUMERIC', 0, 128, 'UNSIGNED NUMERIC', 10, -2, -3, 2, NULL, NULL, 0, 0, 3, 0),"
"('REAL', 7, 7, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'REAL', NULL, NULL, 'REAL', 2, 22, -1, 7, NULL, NULL, 0, 0, 3, 0),"
"('SMALLINT', 5, 5, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'SMALLINT', NULL, NULL, 'SMALLINT', 5, 5, -1, 5, NULL, NULL, 0, 0, 3, 0),"
"('SMALLINT SIGNED', 5, 5, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'SMALLINT', NULL, NULL, 'SIGNED SMALLINT', 5, 5, -1, 5, NULL, NULL, 0, 0, 3, 0),"
"('SMALLINT UNSIGNED', 5, 5, NULL, NULL, NULL, 1, 0, 2, 1, 0, 0, 'SMALLINT', NULL, NULL, 'UNSIGNED SMALLINT', 5, 5, -1, -502, NULL, NULL, 0, 0, 3, 0),"
"('TIME', 92, 8, '{t ''', '''}', NULL, 1, 0, 2, NULL, 0, NULL, 'TIME', NULL, NULL, 'TIME', NULL, 8, 6, 9, 2, NULL, 4, 6, 3, 0),"
"('TIMESTAMP', 93, 26, '{ts ''', '''}', NULL, 1, 0, 2, NULL, 0, NULL, 'TIMESTAMP', 0, 6, 'TIMESTAMP', NULL, 19, 16, 9, 3, NULL, 1, 6, 3, 0),"
"('VARCHAR', 12, 32000, '''', '''', 'max length', 1, 1, 3, NULL, 0, NULL, 'VARCHAR', NULL, NULL, 'VARCHAR', NULL, -1, -1, 12, NULL, NULL, 0, 0, 3, 0),"
"('LONG VARCHAR', -1, 2000, '''', '''', 'max length', 1, 1, 3, NULL, 0, NULL, 'LONG VARCHAR', NULL, NULL, 'VARCHAR', NULL, -1, -1, -1, NULL, NULL, 0, 0, 3, 0),"
"('TINYINT', -6, 3, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'TINYINT', NULL, NULL, 'TINYINT', 3, 3, -1, 4, NULL, NULL, 0, 0, 3, 0),"
"('TINYINT SIGNED', -6, 3, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'TINYINT', NULL, NULL, 'SIGNED TINYINT', 3, 3, -1, 4, NULL, NULL, 0, 0, 3, 0),"
"('TINYINT UNSIGNED', -6, 3, NULL, NULL, NULL, 1, 0, 2, 1, 0, 0, 'TINYINT', NULL, NULL, 'UNSIGNED TINYINT', 3, 3, -1, -404, NULL, NULL, 0, 0, 3, 0)"
" ) "
" dt(\"TYPE_NAME\", \"DATA_TYPE\", \"PREC\", \"LITERAL_PREFIX\", \"LITERAL_SUFFIX\", \"CREATE_PARAMS\", \"IS_NULLABLE\", \"CASE_SENSITIVE\", \"SEARCHABLE\","
"\"UNSIGNED_ATTRIBUTE\", \"FIXED_PREC_SCALE\", \"AUTO_UNIQUE_VALUE\", \"LOCAL_TYPE_NAME\", \"MINIMUM_SCALE\", \"MAXIMUM_SCALE\", \"SQL_TYPE_NAME\","
"\"NUM_PREC_RADIX\", \"USEPRECISION\", \"USELENGTH\", \"SQL_DATA_TYPE\", \"SQL_DATETIME_SUB\", \"INTERVAL_PRECISION\", \"DATETIMESTARTFIELD\","
"\"DATETIMEENDFIELD\", \"APPLICATION_VERSION\", \"TRANSLATION_ID\")"
" WHERE %s"
" ORDER BY 2,1 FOR READ UNCOMMITTED ACCESS ;", condExpression);
break;
}
case SQL_API_SQLPROCEDURES :
case SQL_API_SQLPROCEDURES_JDBC:
// strcpy((char *)catStmtLabel, "SQL_PROCEDURES_ANSI_Q4");
if (!checkIfWildCard(catalogNm, expCatalogNm) && !metadataId)
{
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_WILDCARD_NOT_SUPPORTED;
goto MapException;
}
if (strcmp(catalogNm,"") == 0)
strcpy(tableName1,SEABASE_MD_CATALOG);
else
strcpy(tableName1,catalogNm);
tableParam[0] = tableName1;
tableParam[1] = NULL;
convertWildcardNoEsc(metadataId, TRUE, schemaNm, schemaNmNoEsc);
convertWildcard(metadataId, TRUE, schemaNm, expSchemaNm);
convertWildcardNoEsc(metadataId, TRUE, tableNm, tableNmNoEsc);
convertWildcard(metadataId, TRUE, tableNm, expTableNm);
inputParam[0] = schemaNmNoEsc;
inputParam[1] = expSchemaNm;
inputParam[2] = tableNmNoEsc;
inputParam[3] = expTableNm;
inputParam[4] = NULL;
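		// OBJECT_TYPE 'UR' identifies user-defined routines (stored procedures) in the
		// metadata, so only those objects are returned for SQLProcedures.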
if( APIType == SQL_API_SQLPROCEDURES )
{
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select "
"cast('%s' as varchar(128) ) PROCEDURE_CAT, "
"cast(trim(SCHEMA_NAME) as varchar(128) ) PROCEDURE_SCHEM, "
"cast(trim(OBJECT_NAME) as varchar(128) ) PROCEDURE_NAME, "
"cast (NULL as smallint) NUM_INPUT_PARAMS, "
"cast (NULL as smallint) NUM_OUTPUT_PARAMS, "
"cast (NULL as smallint) NUM_RESULT_SETS, "
"cast (NULL as varchar(128)) REMARKS, "
"(case OBJECT_TYPE "
"when 'UR' then cast(1 as smallint) "
"else cast(0 as smallint) end) PROCEDURE_TYPE "
"from "
"TRAFODION.\"_MD_\".OBJECTS "
"where (SCHEMA_NAME = '%s' "
"or trim(SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
"and (OBJECT_NAME = '%s' "
"or trim(OBJECT_NAME) LIKE '%s' ESCAPE '\\') "
"and OBJECT_TYPE = 'UR' "
"FOR READ UNCOMMITTED ACCESS ORDER BY 4, 1, 2, 3 ;",
tableParam[0], inputParam[0], inputParam[1],
inputParam[2], inputParam[3]);
}
else
{
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select "
"obj.CATALOG_NAME PROCEDURE_CAT, obj.SCHEMA_NAME PROCEDURE_SCHEMA,"
"obj.OBJECT_NAME PROCEDURE_NAME, cast(NULL as varchar(10)) R1,cast(NULL as varchar(10)) R2,"
"cast(NULL as varchar(10)) R3, cast(NULL as varchar(10)) REMARKS,"
"cast(case when routines.UDR_TYPE = 'P' then 1"
" when routines.UDR_TYPE = 'F' or routines.UDR_TYPE = 'T'"
" then 2 else 0 end as smallint) PROCEDURE_TYPE,"
"obj.OBJECT_NAME SPECIFIC_NAME "
"from "
"TRAFODION.\"_MD_\".OBJECTS obj "
"left join TRAFODION.\"_MD_\".ROUTINES routines on obj.OBJECT_UID = routines.UDR_UID "
"where (obj.SCHEMA_NAME = '%s' "
"or trim(obj.SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
"and (obj.OBJECT_NAME = '%s' "
"or trim(obj.OBJECT_NAME) LIKE '%s' ESCAPE '\\') "
"and obj.OBJECT_TYPE = 'UR' "
"FOR READ UNCOMMITTED ACCESS ORDER BY 4, 1, 2, 3 ;",
inputParam[0], inputParam[1],
inputParam[2], inputParam[3]);
}
break;
case SQL_API_SQLPROCEDURECOLUMNS:
case SQL_API_SQLPROCEDURECOLUMNS_JDBC:
if (!checkIfWildCard(catalogNm, expCatalogNm) && !metadataId)
{
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_WILDCARD_NOT_SUPPORTED;
goto MapException;
}
if (strcmp(catalogNm,"") == 0)
strcpy(tableName1,SEABASE_MD_CATALOG);
else
strcpy(tableName1,catalogNm);
tableParam[0] = tableName1;
tableParam[1] = NULL;
convertWildcardNoEsc(metadataId, TRUE, schemaNm, schemaNmNoEsc);
convertWildcard(metadataId, TRUE, schemaNm, expSchemaNm);
convertWildcardNoEsc(metadataId, TRUE, tableNm, tableNmNoEsc);
convertWildcard(metadataId, TRUE, tableNm, expTableNm);
convertWildcardNoEsc(metadataId, TRUE, columnNm, columnNmNoEsc);
convertWildcard(metadataId, TRUE, columnNm, expColumnNm);
inputParam[0] = schemaNmNoEsc;
inputParam[1] = expSchemaNm;
inputParam[2] = tableNmNoEsc;
inputParam[3] = expTableNm;
inputParam[4] = columnNmNoEsc;
inputParam[5] = expColumnNm;
inputParam[6] = NULL;
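		// The COLUMN_TYPE expression maps the metadata DIRECTION codes ('I' input,
		// 'N' in/out, 'O' output) to the numeric parameter-type values expected by the
		// driver; unknown directions map to 0.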
if( APIType == SQL_API_SQLPROCEDURECOLUMNS )
{
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select obj.CATALOG_NAME PROCEDURE_CAT, obj.SCHEMA_NAME PROCEDURE_SCHEM,"
"obj.OBJECT_NAME PROCEDURE_NAME, cols.COLUMN_NAME COLUMN_NAME,"
"cast((case when cols.DIRECTION='I' then 1 when cols.DIRECTION='N' "
"then 2 when cols.DIRECTION='O' then 3 else 0 end) as smallint) COLUMN_TYPE,"
"cols.FS_DATA_TYPE DATA_TYPE, cols.SQL_DATA_TYPE TYPE_NAME,"
"cols.COLUMN_PRECISION \"PRECISION\", cols.COLUMN_SIZE LENGTH, cols.COLUMN_SCALE SCALE,"
"cast(1 as smallint) RADIX, cols.NULLABLE NULLABLE, cast(NULL as varchar(10)) REMARKS,"
"cols.DEFAULT_VALUE COLUMN_DEF, cols.FS_DATA_TYPE SQL_DATA_TYPE, cast(0 as smallint) SQL_DATETIME_SUB,"
"cols.COLUMN_SIZE CHAR_OCTET_LENGTH, cols.COLUMN_NUMBER ORDINAL_POSITION,"
"cols.NULLABLE IS_NULLABLE"
" from TRAFODION.\"_MD_\".OBJECTS obj"
" left join TRAFODION.\"_MD_\".COLUMNS cols on obj.OBJECT_UID=cols.OBJECT_UID"
" where"
" (obj.SCHEMA_NAME = '%s' or trim(obj.SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
" and (obj.OBJECT_NAME = '%s' or trim(obj.OBJECT_NAME) LIKE '%s' ESCAPE '\\')"
" and (cols.COLUMN_NAME = '%s' or trim(cols.COLUMN_NAME) LIKE '%s' ESCAPE '\\')"
" order by PROCEDURE_CAT, PROCEDURE_SCHEM, PROCEDURE_NAME, ORDINAL_POSITION"
" FOR READ UNCOMMITTED ACCESS",
inputParam[0], inputParam[1],
inputParam[2], inputParam[3],
inputParam[4], inputParam[5]);
}
else
{
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select obj.CATALOG_NAME PROCEDURE_CAT, obj.SCHEMA_NAME PROCEDURE_SCHEM,"
"obj.OBJECT_NAME PROCEDURE_NAME, cols.COLUMN_NAME COLUMN_NAME,"
"cast((case when cols.DIRECTION='I' then 1 when cols.DIRECTION='N' then 2 when cols.DIRECTION='O' then 3 else 0 end) as smallint) COLUMN_TYPE,"
"cols.FS_DATA_TYPE DATA_TYPE, cols.SQL_DATA_TYPE TYPE_NAME,"
"cols.COLUMN_PRECISION \"PRECISION\", cols.COLUMN_SIZE LENGTH, cols.COLUMN_SCALE SCALE,"
"cast(1 as smallint) RADIX, cols.NULLABLE NULLABLE, cast(NULL as varchar(10)) REMARKS,"
"cols.DEFAULT_VALUE COLUMN_DEF, cols.FS_DATA_TYPE SQL_DATA_TYPE, cast(0 as smallint) SQL_DATETIME_SUB,"
"cols.COLUMN_SIZE CHAR_OCTET_LENGTH, cols.COLUMN_NUMBER ORDINAL_POSITION,"
"cols.NULLABLE IS_NULLABLE, cols.COLUMN_NAME SPECIFIC_NAME"
" from TRAFODION.\"_MD_\".OBJECTS obj"
" left join TRAFODION.\"_MD_\".COLUMNS cols on obj.OBJECT_UID=cols.OBJECT_UID"
" where"
" (obj.SCHEMA_NAME = '%s' or trim(obj.SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
" and (obj.OBJECT_NAME = '%s' or trim(obj.OBJECT_NAME) LIKE '%s' ESCAPE '\\')"
" and (cols.COLUMN_NAME = '%s' or trim(cols.COLUMN_NAME) LIKE '%s' ESCAPE '\\')"
" order by PROCEDURE_CAT, PROCEDURE_SCHEM, PROCEDURE_NAME, ORDINAL_POSITION"
" FOR READ UNCOMMITTED ACCESS",
inputParam[0], inputParam[1],
inputParam[2], inputParam[3],
inputParam[4], inputParam[5]);
}
break;
case SQL_API_SQLCOLUMNS :
case SQL_API_SQLCOLUMNS_JDBC :
if (!checkIfWildCard(catalogNm, catalogNmNoEsc) && !metadataId)
{
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_WILDCARD_NOT_SUPPORTED;
goto MapException;
}
if (tableNm[0] != '$' && tableNm[0] != '\\')
{
if (strcmp(catalogNm,"") == 0)
strcpy(tableName1,SEABASE_MD_CATALOG);
else
strcpy(tableName1, catalogNm);
/*
if (APIType == SQL_API_SQLCOLUMNS)
strcpy((char *)catStmtLabel, "SQL_COLUMNS_UNICODE_Q4");
else
strcpy((char *)catStmtLabel, "SQL_JAVA_COLUMNS_ANSI_Q4");
*/
tableParam[0] = tableName1;
convertWildcard(metadataId, TRUE, schemaNm, expSchemaNm);
convertWildcardNoEsc(metadataId, TRUE, schemaNm, schemaNmNoEsc);
convertWildcard(metadataId, TRUE, tableNm, expTableNm);
convertWildcardNoEsc(metadataId, TRUE, tableNm, tableNmNoEsc);
convertWildcard(metadataId, TRUE, columnNm, expColumnNm);
convertWildcardNoEsc(metadataId, TRUE, columnNm, columnNmNoEsc);
inputParam[0] = schemaNmNoEsc;
inputParam[1] = expSchemaNm;
inputParam[2] = tableNmNoEsc;
inputParam[3] = expTableNm;
inputParam[4] = columnNmNoEsc;
inputParam[5] = expColumnNm;
inputParam[6] = odbcAppVersion;
inputParam[7] = NULL;
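			// ODBC 3.x clients get the full SQLColumns result set (COLUMN_DEF,
			// SQL_DATA_TYPE, SQL_DATETIME_SUB, CHAR_OCTET_LENGTH, ORDINAL_POSITION,
			// IS_NULLABLE); older clients get the shorter ODBC 2.x column list.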
if (strncmp(odbcAppVersion, "3", 1) == 0)
{
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select "
"cast('%s' as varchar(128) ) TABLE_CAT, "
"cast(trim(ob.SCHEMA_NAME) as varchar(128) ) TABLE_SCHEM, "
"cast(trim(ob.OBJECT_NAME) as varchar(128) ) TABLE_NAME, "
"cast(trim(co.COLUMN_NAME) as varchar(128) ) COLUMN_NAME, "
"cast((case when co.FS_DATA_TYPE = 0 and co.character_set = 'UCS2' then -8 "
"when co.FS_DATA_TYPE = 64 and co.character_set = 'UCS2' then -9 else dt.DATA_TYPE end) as smallint) DATA_TYPE, "
"trim(dt.TYPE_NAME) TYPE_NAME, "
"cast((case when co.FS_DATA_TYPE = 0 and co.character_set = 'UCS2' then co.COLUMN_SIZE/2 "
"when co.FS_DATA_TYPE = 64 and co.character_set = 'UCS2' then co.COLUMN_SIZE/2 "
"when dt.USEPRECISION = -1 then co.COLUMN_SIZE when dt.USEPRECISION = -2 then co.COLUMN_PRECISION "
"when co.FS_DATA_TYPE = 192 then dt.USEPRECISION "
"when co.FS_DATA_TYPE >= 195 and co.FS_DATA_TYPE <= 207 then dt.USEPRECISION + 1 "
"else dt.USEPRECISION end) as integer) COLUMN_SIZE, "
"cast((case when dt.USELENGTH = -1 then co.COLUMN_SIZE when dt.USELENGTH = -2 then co.COLUMN_PRECISION "
"when dt.USELENGTH = -3 then co.COLUMN_PRECISION + 2 "
"else dt.USELENGTH end) as integer) BUFFER_LENGTH, "
"cast(co.COLUMN_SCALE as smallint) DECIMAL_DIGITS, "
"cast(dt.NUM_PREC_RADIX as smallint) NUM_PREC_RADIX, "
"cast(co.NULLABLE as smallint) NULLABLE, "
"cast('' as varchar(128)) REMARKS, "
"trim(co.DEFAULT_VALUE) COLUMN_DEF, "
"cast((case when co.FS_DATA_TYPE = 0 and co.character_set = 'UCS2' then -8 "
"when co.FS_DATA_TYPE = 64 and co.character_set = 'UCS2' then -9 else dt.SQL_DATA_TYPE end) as smallint) SQL_DATA_TYPE, "
"cast(dt.SQL_DATETIME_SUB as smallint) SQL_DATETIME_SUB, cast((case dt.DATA_TYPE when 1 then co.COLUMN_SIZE "
"when -1 then co.COLUMN_SIZE when 12 then co.COLUMN_SIZE else NULL end) as integer) CHAR_OCTET_LENGTH, "
"cast((case when (trim(co1.COLUMN_CLASS) <> 'S') then co.column_number+1 else "
"co.column_number end) as integer) ORDINAL_POSITION, "
"cast((case when co.NULLABLE = 0 then 'NO' else 'YES' end) as varchar(3)) IS_NULLABLE "
"from "
"TRAFODION.\"_MD_\".objects ob, "
"TRAFODION.\"_MD_\".columns co, "
"TRAFODION.\"_MD_\".columns co1, "
"(VALUES ("
"cast('BIGINT' as varchar(128)),cast(-5 as smallint), cast(19 as integer), cast (NULL as varchar(128)), cast (NULL as varchar(128)), "
"cast (NULL as varchar(128)), cast(1 as smallint), cast(0 as smallint), cast(2 as smallint) , cast(0 as smallint), cast(0 as smallint), "
"cast(0 as smallint), cast('LARGEINT' as varchar(128)), cast(NULL as smallint), cast(NULL as smallint), cast('SIGNED LARGEINT' as varchar(128)), cast(134 as integer), "
"cast(10 as smallint), cast(19 as integer), cast(20 as integer), cast(-402 as smallint), cast(NULL as smallint), cast(NULL as smallint), "
"cast(0 as smallint), cast(0 as smallint), cast(3 as smallint), cast(0 as smallint)), "
"('CHAR', 1, 32000, '''', '''', 'max length', 1, 1, 3, NULL, 0, NULL, 'CHARACTER', NULL, NULL, 'CHARACTER', 0, NULL, -1, -1, 1, NULL, NULL, 0, 0, 3, 0), "
"('DATE', 91, 10, '{d ''', '''}', NULL, 1, 0, 2, NULL, 0, NULL, 'DATE', NULL, NULL, 'DATE', 192, NULL, 10, 6, 9, 1, NULL, 1, 3, 3, 0), "
"('DECIMAL', 3, 18, NULL, NULL, 'precision,scale', 1, 0, 2, 0, 0, 0, 'DECIMAL', 0, 18, 'SIGNED DECIMAL', 152, 10, -2, -3, 3, NULL, NULL, 0, 0, 3, 0), "
"('DECIMAL UNSIGNED', 3, 18, NULL, NULL, 'precision,scale', 1, 0, 2, 1, 0, 0, 'DECIMAL', 0, 18, 'UNSIGNED DECIMAL', 150, 10, -2, -3, -301, NULL, NULL, 0, 0, 3, 0), "
"('DOUBLE PRECISION', 8, 15, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'DOUBLE', NULL, NULL, 'DOUBLE', 143, 2, 54, -1, 8, NULL, NULL, 0, 0, 3, 0), "
"('INTEGER', 4, 10, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'INTEGER', NULL, NULL, 'SIGNED INTEGER', 132, 10, 10, -1, 4, NULL, NULL, 0, 0, 3, 0), "
"('INTEGER UNSIGNED', 4, 10, NULL, NULL, NULL, 1, 0, 2, 1, 0, 0, 'INTEGER', NULL, NULL, 'UNSIGNED INTEGER', 133, 10, 10, -1, -401, NULL, NULL, 0, 0, 3, 0), "
"('INTERVAL', 113, 0, '{INTERVAL ''', ''' MINUTE TO SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 205, NULL, 3, 34, 100, 13, 2, 5, 6, 3, 0), "
"('INTERVAL', 105, 0, '{INTERVAL ''', ''' MINUTE}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 201, NULL, 0, 34, 100, 5, 2, 5, 5, 3, 0), "
"('INTERVAL', 101, 0, '{INTERVAL ''', ''' YEAR}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 195, NULL, 0, 34, 100, 1, 2, 1, 1, 3, 0), "
"('INTERVAL', 106, 0, '{INTERVAL ''', ''' SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 204, NULL, 0, 34, 100, 6, 2, 6, 6, 3, 0), "
"('INTERVAL', 104, 0, '{INTERVAL ''', ''' HOUR}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 199, NULL, 0, 34, 100, 4, 2, 4, 4, 3, 0), "
"('INTERVAL', 107, 0, '{INTERVAL ''', ''' YEAR TO MONTH}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 197, NULL, 3, 34, 100, 7, 2, 1, 2, 3, 0), "
"('INTERVAL', 108, 0, '{INTERVAL ''', ''' DAY TO HOUR}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 200, NULL, 3, 34, 100, 8, 2, 3, 4, 3, 0), "
"('INTERVAL', 102, 0, '{INTERVAL ''', ''' MONTH}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 196, NULL, 0, 34, 100, 2, 2, 2, 2, 3, 0), "
"('INTERVAL', 111, 0, '{INTERVAL ''', ''' HOUR TO MINUTE}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 202, NULL, 3, 34, 100, 11, 2, 4, 5, 3, 0), "
"('INTERVAL', 112, 0, '{INTERVAL ''', ''' HOUR TO SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 206, NULL, 6, 34, 100, 12, 2, 4, 6, 3, 0), "
"('INTERVAL', 110, 0, '{INTERVAL ''', ''' DAY TO SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 207, NULL, 9, 34, 100, 10, 2, 3, 6, 3, 0), "
"('INTERVAL', 109, 0, '{INTERVAL ''', ''' DAY TO MINUTE}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 203, NULL, 6, 34, 100, 9, 2, 3, 5, 3, 0), "
"('INTERVAL', 103, 0, '{INTERVAL ''', ''' DAY}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 198, NULL, 0, 34, 100, 3, 2, 3, 3, 3, 0), "
"('NUMERIC', 2, 128, NULL, NULL, 'precision,scale', 1, 0, 2, 0, 0, 0, 'NUMERIC', 0, 128, 'SIGNED NUMERIC', 156, 10, -2, -3, 2, NULL, NULL, 0, 0, 3, 0), "
"('NUMERIC UNSIGNED', 2, 128, NULL, NULL, 'precision,scale', 1, 0, 2, 1, 0, 0, 'NUMERIC', 0, 128, 'UNSIGNED NUMERIC', 155, 10, -2, -3, 2, NULL, NULL, 0, 0, 3, 0), "
"('REAL', 7, 7, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'REAL', NULL, NULL, 'REAL', 142, 2, 22, -1, 7, NULL, NULL, 0, 0, 3, 0), "
"('SMALLINT', 5, 5, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'SMALLINT', NULL, NULL, 'SIGNED SMALLINT', 130, 10, 5, -1, 5, NULL, NULL, 0, 0, 3, 0), "
"('SMALLINT UNSIGNED', 5, 5, NULL, NULL, NULL, 1, 0, 2, 1, 0, 0, 'SMALLINT', NULL, NULL, 'UNSIGNED SMALLINT', 131, 10, 5, -1, -502, NULL, NULL, 0, 0, 3, 0), "
"('TIME', 92, 8, '{t ''', '''}', NULL, 1, 0, 2, NULL, 0, NULL, 'TIME', NULL, NULL, 'TIME', 192, NULL, 8, 6, 9, 2, NULL, 4, 6, 3, 0), "
"('TIMESTAMP', 93, 26, '{ts ''', '''}', NULL, 1, 0, 2, NULL, 0, NULL, 'TIMESTAMP', 0, 6, 'TIMESTAMP', 192, NULL, 19, 16, 9, 3, NULL, 1, 6, 3, 0), "
"('VARCHAR', 12, 32000, '''', '''', 'max length', 1, 1, 3, NULL, 0, NULL, 'VARCHAR', NULL, NULL, 'VARCHAR', 64, NULL, -1, -1, 12, NULL, NULL, 0, 0, 3, 0) "
" ) "
"dt(\"TYPE_NAME\", \"DATA_TYPE\", \"PREC\", \"LITERAL_PREFIX\", \"LITERAL_SUFFIX\", \"CREATE_PARAMS\", \"IS_NULLABLE\", \"CASE_SENSITIVE\", \"SEARCHABLE\", "
"\"UNSIGNED_ATTRIBUTE\", \"FIXED_PREC_SCALE\", \"AUTO_UNIQUE_VALUE\", \"LOCAL_TYPE_NAME\", \"MINIMUM_SCALE\", \"MAXIMUM_SCALE\", \"SQL_TYPE_NAME\", \"FS_DATA_TYPE\", "
"\"NUM_PREC_RADIX\", \"USEPRECISION\", \"USELENGTH\", \"SQL_DATA_TYPE\", \"SQL_DATETIME_SUB\", \"INTERVAL_PRECISION\", \"DATETIMESTARTFIELD\", "
"\"DATETIMEENDFIELD\", \"APPLICATION_VERSION\", \"TRANSLATION_ID\") "
"where ob.OBJECT_UID = co.OBJECT_UID "
"and dt.FS_DATA_TYPE = co.FS_DATA_TYPE "
"and co.OBJECT_UID = co1.OBJECT_UID and co1.COLUMN_NUMBER = 0 "
"and (dt.DATETIMESTARTFIELD = co.DATETIME_START_FIELD) "
"and (dt.DATETIMEENDFIELD = co.DATETIME_END_FIELD) "
"and (ob.SCHEMA_NAME = '%s' or trim(ob.SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
"and (ob.OBJECT_NAME = '%s' or trim(ob.OBJECT_NAME) LIKE '%s' ESCAPE '\\') "
"and (co.COLUMN_NAME = '%s' or trim(co.COLUMN_NAME) LIKE '%s' ESCAPE '\\') "
"and (ob.OBJECT_TYPE in ('BT' , 'VI') ) "
"and (trim(co.COLUMN_CLASS) not in ('S', 'M')) "
"and dt.APPLICATION_VERSION = %s "
"FOR READ UNCOMMITTED ACCESS order by 1, 2, 3, co.COLUMN_NUMBER ; ",
tableParam[0], inputParam[0], inputParam[1],
inputParam[2], inputParam[3], inputParam[4],
inputParam[5], inputParam[6]);
}
else
{
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select "
"cast('%s' as varchar(128) ) TABLE_CAT, "
"cast(trim(ob.SCHEMA_NAME) as varchar(128) ) TABLE_SCHEM, "
"cast(trim(ob.OBJECT_NAME) as varchar(128) ) TABLE_NAME, "
"cast(trim(co.COLUMN_NAME) as varchar(128) ) COLUMN_NAME, "
"cast((case when co.FS_DATA_TYPE = 0 and co.character_set = 'UCS2' then -8 "
"when co.FS_DATA_TYPE = 64 and co.character_set = 'UCS2' then -9 else dt.DATA_TYPE end) as smallint) DATA_TYPE, "
"trim(dt.TYPE_NAME) TYPE_NAME, "
"cast((case when co.FS_DATA_TYPE = 0 and co.character_set = 'UCS2' then co.COLUMN_SIZE/2 "
"when co.FS_DATA_TYPE = 64 and co.character_set = 'UCS2' then co.COLUMN_SIZE/2 "
"when dt.USEPRECISION = -1 then co.COLUMN_SIZE when dt.USEPRECISION = -2 then co.COLUMN_PRECISION "
"when co.FS_DATA_TYPE = 192 then dt.USEPRECISION + 1 "
"when co.FS_DATA_TYPE >= 195 and co.FS_DATA_TYPE <= 207 then dt.USEPRECISION + 1 "
"else dt.USEPRECISION end) as integer) COLUMN_SIZE, "
"cast((case when dt.USELENGTH = -1 then co.COLUMN_SIZE when dt.USELENGTH = -2 then co.COLUMN_PRECISION "
"when dt.USELENGTH = -3 then co.COLUMN_PRECISION + 2 "
"else dt.USELENGTH end) as integer) BUFFER_LENGTH, "
"cast(co.COLUMN_SCALE as smallint) DECIMAL_DIGITS, "
"cast(dt.NUM_PREC_RADIX as smallint) NUM_PREC_RADIX, "
"cast(co.NULLABLE as smallint) NULLABLE, "
"cast('' as varchar(128)) REMARKS "
"from "
"TRAFODION.\"_MD_\".objects ob, "
"TRAFODION.\"_MD_\".columns co, "
"TRAFODION.\"_MD_\".columns co1, "
"(VALUES ("
"cast('BIGINT' as varchar(128)),cast(-5 as smallint), cast(19 as integer), cast (NULL as varchar(128)), cast (NULL as varchar(128)), "
"cast (NULL as varchar(128)), cast(1 as smallint), cast(0 as smallint), cast(2 as smallint) , cast(0 as smallint), cast(0 as smallint), "
"cast(0 as smallint), cast('LARGEINT' as varchar(128)), cast(NULL as smallint), cast(NULL as smallint), cast('SIGNED LARGEINT' as varchar(128)), cast(134 as integer), "
"cast(10 as smallint), cast(19 as integer), cast(20 as integer), cast(-402 as smallint), cast(NULL as smallint), cast(NULL as smallint), "
"cast(0 as smallint), cast(0 as smallint), cast(3 as smallint), cast(0 as smallint)), "
"('CHAR', 1, 32000, '''', '''', 'max length', 1, 1, 3, NULL, 0, NULL, 'CHARACTER', NULL, NULL, 'CHARACTER', 0, NULL, -1, -1, 1, NULL, NULL, 0, 0, 3, 0), "
"('DATE', 91, 10, '{d ''', '''}', NULL, 1, 0, 2, NULL, 0, NULL, 'DATE', NULL, NULL, 'DATE', 192, NULL, 10, 6, 9, 1, NULL, 1, 3, 3, 0), "
"('DECIMAL', 3, 18, NULL, NULL, 'precision,scale', 1, 0, 2, 0, 0, 0, 'DECIMAL', 0, 18, 'SIGNED DECIMAL', 152, 10, -2, -3, 3, NULL, NULL, 0, 0, 3, 0), "
"('DECIMAL UNSIGNED', 3, 18, NULL, NULL, 'precision,scale', 1, 0, 2, 1, 0, 0, 'DECIMAL', 0, 18, 'UNSIGNED DECIMAL', 150, 10, -2, -3, -301, NULL, NULL, 0, 0, 3, 0), "
"('DOUBLE PRECISION', 8, 15, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'DOUBLE', NULL, NULL, 'DOUBLE', 143, 2, 54, -1, 8, NULL, NULL, 0, 0, 3, 0), "
"('INTEGER', 4, 10, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'INTEGER', NULL, NULL, 'SIGNED INTEGER', 132, 10, 10, -1, 4, NULL, NULL, 0, 0, 3, 0), "
"('INTEGER UNSIGNED', 4, 10, NULL, NULL, NULL, 1, 0, 2, 1, 0, 0, 'INTEGER', NULL, NULL, 'UNSIGNED INTEGER', 133, 10, 10, -1, -401, NULL, NULL, 0, 0, 3, 0), "
"('INTERVAL', 113, 0, '{INTERVAL ''', ''' MINUTE TO SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 205, NULL, 3, 34, 100, 13, 2, 5, 6, 3, 0), "
"('INTERVAL', 105, 0, '{INTERVAL ''', ''' MINUTE}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 201, NULL, 0, 34, 100, 5, 2, 5, 5, 3, 0), "
"('INTERVAL', 101, 0, '{INTERVAL ''', ''' YEAR}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 195, NULL, 0, 34, 100, 1, 2, 1, 1, 3, 0), "
"('INTERVAL', 106, 0, '{INTERVAL ''', ''' SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 204, NULL, 0, 34, 100, 6, 2, 6, 6, 3, 0), "
"('INTERVAL', 104, 0, '{INTERVAL ''', ''' HOUR}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 199, NULL, 0, 34, 100, 4, 2, 4, 4, 3, 0), "
"('INTERVAL', 107, 0, '{INTERVAL ''', ''' YEAR TO MONTH}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 197, NULL, 3, 34, 100, 7, 2, 1, 2, 3, 0), "
"('INTERVAL', 108, 0, '{INTERVAL ''', ''' DAY TO HOUR}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 200, NULL, 3, 34, 100, 8, 2, 3, 4, 3, 0), "
"('INTERVAL', 102, 0, '{INTERVAL ''', ''' MONTH}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 196, NULL, 0, 34, 100, 2, 2, 2, 2, 3, 0), "
"('INTERVAL', 111, 0, '{INTERVAL ''', ''' HOUR TO MINUTE}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 202, NULL, 3, 34, 100, 11, 2, 4, 5, 3, 0), "
"('INTERVAL', 112, 0, '{INTERVAL ''', ''' HOUR TO SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 206, NULL, 6, 34, 100, 12, 2, 4, 6, 3, 0), "
"('INTERVAL', 110, 0, '{INTERVAL ''', ''' DAY TO SECOND}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 207, NULL, 9, 34, 100, 10, 2, 3, 6, 3, 0), "
"('INTERVAL', 109, 0, '{INTERVAL ''', ''' DAY TO MINUTE}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 203, NULL, 6, 34, 100, 9, 2, 3, 5, 3, 0), "
"('INTERVAL', 103, 0, '{INTERVAL ''', ''' DAY}', NULL, 1, 0, 2, 0, 0, NULL, 'INTERVAL', 0, 0, 'INTERVAL', 198, NULL, 0, 34, 100, 3, 2, 3, 3, 3, 0), "
"('NUMERIC', 2, 128, NULL, NULL, 'precision,scale', 1, 0, 2, 0, 0, 0, 'NUMERIC', 0, 128, 'SIGNED NUMERIC', 156, 10, -2, -3, 2, NULL, NULL, 0, 0, 3, 0), "
"('NUMERIC UNSIGNED', 2, 128, NULL, NULL, 'precision,scale', 1, 0, 2, 1, 0, 0, 'NUMERIC', 0, 128, 'UNSIGNED NUMERIC', 155, 10, -2, -3, 2, NULL, NULL, 0, 0, 3, 0), "
"('REAL', 7, 7, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'REAL', NULL, NULL, 'REAL', 142, 2, 22, -1, 7, NULL, NULL, 0, 0, 3, 0), "
"('SMALLINT', 5, 5, NULL, NULL, NULL, 1, 0, 2, 0, 0, 0, 'SMALLINT', NULL, NULL, 'SIGNED SMALLINT', 130, 10, 5, -1, 5, NULL, NULL, 0, 0, 3, 0), "
"('SMALLINT UNSIGNED', 5, 5, NULL, NULL, NULL, 1, 0, 2, 1, 0, 0, 'SMALLINT', NULL, NULL, 'UNSIGNED SMALLINT', 131, 10, 5, -1, -502, NULL, NULL, 0, 0, 3, 0), "
"('TIME', 92, 8, '{t ''', '''}', NULL, 1, 0, 2, NULL, 0, NULL, 'TIME', NULL, NULL, 'TIME', 192, NULL, 8, 6, 9, 2, NULL, 4, 6, 3, 0), "
"('TIMESTAMP', 93, 26, '{ts ''', '''}', NULL, 1, 0, 2, NULL, 0, NULL, 'TIMESTAMP', 0, 6, 'TIMESTAMP', 192, NULL, 19, 16, 9, 3, NULL, 1, 6, 3, 0), "
"('VARCHAR', 12, 32000, '''', '''', 'max length', 1, 1, 3, NULL, 0, NULL, 'VARCHAR', NULL, NULL, 'VARCHAR', 64, NULL, -1, -1, 12, NULL, NULL, 0, 0, 3, 0) "
" ) "
"dt(\"TYPE_NAME\", \"DATA_TYPE\", \"PREC\", \"LITERAL_PREFIX\", \"LITERAL_SUFFIX\", \"CREATE_PARAMS\", \"IS_NULLABLE\", \"CASE_SENSITIVE\", \"SEARCHABLE\", "
"\"UNSIGNED_ATTRIBUTE\", \"FIXED_PREC_SCALE\", \"AUTO_UNIQUE_VALUE\", \"LOCAL_TYPE_NAME\", \"MINIMUM_SCALE\", \"MAXIMUM_SCALE\", \"SQL_TYPE_NAME\", \"FS_DATA_TYPE\", "
"\"NUM_PREC_RADIX\", \"USEPRECISION\", \"USELENGTH\", \"SQL_DATA_TYPE\", \"SQL_DATETIME_SUB\", \"INTERVAL_PRECISION\", \"DATETIMESTARTFIELD\", "
"\"DATETIMEENDFIELD\", \"APPLICATION_VERSION\", \"TRANSLATION_ID\") "
"where ob.OBJECT_UID = co.OBJECT_UID "
"and dt.FS_DATA_TYPE = co.FS_DATA_TYPE "
"and co.OBJECT_UID = co1.OBJECT_UID and co1.COLUMN_NUMBER = 0 "
"and (dt.DATETIMESTARTFIELD = co.DATETIME_START_FIELD) "
"and (dt.DATETIMEENDFIELD = co.DATETIME_END_FIELD) "
"and (ob.SCHEMA_NAME = '%s' or trim(ob.SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
"and (ob.OBJECT_NAME = '%s' or trim(ob.OBJECT_NAME) LIKE '%s' ESCAPE '\\') "
"and (co.COLUMN_NAME = '%s' or trim(co.COLUMN_NAME) LIKE '%s' ESCAPE '\\') "
"and (ob.OBJECT_TYPE in ('BT' , 'VI') ) "
"and (trim(co.COLUMN_CLASS) not in ('S', 'M')) "
"FOR READ UNCOMMITTED ACCESS order by 1, 2, 3, co.COLUMN_NUMBER ; ",
tableParam[0], inputParam[0], inputParam[1],
inputParam[2], inputParam[3], inputParam[4],
inputParam[5]);
}
}
break;
case SQL_API_SQLPRIMARYKEYS :
if ((!checkIfWildCard(catalogNm, catalogNmNoEsc) || !checkIfWildCard(schemaNm, schemaNmNoEsc) || !checkIfWildCard(tableNm, tableNmNoEsc)) && !metadataId)
{
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_WILDCARD_NOT_SUPPORTED;
goto MapException;
}
// strcpy((char *)catStmtLabel, "SQL_PRIMARYKEYS_ANSI_Q4");
if (strcmp(catalogNm,"") == 0)
strcpy(tableName1,SEABASE_MD_CATALOG);
else
strcpy(tableName1, catalogNm);
tableParam[0] = tableName1;
/*
strcpy(tableName2, SEABASE_MD_CATALOG);
strcat(tableName2, ".");
strcat(tableName2, SEABASE_MD_SCHEMA);
strcat(tableName2, ".");
strcat(tableName2, smdCatAPITablesList[OBJECTS]);
tableParam[1] = tableName2;
strcpy(tableName3, SEABASE_MD_CATALOG);
strcat(tableName3, ".");
strcat(tableName3, SEABASE_MD_SCHEMA);
strcat(tableName3, ".");
strcat(tableName3, smdCatAPITablesList[KEYS]);
tableParam[2] = tableName3;
tableParam[3] = NULL;
*/
convertWildcard(metadataId, TRUE, schemaNm, expSchemaNm);
convertWildcardNoEsc(metadataId, TRUE, schemaNm, schemaNmNoEsc);
convertWildcard(metadataId, TRUE, tableNm, expTableNm);
convertWildcardNoEsc(metadataId, TRUE, tableNm, tableNmNoEsc);
inputParam[0] = schemaNmNoEsc;
inputParam[1] = expSchemaNm;
inputParam[2] = tableNmNoEsc;
inputParam[3] = expTableNm;
inputParam[4] = NULL;
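// Build the SQLPrimaryKeys result from the OBJECTS and KEYS metadata tables,
// excluding the internal _SALT_ column.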
snprintf(CatalogQuery,sizeof(CatalogQuery),
"select "
"cast('%s' as varchar(128) ) TABLE_CAT,"
"cast(trim(ob.SCHEMA_NAME) as varchar(128) ) TABLE_SCHEM,"
"cast(trim(ob.OBJECT_NAME) as varchar(128) ) TABLE_NAME,"
"trim(ky.COLUMN_NAME) COLUMN_NAME,"
"cast((ky.keyseq_number) as smallint) KEY_SEQ,"
"trim(ob.OBJECT_NAME) PK_NAME "
" from TRAFODION.\"_MD_\".OBJECTS ob, "
"TRAFODION.\"_MD_\".KEYS ky "
" where (ob.SCHEMA_NAME = '%s' or "
" trim(ob.SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
" and (ob.OBJECT_NAME = '%s' or "
" trim(ob.OBJECT_NAME) LIKE '%s' ESCAPE '\\') "
" and ob.OBJECT_UID = ky.OBJECT_UID and ky.COLUMN_NAME <> '_SALT_' "
" FOR READ UNCOMMITTED ACCESS order by 1, 2, 3, 5 ;",
tableParam[0], inputParam[0], inputParam[1],
inputParam[2], inputParam[3]);
break;
case SQL_API_SQLFOREIGNKEYS:
if ((!checkIfWildCard(catalogNm, catalogNmNoEsc) ||
!checkIfWildCard(schemaNm, schemaNmNoEsc) ||
!checkIfWildCard(tableNm, tableNmNoEsc)) &&
!metadataId)
{
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_WILDCARD_NOT_SUPPORTED;
goto MapException;
}
convertWildcard(metadataId, TRUE, schemaNm, expSchemaNm);
convertWildcardNoEsc(metadataId, TRUE, schemaNm, schemaNmNoEsc);
convertWildcard(metadataId, TRUE, tableNm, expTableNm);
convertWildcardNoEsc(metadataId, TRUE, tableNm, tableNmNoEsc);
char fkcatalogNmNoEsc[MAX_ANSI_NAME_LEN + 1];
char fkschemaNmNoEsc[MAX_ANSI_NAME_LEN + 1];
char fktableNmNoEsc[MAX_ANSI_NAME_LEN + 1];
char fkexpCatalogNm[MAX_ANSI_NAME_LEN + 1];
char fkexpSchemaNm[MAX_ANSI_NAME_LEN + 1];
char fkexpTableNm[MAX_ANSI_NAME_LEN + 1];
if (strcmp(fktableNm, "") == 0)
strcpy((char *)fktableNm, "%");
if (strcmp(fkschemaNm, "") == 0)
strcpy((char *)fkschemaNm, "%");
convertWildcard(metadataId, TRUE, fkcatalogNm, fkexpCatalogNm);
convertWildcardNoEsc(metadataId, TRUE, fkcatalogNm, fkcatalogNmNoEsc);
convertWildcard(metadataId, TRUE, fkschemaNm, fkexpSchemaNm);
convertWildcardNoEsc(metadataId, TRUE, fkschemaNm, fkschemaNmNoEsc);
convertWildcard(metadataId, TRUE, fktableNm, fkexpTableNm);
convertWildcardNoEsc(metadataId, TRUE, fktableNm, fktableNmNoEsc);
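// Build the SQLForeignKeys result by joining REF_CONSTRAINTS_VIEW with the
// KEYS_VIEW and COLUMNS_VIEW metadata views for both the PK and FK sides.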
snprintf(CatalogQuery, sizeof(CatalogQuery),
"select "
"cast(PKCO.CATALOG_NAME as varchar(128)) PKTABLE_CAT, "
"cast(PKCO.SCHEMA_NAME as varchar(128)) PKTABLE_SCHEM, "
"cast(PKCO.TABLE_NAME as varchar(128)) PKTABLE_NAME, "
"cast(PKCO.COLUMN_NAME as varchar(128)) PKCOLUMN_NAME, "
"cast(FKCO.CATALOG_NAME as varchar(128)) FKTABLE_CAT, "
"cast(PKCO.SCHEMA_NAME as varchar(128)) FKTABLE_SCHEM, "
"cast(FKCO.TABLE_NAME as varchar(128)) FKTABLE_NAME, "
"cast(FKCO.COLUMN_NAME as varchar(128)) FKCOLUMN_NAME, "
"cast(FKKV.ORDINAL_POSITION as smallint) KEY_SEQ, "
"cast(0 as smallint) update_rule, " // not support
"cast(0 as smallint) delete_rule, " // not support
"cast(FKKV.CONSTRAINT_NAME as varchar(128)) FK_NAME, "
"cast(PKKV.CONSTRAINT_NAME as varchar(128)) PK_NAME, "
"cast(0 as smallint) DEFERRABILITY " // not support
"from "
"TRAFODION.\"_MD_\".REF_CONSTRAINTS_VIEW rcv, "
"TRAFODION.\"_MD_\".KEYS_VIEW PKKV, "
"TRAFODION.\"_MD_\".KEYS_VIEW FKKV, "
"TRAFODION.\"_MD_\".COLUMNS_VIEW PKCO, "
"TRAFODION.\"_MD_\".COLUMNS_VIEW FKCO "
"where "
"FKKV.CONSTRAINT_NAME = rcv.CONSTRAINT_NAME "
"and PKKV.CONSTRAINT_NAME = rcv.UNIQUE_CONSTRAINT_NAME "
"and PKCO.TABLE_NAME = PKKV.TABLE_NAME "
"and FKCO.TABLE_NAME = FKKV.TABLE_NAME "
"and PKCO.COLUMN_NAME = PKKV.COLUMN_NAME "
"and FKCO.COLUMN_NAME = FKKV.COLUMN_NAME "
"and (PKCO.SCHEMA_NAME = '%s' or trim(PKCO.SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
"and (PKCO.TABLE_NAME = '%s' or trim(PKCO.TABLE_NAME) LIKE '%s' ESCAPE '\\') "
"and (FKCO.SCHEMA_NAME = '%s' or trim(FKCO.SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
"and (FKCO.TABLE_NAME = '%s' or trim(FKCO.TABLE_NAME) LIKE '%s' ESCAPE '\\') "
"FOR READ UNCOMMITTED ACCESS ORDER BY 1, 2, 3, 5, 6, 7, 9;",
schemaNmNoEsc, expSchemaNm,
tableNmNoEsc, expTableNm,
fkschemaNmNoEsc, fkexpSchemaNm,
fktableNmNoEsc, fkexpTableNm
);
break;
case SQL_API_SQLSTATISTICS:
if (!checkIfWildCard(catalogNm, catalogNmNoEsc) && !metadataId)
{
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_WILDCARD_NOT_SUPPORTED;
}
if (tableNm[0] != '$' && tableNm[0] != '\\')
{
if (strcmp(catalogNm, "") == 0)
strcpy(tableName1, SEABASE_MD_CATALOG);
else
strcpy(tableName1, catalogNm);
}
tableParam[0] = tableName1;
convertWildcard(metadataId, TRUE, schemaNm, expSchemaNm);
convertWildcardNoEsc(metadataId, TRUE, schemaNm, schemaNmNoEsc);
convertWildcard(metadataId, TRUE, tableNm, expTableNm);
convertWildcardNoEsc(metadataId, TRUE, tableNm, tableNmNoEsc);
inputParam[0] = schemaNmNoEsc;
inputParam[1] = expSchemaNm;
inputParam[2] = tableNmNoEsc;
inputParam[3] = expTableNm;
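// Build the SQLStatistics result: table-cardinality rows taken from the
// SB_HISTOGRAMS table, unioned with index rows from the INDEXES metadata table.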
snprintf(CatalogQuery, sizeof(CatalogQuery),
"select "
"cast('%s' as varchar(128)) TABLE_CAT, "
"cast(trim(ob.SCHEMA_NAME) as varchar(128)) TABLE_SCHEM, "
"cast(trim(ob.OBJECT_NAME) as varchar(128)) TABLE_NAME, "
"cast(NULL as smallint) NON_UNIQUE, " // return NULL if TYPE is SQL_TABLE_STAT
"cast(NULL as varchar(128)) INDEX_QUALIFIER, " // return NULL if TYPE is SQL_TABLE_STAT
"cast(NULL as varchar(128)) INDEX_NAME, " // return NULL if TYPE is SQL_TABLE_STAT
"cast(0 as smallint) TYPE, " // TYPE is SQL_TABLE_STAT
"cast(NULL as smallint) ORDINAL_POSITION, " // return NULL if TYPE is SQL_TABLE_STAT
"cast(trim(co.COLUMN_NAME) as varchar(128)) COLUMN_NAME, "
"cast(NULL as char(1)) ASC_OR_DESC, " // return NULL if TYPE is SQL_TABLE_STAT
"cast(sb.rowcount as integer) CARDINALITY, " // number of rows
"cast(NULL as integer) PAGES, " // not support
"cast(NULL as varchar(128)) FILTER_CONDITION " // not support
"from "
"TRAFODION.\"_MD_\".OBJECTS ob, "
"TRAFODION.\"_MD_\".COLUMNS co, "
"TRAFODION.%s.sb_histograms sb "
"where "
"ob.OBJECT_UID = co.OBJECT_UID "
"and co.OBJECT_UID = sb.TABLE_UID "
"and co.COLUMN_NUMBER = sb.COLUMN_NUMBER "
"and sb.colcount = 1 "
"and (ob.SCHEMA_NAME = '%s' or trim(ob.SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
"and (ob.OBJECT_NAME = '%s' or trim(ob.OBJECT_NAME) LIKE '%s' ESCAPE '\\') "
"and (ob.OBJECT_TYPE in ('BT', 'VI')) "
"and (trim(co.COLUMN_CLASS) not in('S', 'M')) "
"union "
"select "
"cast('%s' as varchar(128)) TABLE_CAT, "
"cast(trim(ob_table.SCHEMA_NAME) as varchar(128)) TABLE_SCHEM, "
"cast(trim(ob_table.OBJECT_NAME) as varchar(128)) TABLE_NAME, "
"cast(case when idx.is_unique = 1 then 0 else 1 end as smallint) NON_UNIQUE, "
"cast(NULL as varchar(128)) INDEX_QUALIFIER, " // not support
"cast(trim(ob.OBJECT_NAME) as varchar(128)) INDEX_NAME, "
"cast(3 as smallint) TYPE, " // SQL_INDEX_OTHER
"cast(0 as smallint) ORDINAL_POSITION, "
"cast('' as varchar(128)) COLUMN_NAME, " // return an empty string if the expression cannot be determined.
"cast(NULL as char(1)) ASC_OR_DESC, " // not subsequent
"cast(NULL as integer) CARDINALITY, "
"cast(NULL as integer) PAGES, "
"cast(NULL as varchar(128)) FILTER_CONDITION "
"from "
"TRAFODION.\"_MD_\".OBJECTS ob, "
"TRAFODION.\"_MD_\".INDEXES idx, "
"TRAFODION.\"_MD_\".OBJECTS ob_table, "
"TRAFODION.\"_MD_\".TABLES tb "
"where "
"idx.BASE_TABLE_UID=tb.TABLE_UID "
"and idx.INDEX_UID=ob.OBJECT_UID "
"and idx.BASE_TABLE_UID=ob_table.OBJECT_UID "
"and (ob_table.SCHEMA_NAME = '%s' or trim(ob_table.SCHEMA_NAME) LIKE '%s' ESCAPE '\\') "
"and (ob_table.OBJECT_NAME = '%s' or trim(ob_table.OBJECT_NAME) LIKE '%s' ESCAPE '\\') "
"and (ob_table.OBJECT_TYPE in ('BT', 'VI')) "
"%s "
"ORDER BY 1, 2, 3, 7, 9, 6 ;",
tableParam[0],
inputParam[0],
inputParam[0], inputParam[1],
inputParam[2], inputParam[3],
tableParam[0],
inputParam[0], inputParam[1],
inputParam[2], inputParam[3],
uniqueness == 1 ? "" : "and idx.is_unique=1"
);
break;
default :
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNSUPPORTED_SMD_API_TYPE;
break;
}
if (exception_->exception_nr == 0)
{
if ((QryCatalogSrvrStmt = getSrvrStmt(catStmtLabel, TRUE)) == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR,
EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
2,
"CATALOG APIs",
"Allocate Statement");
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
}
else
{
// Temporary solution - bypass checks on metadata tables
unsigned int savedParserFlags = 0;
SQL_EXEC_GetParserFlagsForExSqlComp_Internal(savedParserFlags);
try
{
SQL_EXEC_SetParserFlagsForExSqlComp_Internal(INTERNAL_QUERY_FROM_EXEUTIL);
retcode = QryCatalogSrvrStmt->ExecDirect(NULL, CatalogQuery, EXTERNAL_STMT, TYPE_SELECT, SQL_ASYNC_ENABLE_OFF, 0);
SQL_EXEC_AssignParserFlagsForExSqlComp_Internal(savedParserFlags);
if (retcode == SQL_ERROR)
{
ERROR_DESC_def *p_buffer = QryCatalogSrvrStmt->sqlError.errorList._buffer;
strncpy(RequestError, p_buffer->errorText,sizeof(RequestError) -1);
RequestError[sizeof(RequestError) - 1] = '\0';
SendEventMsg(MSG_SQL_ERROR,
EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
2,
p_buffer->sqlcode,
RequestError);
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.SQLError.errorList._length = QryCatalogSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = QryCatalogSrvrStmt->sqlError.errorList._buffer;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECDIRECT_FAILED;
}
} //try
catch (...)
{
SQL_EXEC_AssignParserFlagsForExSqlComp_Internal(savedParserFlags);
SendEventMsg(MSG_PROGRAMMING_ERROR,
EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
1,
"Exception in executing Catalog API");
exception_->exception_nr = odbc_SQLSvc_GetSQLCatalogs_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECDIRECT_FAILED;
} // catch
}
}
if (exception_->exception_nr == 0)
{
QryCatalogSrvrStmt->sqlStmtType = TYPE_SELECT_CATALOG;
outputDesc->_length = QryCatalogSrvrStmt->outputDescList._length;
outputDesc->_buffer = QryCatalogSrvrStmt->outputDescList._buffer;
}
MapException:
// resource statistics
if (resStatSession != NULL)
{
if (exception_->exception_nr != 0)
resStatSession->totalCatalogErrors ++;
if (sqlWarning->_length != 0)
resStatSession->totalCatalogWarnings ++;
}
if (resStatStatement != NULL)
resStatStatement->catFlagOn = FALSE;
if (exception_->exception_nr != 0)
{
SRVR_STMT_HDL *pSrvrStmt = NULL;
if ((pSrvrStmt = getSrvrStmt(catStmtLabel, FALSE)) != NULL)
{
exception_->exception_nr = 0;
exception_->exception_detail = 0;
pSrvrStmt->rowsAffected = 0;
// CLEARDIAGNOSTICS(pSrvrStmt);
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
}
}
SRVRTRACE_EXIT(FILE_SME+14);
return;
}
/*
* Synchronous method function prototype for
* operation 'odbc_SQLSvc_InitializeDialogue'
*/
extern "C" void
odbc_SQLSvc_InitializeDialogue_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_InitializeDialogue_exc_ *exception_
, /* In */ const USER_DESC_def *userDesc
, /* In */ const CONNECTION_CONTEXT_def *inContext
, /* In */ DIALOGUE_ID_def dialogueId
, /* Out */ OUT_CONNECTION_CONTEXT_def *outContext
)
{
SRVRTRACE_ENTER(FILE_SME+15);
exception_->exception_nr = 0;
Int32 retcode = SQL_SUCCESS;
char userSid[MAX_TEXT_SID_LEN+1];
char pwd[387];
pwd[0] = '\0';
__int64 julian = 0;
char primary_rolename[MAX_ROLENAME_LEN+1];
char logon_rolename[MAX_ROLENAME_LEN+1];
short logon_roleLen = 0;
__int64 redefTime = 0;
Int32 authIDType = 0;
int encryption = 1;
int refresh = 0;
char* pPWD = (char*)userDesc->password._buffer;
bzero(primary_rolename, sizeof(primary_rolename));
// volatile int done = 0;
// while (!done) {
// sleep(10);
// }
userSid[0] = 0;
// #ifdef _TMP_SQ_SECURITY
UA_Status authErrorDetail;
DBUserAuth *userSession = DBUserAuth::GetInstance();
size_t passwordLen = 0;
CLIENT_INFO client_info;
PERFORMANCE_INFO performanceInfo;
AUTHENTICATION_INFO authenticationInfo;
if (userDesc->userDescType != AUTHENTICATED_USER_TYPE)
{
srvrGlobal->QSRoleName[0] = '\0';
int64 tempStartTime, tempEndTime = 0;
int64 loginStartTime = JULIANTIMESTAMP();
client_info.client_name = setinit.clientId;
client_info.client_user_name = setinit.clientUserName;
client_info.application_name = setinit.applicationId;
#ifndef _DEBUG
// Disable generation of cores in release version
struct rlimit rlim;
int limit, ret_rlimit;
// Get the current limit and save.
ret_rlimit = getrlimit(RLIMIT_CORE, &rlim);
limit = rlim.rlim_cur;
// Set the current limit to 0, disabling generation of core
rlim.rlim_cur = 0;
ret_rlimit = setrlimit(RLIMIT_CORE, &rlim);
#endif
retcode = decrypt_message(pPWD,primary_rolename);
tempStartTime = JULIANTIMESTAMP() ;
if (retcode == SECMXO_NO_ERROR)
{
size_t length = strlen(userDesc->userName);
for (size_t i = 0; i < length; i++)
userDesc->userName[i] = toupper(userDesc->userName[i]);
retcode = userSession->verify(userDesc->userName
,pPWD
,authErrorDetail
,authenticationInfo
,client_info
,performanceInfo
);
}
if (retcode == 0)
{
srvrGlobal->redefTime = authenticationInfo.usersInfo.redefTime;
strcpy(srvrGlobal->QSRoleName, "NONE");
}
#ifndef _DEBUG
// Revert to the previous setting for core generation
rlim.rlim_cur = limit;
ret_rlimit = setrlimit(RLIMIT_CORE, &rlim);
#endif
tempEndTime = JULIANTIMESTAMP();
setinit.ldapLoginTime = tempEndTime - tempStartTime;
setinit.totalLoginTime = tempEndTime - loginStartTime;
setinit.sqlUserTime = performanceInfo.sqlUserTime;
setinit.searchConnectionTime = performanceInfo.searchConnectionTime;
setinit.searchTime = performanceInfo.searchTime;
setinit.authenticationConnectionTime = performanceInfo.authenticationConnectionTime;
setinit.authenticationTime = performanceInfo.authenticationTime;
if (retcode != SECMXO_NO_ERROR)
{
exception_->exception_detail = retcode;
exception_->exception_nr = odbc_SQLSvc_InitializeDialogue_SQLError_exn_;
srvrGlobal->QSRoleName[0] = '\0';
SETSECURITYERROR(retcode, &exception_->u.SQLError.errorList);
SRVRTRACE_EXIT(FILE_SME+15);
if (retcode == SECMXO_INTERNAL_ERROR_FATAL)
{
SendEventMsg(MSG_PROGRAMMING_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef,
1, "Security layer returned fatal error. Server exiting.");
exit(0);
}
return;
}
tempEndTime = JULIANTIMESTAMP();
setinit.totalLoginTime = tempEndTime - loginStartTime;
if (authenticationInfo.error == 0)
retcode = WSQL_EXEC_SetAuthID(userDesc->userName,
authenticationInfo.usersInfo.databaseUsername,
authenticationInfo.tokenKey,
authenticationInfo.tokenKeySize,
authenticationInfo.usersInfo.effectiveUserID,
authenticationInfo.usersInfo.sessionUserID);
else
{
bool bSQLMessageSet;
exception_->exception_detail = -8837;
if (authenticationInfo.errorDetail == 1)
{
setAuthenticationError(bSQLMessageSet,&(exception_->u.SQLError),userDesc->userName,false);
exception_->exception_nr = odbc_SQLSvc_InitializeDialogue_InvalidUser_exn_;
}
else
{
setAuthenticationError(bSQLMessageSet,&(exception_->u.SQLError),userDesc->userName,true);
exception_->exception_nr = odbc_SQLSvc_InitializeDialogue_SQLError_exn_;
ERROR_DESC_def *sqlError = exception_->u.SQLError.errorList._buffer;
SendEventMsg(MSG_SQL_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER, srvrGlobal->srvrObjRef,
3, ODBCMX_SERVER, sqlError->sqlstate, sqlError->errorText);
}
}
}
SRVRTRACE_EXIT(FILE_SME+15);
return;
}
/*
* Synchronous method function for
* operation 'odbc_SQLSvc_TerminateDialogue'
*/
extern "C" void
odbc_SQLSvc_TerminateDialogue_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_TerminateDialogue_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
)
{
SRVRTRACE_ENTER(FILE_SME+16);
exception_->exception_nr = 0;
//odbc_SQLSvc_EndTransaction_exc_ endTransactionException;
//ERROR_DESC_LIST_def sqlWarning;
// Rollback the transaction, Don't bother to check if autocommit is on or off, since SQL
// doesn't check for it
// When there is no transaction outstanding, SQL would give an error and ignore this error.
if (WSQL_EXEC_Xact(SQLTRANS_STATUS,NULL) == 0)
EXECDIRECT("ROLLBACK WORK");
//odbc_SQLSvc_EndTransaction_sme_(objtag_, call_id_, &endTransactionException,
// dialogueId, SQL_ROLLBACK
// ,&sqlWarning
// );
// resource statistics resStatSession->end() is called in SRVR::BreakDialogue()
//if (resStatSession != NULL)
// resStatSession->end();
//end rs
SRVRTRACE_EXIT(FILE_SME+16);
return;
}
/*
*
* Synchronous method function for
* operation 'odbc_SQLSvc_SetConnectionOption'
*/
extern "C" void
odbc_SQLSvc_SetConnectionOption_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLSvc_SetConnectionOption_exc_ *exception_
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ IDL_short connectionOption
, /* In */ Int32 optionValueNum
, /* In */ IDL_string optionValueStr
, /* Out */ ERROR_DESC_LIST_def *sqlWarning
)
{
SRVRTRACE_ENTER(FILE_SME+17);
char stmtLabel[MAX_STMT_LABEL_LEN+1];
char sqlString[256];
Int32 retcode = 0;
bool sqlStringNeedsExecution = true;
SRVR_STMT_HDL *pSrvrStmt = NULL;
SRVR_STMT_HDL *resStmt;
int64 local_xid;
UINT xid_length = sizeof(local_xid);
char buffer[100];
char valueStr[MAX_SQL_IDENTIFIER_LEN+1];
char schemaValueStr[MAX_SQL_IDENTIFIER_LEN+MAX_SQL_IDENTIFIER_LEN+5+1]; // 5 for quotes + dot
char *CatalogNameTypeStr = '\0';
char *endPtr;
exception_->exception_nr = CEE_SUCCESS;
sqlWarning->_length = 0;
sqlWarning->_buffer = NULL;
memset(sqlString, 0, 256);
// Given a label find out the SRVR_STMT_HDL
if ((pSrvrStmt = getSrvrStmt("STMT_INTERNAL_1", TRUE)) == NULL)
{
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
return;
}
// clientLCID = srvrGlobal->clientLCID is done during the creation of the statement
// handle. However, in InitializeDialogue(), the correct value of
// srvrGlobal->clientLCID is set only after the statement handle is created.
// This causes an incorrect clientLCID value (the initialized value zero) to
// be set on pSrvrStmt. This value dictates the character set translations.
// An incorrect clientLCID causes incorrect translations for all
// executions on that statement. The fix is to set the correct value after the
// statement is created.
pSrvrStmt->setclientLCID(srvrGlobal->clientLCID);
switch (connectionOption) {
//Special Case//
case SQL_ACCESSMODE_AND_ISOLATION:
switch (optionValueNum) {
case SQL_TXN_READ_UNCOMMITTED:
// Modified: when the data source level default was READ UNCOMMITTED, subsequent connections failed, so the access mode is set explicitly here.
strcpy(sqlString, "SET TRANSACTION READ ONLY, ISOLATION LEVEL READ UNCOMMITTED");
break;
case SQL_TXN_READ_COMMITTED:
// Modified: when the data source level default was READ UNCOMMITTED, subsequent connections failed, so the access mode is set explicitly here.
strcpy(sqlString, "SET TRANSACTION READ WRITE, ISOLATION LEVEL READ COMMITTED");
break;
default:
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_OPTION_VALUE_NUM;
return;
}
break;
//Special Case//
case SQL_ACCESS_MODE:
strcpy(sqlString, "SET TRANSACTION ");
switch (optionValueNum) {
case SQL_MODE_READ_WRITE:
strcat(sqlString, "READ WRITE");
break;
case SQL_MODE_READ_ONLY:
if ((srvrGlobal->EnvironmentType & MXO_MSACCESS_1997) || (srvrGlobal->EnvironmentType & MXO_MSACCESS_2000)) // changed to work around an MS Access problem: Access inserts, updates and deletes while SQL_ACCESS_MODE is SQL_MODE_READ_ONLY.
strcat(sqlString, "READ WRITE");
else
strcat(sqlString, "READ ONLY");
break;
case SQL_MODE_NULL:
// Need to determine the access mode from the profile;
// for now return ParamError. Note: intentional fall-through (no break)
default:
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_OPTION_VALUE_NUM;
return;
}
break;
case SQL_TXN_ISOLATION:
strcpy(sqlString, "SET TRANSACTION ISOLATION LEVEL ");
switch (optionValueNum) {
case SQL_TXN_READ_UNCOMMITTED:
strcat(sqlString, "READ UNCOMMITTED");
break;
case SQL_TXN_READ_COMMITTED:
strcat(sqlString, "READ COMMITTED");
break;
case SQL_TXN_REPEATABLE_READ:
strcat(sqlString, "REPEATABLE READ");
break;
case SQL_TXN_SERIALIZABLE:
strcat(sqlString, "SERIALIZABLE");
break;
default:
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_OPTION_VALUE_NUM;
return;
}
break;
case SQL_ATTR_ENLIST_IN_DTC:
#ifdef TIP_DEFINED
if (srvrGlobal->tip_gateway != NULL) {
tip_close(srvrGlobal->tip_gateway);
srvrGlobal->tip_gateway = NULL;
}
// Check for non-DTC transaction
if (optionValueNum == NULL){
SetTipUrl((IDL_char *)NULL);
SetLocalXid(NULL, 0);
exception_->exception_nr = 0;
sqlWarning->_length = 0;
sqlWarning->_buffer = NULL;
return;
}
// Check for previous DTC transaction
if(GetTipUrl() != (IDL_char *)NULL){
SetTipUrl((IDL_char *)NULL);
SetLocalXid(NULL, 0);
}
retcode = tip_open(&(srvrGlobal->tip_gateway));
if (retcode != TIPOK){
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_OPEN_TIP_GATEWAY_FAILED;
switch (retcode) {
case TIPNOTCONNECTED:
SendEventMsg (MSG_SRVR_DTC_TIP_NOTCONNECTED,
EVENTLOG_WARNING_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
0);
break;
case TIPNOTCONFIGURED:
SendEventMsg (MSG_SRVR_DTC_TIP_NOTCONFIGURED,
EVENTLOG_WARNING_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
0);
break;
default:
SendEventMsg (MSG_SRVR_DTC_TIP_ERROR,
EVENTLOG_WARNING_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
0);
break;
}
return;
}
strncpy(buffer, optionValueStr, sizeof(buffer));
buffer[sizeof(buffer)-1] = 0;
retcode = tip_pull(srvrGlobal->tip_gateway,
(IDL_char *)buffer,
&local_xid,
xid_length);
if (retcode != TIPOK){
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_PULL_TIP_FAILED;
switch (retcode) {
case TIPNOTCONNECTED:
SendEventMsg (MSG_SRVR_DTC_TIP_NOTCONNECTED,
EVENTLOG_WARNING_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
0);
break;
case TIPNOTCONFIGURED:
SendEventMsg (MSG_SRVR_DTC_TIP_NOTCONFIGURED,
EVENTLOG_WARNING_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
0);
break;
default:
SendEventMsg (MSG_SRVR_DTC_TIP_ERROR,
EVENTLOG_WARNING_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
0);
break;
}
return;
}
SetTipUrl((IDL_char *)buffer);
SetLocalXid(local_xid,
xid_length);
exception_->exception_nr = 0;
sqlWarning->_length = 0;
sqlWarning->_buffer = NULL;
#else
// RS we'll return an error just in case a user tries to use TIP until we get the libraries
sqlWarning->_length = 0;
sqlWarning->_buffer = NULL;
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_OPEN_TIP_GATEWAY_FAILED;
SendEventMsg (MSG_SRVR_DTC_TIP_ERROR,
EVENTLOG_WARNING_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
0);
#endif
return;
case SET_AUTOBEGIN:
strcpy(sqlString, "SET TRANSACTION AUTOBEGIN ON");
break;
case SQL_AUTOCOMMIT:
// if a change is required
sqlStringNeedsExecution = false;
if ( ((srvrGlobal->bAutoCommitOn == TRUE) && (optionValueNum == 0 ))
|| ((srvrGlobal->bAutoCommitOn == FALSE) && (optionValueNum == 1 )) )
{
//check for active txn, if yes commit them
if ((srvrGlobal->bAutoCommitOn == FALSE) && (WSQL_EXEC_Xact(SQLTRANS_STATUS,NULL) == 0))
retcode = pSrvrStmt->ExecDirect(NULL, "COMMIT WORK", INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0);
if (retcode != SQL_ERROR)
{
if (optionValueNum)
{
if( SQL_SUCCESS == (retcode = pSrvrStmt->ExecDirect(NULL, "SET TRANSACTION AUTOCOMMIT ON", INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0)))
srvrGlobal->bAutoCommitOn = TRUE;
}
else
{
if( SQL_SUCCESS == (retcode = pSrvrStmt->ExecDirect(NULL, "SET TRANSACTION AUTOCOMMIT OFF", INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0)))
srvrGlobal->bAutoCommitOn = FALSE;
}
}
}
else
return;
break;
case SET_CATALOG:
case SQL_ATTR_CURRENT_CATALOG:
{
sqlStringNeedsExecution = false;
int len = 0;
bool defaultCatalog = false;
bool isDoubleQuoted = false;
char* tempSqlString = NULL;
if (optionValueStr == NULL || (optionValueStr != NULL && optionValueStr[0] == '\0'))
{
len = strlen(ODBCMX_DEFAULT_CATALOG);
defaultCatalog = true;
}
else
len = strlen(optionValueStr);
tempSqlString = new char[len+20];
if (tempSqlString == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR, EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, "SET CATALOG");
exit(0);
}
if (!defaultCatalog && optionValueStr[0] == '"' && optionValueStr[len-1] == '"')
isDoubleQuoted = true;
strcpy(tempSqlString, "SET CATALOG ");
if (!isDoubleQuoted)
strcat(tempSqlString, "'");
if (defaultCatalog)
strcat(tempSqlString, ODBCMX_DEFAULT_CATALOG);
else
strcat(tempSqlString, optionValueStr);
if (!isDoubleQuoted)
strcat(tempSqlString, "'");
if( SQL_SUCCESS == (retcode = pSrvrStmt->ExecDirect(NULL, tempSqlString, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0)))
{
if (defaultCatalog)
strcpy(srvrGlobal->DefaultCatalog, ODBCMX_DEFAULT_CATALOG);
else if (isDoubleQuoted)
{
strncpy(srvrGlobal->DefaultCatalog, optionValueStr+1, len-2);
srvrGlobal->DefaultCatalog[len-2] = '\0';
}
else
strcpy(srvrGlobal->DefaultCatalog, optionValueStr);
}
delete [] tempSqlString;
}
break;
case SET_SCHEMA:
{
if (optionValueStr == NULL || (optionValueStr != NULL && optionValueStr[0] == '\0'))
sprintf(schemaValueStr, "%s.%s", ODBCMX_DEFAULT_CATALOG, ODBCMX_DEFAULT_SCHEMA);
else
{
if (strlen(optionValueStr) < sizeof(schemaValueStr))
strcpy(schemaValueStr, optionValueStr);
else
{
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_OPTION_VALUE_STR;
return;
}
}
strcpy(sqlString, "SET SCHEMA ");
strncat(sqlString, schemaValueStr, sizeof(sqlString));
sqlString[sizeof(sqlString)-1] = 0;
}
break;
case RESET_DEFAULTS:
strcpy(sqlString, "CONTROL QUERY DEFAULT * RESET");
break;
case RESET_RESET_DEFAULTS:
strcpy(sqlString, "CONTROL QUERY DEFAULT * RESET RESET");
break;
case CUT_CONTROLQUERYSHAPE:
strcpy(sqlString, "CONTROL QUERY SHAPE CUT");
break;
case BEGIN_SESSION:
if(optionValueStr != NULL && strlen(optionValueStr) > 0)
sprintf(sqlString,"SET SESSION DEFAULT SQL_SESSION 'BEGIN:%0.200s';",optionValueStr);
else
strcpy(sqlString, "SET SESSION DEFAULT SQL_SESSION 'BEGIN';");
break;
// Added the workaround below for a volatile table SQL problem.
// No longer called from InitializeDialogue since the executor fixed it.
case SET_SESSION_USERNAME:
sprintf( sqlString, "CONTROL QUERY DEFAULT session_id '%s'", srvrGlobal->sessionId );
break;
case END_SESSION:
strcpy(sqlString, "SET SESSION DEFAULT SQL_SESSION 'END'");
break;
case SET_CATALOGNAMETYPE:
strcpy(sqlString, "SET NAMETYPE ANSI");
break;
case SET_SETANDCONTROLSTMTS:
break;
case SET_ODBC_PROCESS:
strcpy(sqlString, "CONTROL QUERY DEFAULT ODBC_PROCESS 'TRUE'");
break;
case SET_JDBC_PROCESS:
strcpy(sqlString, "CONTROL QUERY DEFAULT JDBC_PROCESS 'TRUE'");
break;
case SET_INFER_NCHAR:
strcpy(sqlString, "CONTROL QUERY DEFAULT INFER_CHARSET 'ON'");
break;
case SET_EXPLAIN_PLAN:
strcpy(sqlString,"CONTROL QUERY DEFAULT GENERATE_EXPLAIN 'ON'");
break;
case SQL_ATTR_ROWSET_RECOVERY:
if (optionValueNum)
srvrGlobal->EnvironmentType |= MXO_ROWSET_ERROR_RECOVERY;
else
srvrGlobal->EnvironmentType &= (0xFFFF-MXO_ROWSET_ERROR_RECOVERY);
return;
case SQL_ATTR_CONCURRENCY:
strcpy(sqlString, "CONTROL QUERY DEFAULT READONLY_CURSOR ");
switch (optionValueNum)
{
case SQL_CONCUR_READ_ONLY:
strcat(sqlString, "'TRUE'");
break;
case SQL_CONCUR_LOCK:
case SQL_CONCUR_ROWVER:
case SQL_CONCUR_VALUES:
strcat(sqlString, "'FALSE'");
break;
default:
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_OPTION_VALUE_NUM;
return;
}
break;
case JDBC_ATTR_CONN_IDLE_TIMEOUT:
if (srvrGlobal->drvrVersion.componentId == JDBC_DRVR_COMPONENT)
{
if (optionValueNum > JDBC_DATASOURCE_CONN_IDLE_TIMEOUT)
srvrGlobal->javaConnIdleTimeout = optionValueNum;
else
srvrGlobal->javaConnIdleTimeout = JDBC_DATASOURCE_CONN_IDLE_TIMEOUT;
}
else
{
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_CONNECTION_OPTION;
}
return;
case CONN_IDLE_TIMER_RESET:
// this connection attribute is JDBC-exclusive; NDCS just needs to recognize it and do nothing
return;
break;
// Set priority of DP2 relative to master executor/ESPs
case CONTROL_TABLE_PRIORITY:
sprintf(sqlString,"CONTROL TABLE * PRIORITY_DELTA '%d'", optionValueNum);
break;
case SET_STATISTICS:
strcpy(sqlString,"CONTROL QUERY DEFAULT detailed_statistics 'PERTABLE'");
break;
// JDBC sets this connection attribute to set the Proxy syntax for SPJ result sets
case SET_SPJ_ENABLE_PROXY:
if(optionValueNum)
srvrGlobal->bSpjEnableProxy = true;
else
srvrGlobal->bSpjEnableProxy = false;
return;
break;
case SQL_ATTR_JOIN_UDR_TRANSACTION:
errno = 0;
endPtr = NULL;
srvrGlobal->spjTxnId = strtoll(optionValueStr, &endPtr, 10);
//srvrGlobal->spjTxnId = atoll(optionValueStr);
if( errno == 0 )
{
#ifdef SPJ_TXN_TEST
sprintf(msg, "SQL_ATTR_JOIN_UDR_TRANSACTION %lld", srvrGlobal->spjTxnId);
SendEventMsg(MSG_SERVER_TRACE_INFO, EVENTLOG_INFORMATION_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, msg);
#endif
retcode = JOINTRANSACTION( srvrGlobal->spjTxnId );
if(retcode != 0)
{
sprintf(buffer,"Transaction join failed with error %d",retcode);
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = buffer;
}
else
srvrGlobal->bspjTxnJoined = TRUE;
}
else
{
sprintf(buffer,"Unable to retrieve transaction ID. Error %d",errno);
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = buffer;
}
return;
break;
case SQL_ATTR_SUSPEND_UDR_TRANSACTION:
endPtr = NULL;
errno = 0;
srvrGlobal->spjTxnId = strtoll(optionValueStr, &endPtr, 10);
//srvrGlobal->spjTxnId = atoll(optionValueStr);
if( errno == 0 )
{
#ifdef SPJ_TXN_TEST
sprintf(msg, "SQL_ATTR_SUSPEND_UDR_TRANSACTION %lld", srvrGlobal->spjTxnId);
SendEventMsg(MSG_SERVER_TRACE_INFO, EVENTLOG_INFORMATION_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 1, msg);
#endif
retcode = SUSPENDTRANSACTION( (short*)&(srvrGlobal->spjTxnId) );
if(retcode != 0)
{
sprintf(buffer,"Transaction suspend failed with error %d",retcode);
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = buffer;
}
else
srvrGlobal->bspjTxnJoined = FALSE;
}
else
{
sprintf(buffer,"Unable to retrieve transaction ID. Error %d",errno);
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = buffer;
}
return;
break;
case SET_INPUT_CHARSET:
snprintf(sqlString,200,"CONTROL QUERY DEFAULT input_charset '%s'", getCharsetStr(srvrGlobal->clientLCID));
break;
case SET_TERMINAL_CHARSET:
snprintf(sqlString,200,"CONTROL QUERY DEFAULT terminal_charset '%s'", getCharsetStr(srvrGlobal->clientErrorLCID));
break;
case SET_NVCI_PROCESS:
sprintf(sqlString, "CONTROL QUERY DEFAULT NVCI_PROCESS 'ON'");
break;
case WMS_QUERY_MONITORING:
strcpy(sqlString, "CONTROL QUERY DEFAULT WMS_QUERY_MONITORING 'OFF'");
break;
default:
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_INVALID_CONNECTION_OPTION;
return;
}
if(sqlStringNeedsExecution)
{
if (connectionOption == SET_SETANDCONTROLSTMTS)
retcode = pSrvrStmt->ExecDirect(NULL, optionValueStr, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0);
else
retcode = pSrvrStmt->ExecDirect(NULL, sqlString, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0);
}
switch (retcode)
{
case SQL_SUCCESS:
exception_->exception_nr = 0;
// Ignore estimatedCost and rowsAffected
sqlWarning->_length = pSrvrStmt->sqlWarning._length;
sqlWarning->_buffer = pSrvrStmt->sqlWarning._buffer;
break;
case SQL_ERROR:
if(pSrvrStmt->sqlError.errorList._buffer->sqlcode == -15371) // Executor error 15371 should be treated as a warning.
{
exception_->exception_nr = 0;
SendEventMsg(MSG_SQL_WARNING, EVENTLOG_WARNING_TYPE,
srvrGlobal->nskProcessInfo.processId, ODBCMX_SERVER,
srvrGlobal->srvrObjRef, 3, "SQL/MX", "15371", sqlString);
}
else
{
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_SQLError_exn_;
exception_->u.SQLError.errorList._length = pSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = pSrvrStmt->sqlError.errorList._buffer;
}
break;
case PROGRAM_ERROR:
exception_->exception_nr = odbc_SQLSvc_SetConnectionOption_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_SETCONNECTOPTION_FAILED;
default:
break;
}
SRVRTRACE_EXIT(FILE_SME+17);
return;
}
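/*
 * Synchronous method function for
 * operation 'odbc_SQLSrvr_FetchPerf'
 */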
extern "C" void
odbc_SQLSrvr_FetchPerf_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ Int32 *returnCode
, /* In */ DIALOGUE_ID_def dialogueId
, /* In */ const IDL_char *stmtLabel
, /* In */ Int32 maxRowCnt
, /* In */ Int32 maxRowLen
, /* In */ IDL_short sqlAsyncEnable
, /* In */ Int32 queryTimeout
, /* Out */ Int32 *rowsAffected
, /* Out */ Int32 *outValuesFormat
, /* Out */ SQL_DataValue_def *outputDataValue
, /* Out */ Int32 *sqlWarningOrErrorLength
, /* Out */ BYTE *&sqlWarningOrError
)
{
SRVRTRACE_ENTER(FILE_SME+8);
SRVR_STMT_HDL *pSrvrStmt = NULL;
SQLRETURN rc = SQL_SUCCESS;
int outputDataOffset = 0;
*returnCode = SQL_SUCCESS;
if (maxRowCnt < 0)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Invalid Row Count", sqlWarningOrErrorLength, sqlWarningOrError);
}
else
{
pSrvrStmt = getSrvrStmt(stmtLabel, FALSE);
if (pSrvrStmt == NULL)
{
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Statement Label not found", sqlWarningOrErrorLength, sqlWarningOrError);
}
else
{
if (pSrvrStmt->sqlWarningOrErrorLength > 0 &&
pSrvrStmt->sqlWarningOrError != NULL)
{
delete pSrvrStmt->sqlWarningOrError;
}
pSrvrStmt->sqlWarningOrErrorLength = 0;
pSrvrStmt->sqlWarningOrError = NULL;
}
}
if (*returnCode == SQL_SUCCESS)
{
// limit on maxRowsFetched from WMS
if (pSrvrStmt->sqlStmtType != TYPE_SELECT_CATALOG)
{
if (srvrGlobal->maxRowsFetched != 0 && pSrvrStmt->m_bDoneWouldLikeToExecute)
{
if (srvrGlobal->maxRowsFetched <= pSrvrStmt->m_curRowsFetched)
{
WSQL_EXEC_CloseStmt(&pSrvrStmt->stmt);
WSQL_EXEC_ClearDiagnostics(&pSrvrStmt->stmt);
pSrvrStmt->isClosed = true;
pSrvrStmt->bFirstSqlBulkFetch = false;
pSrvrStmt->m_curRowsFetched = 0;
// *returnCode = SQL_NO_DATA_FOUND;
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "The limit for maximum rows to be returned for a query, as set by the administrator, was exceeded", sqlWarningOrErrorLength, sqlWarningOrError);
goto ret;
}
else
{
if (pSrvrStmt->bFirstSqlBulkFetch == true)
pSrvrStmt->m_curRowsFetched = 0;
if (pSrvrStmt->m_curRowsFetched + maxRowCnt <= srvrGlobal->maxRowsFetched )
pSrvrStmt->maxRowCnt = maxRowCnt;
else
{
pSrvrStmt->maxRowCnt = srvrGlobal->maxRowsFetched - pSrvrStmt->m_curRowsFetched;
if (pSrvrStmt->maxRowCnt <= 0)
pSrvrStmt->maxRowCnt = 1;
}
}
}
else
pSrvrStmt->maxRowCnt = maxRowCnt;
}
else
pSrvrStmt->maxRowCnt = maxRowCnt;
pSrvrStmt->maxRowLen = maxRowLen;
// resource statistics
if (resStatStatement != NULL && pSrvrStmt->isClosed == FALSE && pSrvrStmt->bFetchStarted == FALSE && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
pSrvrStmt->bFetchStarted = TRUE;
pSrvrStmt->inState = inState = STMTSTAT_FETCH;
inSqlStmtType = TYPE_UNKNOWN;
inEstimatedCost = 0;
inQueryId = NULL;
inSqlString = NULL;
inErrorStatement = 0;
inWarningStatement = 0;
inRowCount = 0;
inErrorCode = 0;
inSqlError = NULL;
inSqlErrorLength = 0;
/*resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt->sqlUniqueQueryID,
pSrvrStmt->cost_info,
pSrvrStmt->comp_stats_info,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);*/
resStatStatement->start(inState,
pSrvrStmt->sqlQueryType,
stmtLabel,
pSrvrStmt,
inEstimatedCost,
&pSrvrStmt->m_need_21036_end_msg);
}
// end rs
if (pSrvrStmt->sqlStmtType != TYPE_SELECT_CATALOG)
{
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
if(pSrvrStmt->outputDataValue._length > 0 &&
pSrvrStmt->outputDataValue._buffer != NULL)
delete pSrvrStmt->outputDataValue._buffer;
pSrvrStmt->outputDataValue._length = 0;
pSrvrStmt->outputDataValue._buffer = NULL;
if (pSrvrStmt->isClosed)
{
pSrvrStmt->m_curRowsFetched = 0;
pSrvrStmt->bFirstSqlBulkFetch = false;
*returnCode = SQL_NO_DATA_FOUND;
goto ret;
}
pSrvrStmt->currentMethod = odbc_SQLSvc_FetchPerf_ldx_;
// if (pSrvrStmt->sqlBulkFetchPossible && (pSrvrStmt->sqlQueryType == SQL_SELECT_NON_UNIQUE || pSrvrStmt->sqlQueryType == SQL_SP_RESULT_SET))
if (srvrGlobal->drvrVersion.buildId & ROWWISE_ROWSET)
{
*outValuesFormat = ROWWISE_ROWSETS;
rc = FETCH2bulk(pSrvrStmt);
if (pSrvrStmt->rowsAffected > 0)
{
if(pSrvrStmt->outputDataValue._length == 0 && pSrvrStmt->outputDataValue._buffer == NULL)
{
outputDataValue->_buffer = pSrvrStmt->outputDescVarBuffer;
outputDataValue->_length = pSrvrStmt->outputDescVarBufferLen*pSrvrStmt->rowsAffected;
}
else
{
outputDataValue->_buffer = pSrvrStmt->outputDataValue._buffer;
outputDataValue->_length = pSrvrStmt->outputDataValue._length;
}
}
else
{
outputDataValue->_buffer = NULL;
outputDataValue->_length = 0;
}
}
else
{
*outValuesFormat = COLUMNWISE_ROWSETS;
//pSrvrStmt->maxRowCnt = maxRowCnt;
//pSrvrStmt->maxRowLen = maxRowLen;
rc = FETCHPERF(pSrvrStmt, outputDataValue);
}
switch (rc)
{
case ODBC_RG_WARNING:
case SQL_SUCCESS_WITH_INFO:
*returnCode = SQL_SUCCESS_WITH_INFO;
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
*rowsAffected = pSrvrStmt->rowsAffected;
if (*rowsAffected > 0)
pSrvrStmt->m_curRowsFetched += *rowsAffected;
break;
case SQL_SUCCESS:
*returnCode = SQL_SUCCESS;
*rowsAffected = pSrvrStmt->rowsAffected;
if (*rowsAffected > 0)
pSrvrStmt->m_curRowsFetched += *rowsAffected;
break;
case SQL_STILL_EXECUTING:
*returnCode = SQL_STILL_EXECUTING;
break;
case SQL_INVALID_HANDLE:
*returnCode = SQL_INVALID_HANDLE;
break;
case SQL_NO_DATA_FOUND:
pSrvrStmt->bFirstSqlBulkFetch = false;
*returnCode = SQL_NO_DATA_FOUND;
break;
case SQL_ERROR:
pSrvrStmt->bFirstSqlBulkFetch = false;
*returnCode = SQL_ERROR;
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
break;
case PROGRAM_ERROR:
pSrvrStmt->bFirstSqlBulkFetch = false;
*returnCode = SQL_ERROR;
GETMXCSWARNINGORERROR(-1, "HY000", "Fetch Failed", sqlWarningOrErrorLength, sqlWarningOrError);
break;
default:
break;
}
}
else
{ // Catalog APIs
outputDataOffset = *(int*)pSrvrStmt->outputDataValue.pad_to_offset_8_;
*outValuesFormat = COLUMNWISE_ROWSETS;
rc = FETCHPERF(pSrvrStmt, &pSrvrStmt->outputDataValue);
if (pSrvrStmt->sqlError.errorList._buffer != NULL)
{
*returnCode = SQL_ERROR;
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
if (pSrvrStmt->outputDataValue._buffer != NULL)
delete pSrvrStmt->outputDataValue._buffer;
pSrvrStmt->outputDataValue._buffer = NULL;
pSrvrStmt->outputDataValue._length = 0;
}
else if (pSrvrStmt->rowsAffected == 0 || pSrvrStmt->rowsAffected == -1)
{
if (pSrvrStmt->bSQLMessageSet)
pSrvrStmt->cleanupSQLMessage();
pSrvrStmt->outputDataValue._buffer = NULL;
pSrvrStmt->outputDataValue._length = 0;
*(int*)pSrvrStmt->outputDataValue.pad_to_offset_8_=0;
outputDataOffset = 0;
pSrvrStmt->InternalStmtClose(SQL_CLOSE);
*returnCode = SQL_NO_DATA_FOUND;
}
else
{
*rowsAffected = pSrvrStmt->rowsAffected;
if (pSrvrStmt->sqlWarning._length != 0)
{
*returnCode = SQL_SUCCESS_WITH_INFO;
GETSQLWARNINGORERROR2(pSrvrStmt);
*sqlWarningOrErrorLength = pSrvrStmt->sqlWarningOrErrorLength;
sqlWarningOrError = pSrvrStmt->sqlWarningOrError;
}
else
{
char *tmpByte = (char*)&pSrvrStmt->outputDataValue._length;
for(int i=0; i<sizeof(pSrvrStmt->outputDataValue.pad_to_offset_8_); i++) {
pSrvrStmt->outputDataValue.pad_to_offset_8_[i] = *tmpByte;
tmpByte++;
}
*returnCode = SQL_SUCCESS;
}
pSrvrStmt->rowsAffected = 0;
}
outputDataValue->_length = pSrvrStmt->outputDataValue._length - outputDataOffset;
outputDataValue->_buffer = pSrvrStmt->outputDataValue._buffer + outputDataOffset;
}
ret:
if (*returnCode != SQL_SUCCESS &&
*returnCode != SQL_SUCCESS_WITH_INFO)
{
if (pSrvrStmt->outputDataValue._buffer != NULL)
delete pSrvrStmt->outputDataValue._buffer;
pSrvrStmt->outputDataValue._length = 0;
pSrvrStmt->outputDataValue._buffer = NULL;
}
if (pSrvrStmt->sqlNewQueryType == SQL_SP_RESULT_SET)
{
if (pSrvrStmt->callStmtHandle->isClosed == true && *returnCode == SQL_NO_DATA_FOUND || *returnCode == SQL_ERROR)
{
pSrvrStmt->callStmtHandle->inState = STMTSTAT_CLOSE;
// Fix for CR 6059
if( resStatStatement != NULL )
resStatStatement->setStatistics(pSrvrStmt->callStmtHandle);
}
}
else if (resStatStatement != NULL && pSrvrStmt->bFetchStarted == TRUE &&
(*returnCode == SQL_NO_DATA_FOUND || *returnCode == SQL_ERROR ||
((*returnCode == SQL_SUCCESS || *returnCode == SQL_SUCCESS_WITH_INFO) && *rowsAffected < maxRowCnt)))
{
resStatStatement->setStatistics(pSrvrStmt);
}
}
// resource statistics
if (resStatStatement != NULL && pSrvrStmt != NULL && pSrvrStmt->isClosed == TRUE && pSrvrStmt->bFetchStarted == TRUE && pSrvrStmt->stmtType == EXTERNAL_STMT)
{
if (*returnCode == SQL_ERROR && pSrvrStmt->sqlWarningOrError != NULL)
{
inErrorCode = *(Int32 *)(pSrvrStmt->sqlWarningOrError+8);
inSqlError = (char*)pSrvrStmt->sqlWarningOrError + 16;
inSqlErrorLength =*(Int32 *)(pSrvrStmt->sqlWarningOrError + 12);
}
pSrvrStmt->bFetchStarted = FALSE;
Int32 inMaxRowCnt = 0;
Int32 inMaxRowLen = 0;
inMaxRowCnt = maxRowCnt;
inMaxRowLen = maxRowLen;
if (*returnCode != SQL_SUCCESS &&
*returnCode != SQL_SUCCESS_WITH_INFO)
inErrorStatement ++;
else
setStatisticsFlag = FALSE;
if (*returnCode == SQL_SUCCESS_WITH_INFO)
inWarningStatement ++;
if (*returnCode == SQL_NO_DATA_FOUND)
{
inErrorStatement = 0;
inWarningStatement = 0;
setStatisticsFlag = TRUE;
}
inQueryId = pSrvrStmt->sqlUniqueQueryID;
inSqlQueryType = pSrvrStmt->sqlQueryType;
resStatStatement->setStatisticsFlag(setStatisticsFlag);
resStatStatement->end(inState,
inSqlQueryType,
inSqlStmtType,
inQueryId,
inEstimatedCost,
inSqlString,
inErrorStatement,
inWarningStatement,
inRowCount,
inErrorCode,
resStatSession,
inSqlErrorLength,
inSqlError,
pSrvrStmt,
&pSrvrStmt->m_need_21036_end_msg,
pSrvrStmt->sqlNewQueryType,
pSrvrStmt->isClosed);
}
//end rs
SRVRTRACE_EXIT(FILE_SME+8);
return;
} // odbc_SQLSrvr_FetchPerf_sme_()
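/*
 * Synchronous method function for
 * operation 'odbc_SQLSrvr_ExtractLob'
 */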
extern "C" void
odbc_SQLSrvr_ExtractLob_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def *call_id_
, /* Out */ odbc_SQLsrvr_ExtractLob_exc_ *exception_
, /* In */ IDL_short extractLobAPI
, /* In */ IDL_string lobHandle
, /* In */ IDL_long_long &lobLength
, /* Out */ IDL_long_long &extractLen
, /* Out */ BYTE *& extractData
)
{
char LobExtractQuery[1000] = {0};
char RequestError[200] = {0};
SRVR_STMT_HDL *QryLobExtractSrvrStmt = NULL;
if ((QryLobExtractSrvrStmt = getSrvrStmt("MXOSRVR_EXTRACRTLOB", TRUE)) == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR,
EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
2,
"EXTRACT LOB APIs",
"Allocate Statement");
exception_->exception_nr = odbc_SQLsrvr_ExtractLob_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
return; // cannot continue without a statement handle
}
switch (extractLobAPI) {
case 0:
extractData = NULL;
snprintf(LobExtractQuery, sizeof(LobExtractQuery), "EXTRACT LOBLENGTH(LOB'%s') LOCATION %Ld", lobHandle, (Int64)&lobLength);
break;
case 1:
extractData = new BYTE[extractLen + 1];
if (extractData == NULL)
{
exception_->exception_nr = odbc_SQLsrvr_ExtractLob_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_BUFFER_ALLOC_FAILED;
}
snprintf(LobExtractQuery, sizeof(LobExtractQuery), "EXTRACT LOBTOBUFFER(LOB'%s', LOCATION %Ld, SIZE %Ld)", lobHandle, (Int64)extractData, &extractLen);
break;
case 102:
extractLen = 0;
extractData = NULL;
snprintf(LobExtractQuery, sizeof(LobExtractQuery), "EXTRACT LOBTOBUFFER(LOB'%s', LOCATION %Ld, SIZE %Ld)", lobHandle, (Int64)extractData, &extractLen);
break;
default:
return ;
}
try
{
short retcode = QryLobExtractSrvrStmt->ExecDirect(NULL, LobExtractQuery, EXTERNAL_STMT, TYPE_CALL, SQL_ASYNC_ENABLE_OFF, 0);
if (retcode == SQL_ERROR)
{
ERROR_DESC_def *p_buffer = QryLobExtractSrvrStmt->sqlError.errorList._buffer;
strncpy(RequestError, p_buffer->errorText, sizeof(RequestError) - 1);
SendEventMsg(MSG_SQL_ERROR,
EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
2,
p_buffer->sqlcode,
RequestError);
exception_->exception_nr = odbc_SQLsrvr_ExtractLob_ParamError_exn_;
exception_->u.SQLError.errorList._length = QryLobExtractSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = QryLobExtractSrvrStmt->sqlError.errorList._buffer;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECUTE_FAILED;
}
}
catch (...)
{
SendEventMsg(MSG_PROGRAMMING_ERROR,
EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
1,
//"Exception in executing EXTRACT LOBTOBUFFER");
"Exception in executing EXTRACT LOBLENGTH");
exception_->exception_nr = odbc_SQLsrvr_ExtractLob_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECDIRECT_FAILED;
}
}
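/*
 * Synchronous method function for
 * operation 'odbc_SQLSrvr_UpdateLob'
 */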
extern "C" void
odbc_SQLSrvr_UpdateLob_sme_(
/* In */ CEE_tag_def objtag_
, /* In */ const CEE_handle_def * call_id_
, /* In */ odbc_SQLSvc_UpdateLob_exc_ * exception_
, /* In */ IDL_short lobUpdateType
, /* In */ IDL_string lobHandle
, /* In */ IDL_long_long totalLength
, /* In */ IDL_long_long offset
, /* In */ IDL_long_long length
, /* In */ BYTE * data)
{
char lobUpdateQuery[1000] = {0};
char RequestError[200] = {0};
SRVR_STMT_HDL * QryLobUpdateSrvrStmt = NULL;
if ((QryLobUpdateSrvrStmt = getSrvrStmt("MXOSRVR_UPDATELOB", TRUE)) == NULL)
{
SendEventMsg(MSG_MEMORY_ALLOCATION_ERROR,
EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
2,
"LOB UPDATE APIs",
"Allocate Statement");
exception_->exception_nr = odbc_SQLSvc_UpdateLob_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_UNABLE_TO_ALLOCATE_SQL_STMT;
return; // cannot continue without a statement handle
}
if (offset == 0)
{
snprintf(lobUpdateQuery, sizeof(lobUpdateQuery), "UPDATE LOB (LOB'%s', LOCATION %Ld, SIZE %Ld)", lobHandle, (Int64)data, length);
}
else
{
snprintf(lobUpdateQuery, sizeof(lobUpdateQuery), "UPDATE LOB (LOB'%s', LOCATION %Ld, SIZE %Ld, APPEND)", lobHandle, (Int64)data, length);
}
short retcode = 0;
try
{
retcode = QryLobUpdateSrvrStmt->ExecDirect(NULL, lobUpdateQuery, INTERNAL_STMT, TYPE_UNKNOWN, SQL_ASYNC_ENABLE_OFF, 0);
if (retcode == SQL_ERROR)
{
ERROR_DESC_def * p_buffer = QryLobUpdateSrvrStmt->sqlError.errorList._buffer;
strncpy(RequestError, p_buffer->errorText, sizeof(RequestError) - 1);
SendEventMsg(MSG_SQL_ERROR,
EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
2,
p_buffer->sqlcode,
RequestError);
exception_->exception_nr = odbc_SQLSvc_UpdateLob_ParamError_exn_;
exception_->u.SQLError.errorList._length = QryLobUpdateSrvrStmt->sqlError.errorList._length;
exception_->u.SQLError.errorList._buffer = QryLobUpdateSrvrStmt->sqlError.errorList._buffer;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECUTE_FAILED;
}
}
catch (...)
{
SendEventMsg(MSG_PROGRAMMING_ERROR,
EVENTLOG_ERROR_TYPE,
srvrGlobal->nskProcessInfo.processId,
ODBCMX_SERVER,
srvrGlobal->srvrObjRef,
1,
"Exception in executing UPDATE_LOB");
exception_->exception_nr = odbc_SQLSvc_UpdateLob_ParamError_exn_;
exception_->u.ParamError.ParamDesc = SQLSVC_EXCEPTION_EXECUTE_FAILED;
}
if (QryLobUpdateSrvrStmt != NULL) {
QryLobUpdateSrvrStmt->Close(SQL_DROP);
}
}
//========================================================================
//LCOV_EXCL_START
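// qrysrvc_GetAdaptiveSegment requests an adaptive segment from the AS process
// (srvrGlobal->QSProcessName) with a nowait WRITEREADX/AWAITIOX exchange and
// returns the segment number, or -1 if the feature is disabled or the open/IO fails.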
short qrysrvc_GetAdaptiveSegment()
{
static ADAPTIVE_SEGMENT_DATA asd = {CATCHER, ASOPER_ALLOCATE, -1};
_cc_status cc;
short error;
//short pHandle[10];
TPT_DECL(pHandle);
unsigned short wcount;
SB_Tag_Type tag;
Int32 timeout = AS_TIMEOUT;
int size;
if (srvrGlobal->bWMS_AdaptiveSegment == false)
return -1;
if ((error = getProcessHandle(srvrGlobal->QSProcessName,
TPT_REF(pHandle))) != 0)
{
if (srvrGlobal->fnumAS != -1)
{
FILE_CLOSE_(srvrGlobal->fnumAS);
srvrGlobal->fnumAS = -1;
}
return -1;
}
if (srvrGlobal->fnumAS == -1 || 2 != PROCESSHANDLE_COMPARE_(TPT_REF(pHandle), TPT_REF(srvrGlobal->pASHandle)))
{
if (srvrGlobal->fnumAS != -1)
{
FILE_CLOSE_(srvrGlobal->fnumAS);
srvrGlobal->fnumAS = -1;
}
// bits <1> ON - nowait
short option = 0x4000;
error = FILE_OPEN_(srvrGlobal->QSProcessName
, strlen(srvrGlobal->QSProcessName)
, &srvrGlobal->fnumAS
, 0 //access
, 0 //exclusion
, 1 //nowait_depth
, 0 //sync-or-receive-depth
, option //options
);
if (error == 0)
{
cc = AWAITIOX(&srvrGlobal->fnumAS,OMITREF,OMITREF,OMITREF,timeout);
if (_status_lt(cc))
FILE_GETINFO_ (srvrGlobal->fnumAS, &error);
else
error = 0;
}
if (error == 0)
{
if ((error = getProcessHandle(srvrGlobal->QSProcessName,
TPT_REF(srvrGlobal->pASHandle))) != 0)
error = 1;
}
if (error)
{
if (srvrGlobal->fnumAS != -1) //timeout
FILE_CLOSE_(srvrGlobal->fnumAS);
srvrGlobal->fnumAS = -1;
return -1;
}
}
size = sizeof(asd);
short fnum = srvrGlobal->fnumAS;
cc = WRITEREADX( fnum,
(char*)&asd,
size,
size,
&wcount,
tag);
if (_status_lt(cc))
{
FILE_CLOSE_(srvrGlobal->fnumAS);
srvrGlobal->fnumAS = -1;
return -1;
}
cc = AWAITIOX(&fnum,OMITREF,&wcount,&tag,timeout);
if (_status_lt(cc))
{
FILE_CLOSE_(srvrGlobal->fnumAS);
srvrGlobal->fnumAS = -1;
return -1;
}
return asd.segment;
}
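// AllocateAdaptiveSegment assigns an adaptive segment to the statement and,
// when it differs from the last one used, issues a CONTROL QUERY DEFAULT
// AFFINITY_VALUE CQD so subsequent compilations target that segment.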
void AllocateAdaptiveSegment(SRVR_STMT_HDL *pSrvrStmt)
{
if (srvrGlobal->bWMS_AdaptiveSegment == false)
return;
if (pSrvrStmt->m_isAdaptiveSegmentAllocated)
{
if (pSrvrStmt->m_adaptive_segment != -1)
ClearAdaptiveSegment(pSrvrStmt->m_adaptive_segment);
}
pSrvrStmt->m_isAdaptiveSegmentAllocated = false;
pSrvrStmt->m_adaptive_segment = qrysrvc_GetAdaptiveSegment();
if (pSrvrStmt->m_adaptive_segment != -1)
pSrvrStmt->m_isAdaptiveSegmentAllocated = true;
if (pSrvrStmt->m_adaptive_segment != srvrGlobal->lastCQDAdaptiveSegment)
{
char AffinityCQD[64];
sprintf(AffinityCQD, "CONTROL QUERY DEFAULT AFFINITY_VALUE '%d'", pSrvrStmt->m_adaptive_segment);
EXECDIRECT(AffinityCQD);
srvrGlobal->lastCQDAdaptiveSegment = pSrvrStmt->m_adaptive_segment;
}
}
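// DeallocateAdaptiveSegment returns the statement's adaptive segment to the
// AS process if one was allocated.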
void DeallocateAdaptiveSegment(SRVR_STMT_HDL *pSrvrStmt)
{
if (srvrGlobal->bWMS_AdaptiveSegment == false)
return;
if (pSrvrStmt->m_isAdaptiveSegmentAllocated)
{
if (pSrvrStmt->m_adaptive_segment != -1)
ClearAdaptiveSegment(pSrvrStmt->m_adaptive_segment);
pSrvrStmt->m_isAdaptiveSegmentAllocated = false;
}
}
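// ClearAdaptiveSegment asks the AS process to deallocate the given segment,
// or all segments when -1 is passed.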
void ClearAdaptiveSegment(short adapiveSeg)
{
static ADAPTIVE_SEGMENT_DATA asd = {CATCHER, ASOPER_INIT, -1};
_cc_status cc;
short error;
//short pHandle[10];
TPT_DECL(pHandle);
unsigned short wcount;
SB_Tag_Type tag;
Int32 timeout = AS_TIMEOUT;
if (srvrGlobal->bWMS_AdaptiveSegment == false)
return;
if ((error = getProcessHandle(srvrGlobal->QSProcessName,
TPT_REF(pHandle))) != 0)
{
if (srvrGlobal->fnumAS != -1)
{
FILE_CLOSE_(srvrGlobal->fnumAS);
srvrGlobal->fnumAS = -1;
}
return;
}
if (adapiveSeg == -1)
{
asd.operation = ASOPER_DEALLOCATE_ALL;
}
else
{
asd.operation = ASOPER_DEALLOCATE;
asd.segment = adapiveSeg;
}
if (srvrGlobal->fnumAS == -1 || 2 != PROCESSHANDLE_COMPARE_(TPT_REF(pHandle),TPT_REF(srvrGlobal->pASHandle)))
{
if (srvrGlobal->fnumAS != -1)
{
FILE_CLOSE_(srvrGlobal->fnumAS);
srvrGlobal->fnumAS = -1;
}
// bits <1> ON - nowait
short option = 0x4000;
error = FILE_OPEN_(srvrGlobal->QSProcessName
, strlen(srvrGlobal->QSProcessName)
, &srvrGlobal->fnumAS
, 0 //access
, 0 //exclusion
, 1 //nowait_depth
, 0 //sync-or-receive-depth
, option //options
);
if (error == 0)
{
cc = AWAITIOX(&srvrGlobal->fnumAS,OMITREF,OMITREF,OMITREF,timeout);
if (_status_lt(cc))
FILE_GETINFO_ (srvrGlobal->fnumAS, &error);
else
error = 0;
}
if (error == 0)
{
if ((error = getProcessHandle(srvrGlobal->QSProcessName,
TPT_REF(srvrGlobal->pASHandle))) != 0)
error = 1;
}
if (error)
{
if (srvrGlobal->fnumAS != -1) //timeout
FILE_CLOSE_(srvrGlobal->fnumAS);
srvrGlobal->fnumAS = -1;
}
}
if (srvrGlobal->fnumAS != -1)
{
cc = WRITEREADX( srvrGlobal->fnumAS,
(char*)&asd,
sizeof(asd),
sizeof(asd),
&wcount,
tag);
if (_status_lt(cc))
{
FILE_CLOSE_(srvrGlobal->fnumAS);
srvrGlobal->fnumAS = -1;
return;
}
cc = AWAITIOX(&(srvrGlobal->fnumAS),OMITREF,&wcount,&tag,timeout);
if (_status_lt(cc))
{
FILE_CLOSE_(srvrGlobal->fnumAS);
srvrGlobal->fnumAS = -1;
}
}
}
//LCOV_EXCL_STOP
//==========================================================
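// setAuthenticationError builds an ERROR[8837] authentication failure
// ("invalid username or password", or "internal error occurred" for internal
// failures) for the given external user name and appends a timestamp.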
static void setAuthenticationError(
bool & bSQLMessageSet,
odbc_SQLSvc_SQLError * SQLError,
const char * externalUsername,
bool isInternalError)
{
const char authErrorMessageHeader[] = "*** ERROR[8837] Invalid username or password";
const char authInternalErrorMessageHeader[] = "*** ERROR[8837] Internal error occurred";
char strNow[TIMEBUFSIZE + 1];
kdsCreateSQLErrorException(bSQLMessageSet,SQLError,1);
size_t messageHeaderLength;
if (isInternalError)
messageHeaderLength = strlen(authInternalErrorMessageHeader);
else
messageHeaderLength = strlen(authErrorMessageHeader);
size_t messageLength = (messageHeaderLength + 1) * 4 + TIMEBUFSIZE;
char *message = new char[messageLength];
if (isInternalError)
strcpy(message,authInternalErrorMessageHeader);
else
strcpy(message,authErrorMessageHeader);
strcat(message,". User: ");
strcat(message,externalUsername);
time_t now = time(NULL);
bzero(strNow,sizeof(strNow));
strftime(strNow,sizeof(strNow)," [%Y-%m-%d %H:%M:%S]", localtime(&now));
strcat(message,strNow);
kdsCopySQLErrorExceptionAndRowCount(SQLError,message,-8837," ",-1);
delete [] message; // message was allocated with new[]
}
| 1 | 20,635 | Surround code issue. RequestError may not be null terminated when the RequestError size is less than the length of the string in p_buffer->errorText. Also, this can cause core dump due to segment violation if length of errorText is less than the size of RequestBuffer. | apache-trafodion | cpp |
@@ -43,6 +43,11 @@ const (
AccountTrieRootKey = "accountTrieRoot"
)
+var (
+ // AccountMaxVersionPrefix is for account history
+ AccountMaxVersionPrefix = []byte("vp.")
+)
+
type (
// Factory defines an interface for managing states
Factory interface { | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package factory
import (
"context"
"fmt"
"math/big"
"strconv"
"sync"
"github.com/pkg/errors"
"go.uber.org/zap"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/vote/candidatesutil"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/db/trie"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/prometheustimer"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/state"
)
const (
// AccountKVNameSpace is the bucket name for account trie
AccountKVNameSpace = "Account"
// CandidateKVNameSpace is the bucket name for candidate data storage
CandidateKVNameSpace = "Candidate"
// CurrentHeightKey indicates the key of current factory height in underlying DB
CurrentHeightKey = "currentHeight"
// AccountTrieRootKey indicates the key of accountTrie root hash in underlying DB
AccountTrieRootKey = "accountTrieRoot"
)
type (
// Factory defines an interface for managing states
Factory interface {
lifecycle.StartStopper
// Accounts
Balance(string) (*big.Int, error)
Nonce(string) (uint64, error) // Note that Nonce starts with 1.
AccountState(string) (*state.Account, error)
RootHash() hash.Hash256
RootHashByHeight(uint64) (hash.Hash256, error)
Height() (uint64, error)
NewWorkingSet() (WorkingSet, error)
Commit(WorkingSet) error
// Candidate pool
CandidatesByHeight(uint64) ([]*state.Candidate, error)
State(hash.Hash160, interface{}) error
AddActionHandlers(...protocol.ActionHandler)
}
// factory implements StateFactory interface, tracks changes to account/contract and batch-commits to DB
factory struct {
lifecycle lifecycle.Lifecycle
mutex sync.RWMutex
currentChainHeight uint64
accountTrie trie.Trie // global state trie
dao db.KVStore // the underlying DB for account/contract storage
actionHandlers []protocol.ActionHandler // the handlers to handle actions
timerFactory *prometheustimer.TimerFactory
}
)
// Option sets Factory construction parameter
type Option func(*factory, config.Config) error
// PrecreatedTrieDBOption uses pre-created trie DB for state factory
func PrecreatedTrieDBOption(kv db.KVStore) Option {
return func(sf *factory, cfg config.Config) (err error) {
if kv == nil {
return errors.New("Invalid empty trie db")
}
sf.dao = kv
return nil
}
}
// DefaultTrieOption creates trie from config for state factory
func DefaultTrieOption() Option {
return func(sf *factory, cfg config.Config) (err error) {
dbPath := cfg.Chain.TrieDBPath
if len(dbPath) == 0 {
return errors.New("Invalid empty trie db path")
}
cfg.DB.DbPath = dbPath // TODO: remove this after moving TrieDBPath from cfg.Chain to cfg.DB
sf.dao = db.NewBoltDB(cfg.DB)
return nil
}
}
// InMemTrieOption creates in memory trie for state factory
func InMemTrieOption() Option {
return func(sf *factory, cfg config.Config) (err error) {
sf.dao = db.NewMemKVStore()
return nil
}
}
// NewFactory creates a new state factory
func NewFactory(cfg config.Config, opts ...Option) (Factory, error) {
sf := &factory{
currentChainHeight: 0,
}
for _, opt := range opts {
if err := opt(sf, cfg); err != nil {
log.S().Errorf("Failed to execute state factory creation option %p: %v", opt, err)
return nil, err
}
}
dbForTrie, err := db.NewKVStoreForTrie(AccountKVNameSpace, sf.dao)
if err != nil {
return nil, errors.Wrap(err, "failed to create db for trie")
}
if sf.accountTrie, err = trie.NewTrie(
trie.KVStoreOption(dbForTrie),
trie.RootKeyOption(AccountTrieRootKey),
); err != nil {
return nil, errors.Wrap(err, "failed to generate accountTrie from config")
}
sf.lifecycle.Add(sf.accountTrie)
timerFactory, err := prometheustimer.New(
"iotex_statefactory_perf",
"Performance of state factory module",
[]string{"topic", "chainID"},
[]string{"default", strconv.FormatUint(uint64(cfg.Chain.ID), 10)},
)
if err != nil {
log.L().Error("Failed to generate prometheus timer factory.", zap.Error(err))
}
sf.timerFactory = timerFactory
return sf, nil
}
func (sf *factory) Start(ctx context.Context) error {
sf.mutex.Lock()
defer sf.mutex.Unlock()
if err := sf.dao.Start(ctx); err != nil {
return err
}
return sf.lifecycle.OnStart(ctx)
}
func (sf *factory) Stop(ctx context.Context) error {
sf.mutex.Lock()
defer sf.mutex.Unlock()
if err := sf.dao.Stop(ctx); err != nil {
return err
}
return sf.lifecycle.OnStop(ctx)
}
// AddActionHandlers adds action handlers to the state factory
func (sf *factory) AddActionHandlers(actionHandlers ...protocol.ActionHandler) {
sf.mutex.Lock()
defer sf.mutex.Unlock()
sf.actionHandlers = append(sf.actionHandlers, actionHandlers...)
}
//======================================
// account functions
//======================================
// Balance returns balance
func (sf *factory) Balance(addr string) (*big.Int, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
account, err := sf.accountState(addr)
if err != nil {
return nil, err
}
return account.Balance, nil
}
// Nonce returns the Nonce if the account exists
func (sf *factory) Nonce(addr string) (uint64, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
account, err := sf.accountState(addr)
if err != nil {
return 0, err
}
return account.Nonce, nil
}
// account returns the confirmed account state on the chain
func (sf *factory) AccountState(addr string) (*state.Account, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.accountState(addr)
}
// RootHash returns the hash of the root node of the state trie
func (sf *factory) RootHash() hash.Hash256 {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.rootHash()
}
// RootHashByHeight returns the hash of the root node of the state trie at a given height
func (sf *factory) RootHashByHeight(blockHeight uint64) (hash.Hash256, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
data, err := sf.dao.Get(AccountKVNameSpace, []byte(fmt.Sprintf("%s-%d", AccountTrieRootKey, blockHeight)))
if err != nil {
return hash.ZeroHash256, err
}
var rootHash hash.Hash256
copy(rootHash[:], data)
return rootHash, nil
}
// Height returns factory's height
func (sf *factory) Height() (uint64, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
height, err := sf.dao.Get(AccountKVNameSpace, []byte(CurrentHeightKey))
if err != nil {
return 0, errors.Wrap(err, "failed to get factory's height from underlying DB")
}
return byteutil.BytesToUint64(height), nil
}
func (sf *factory) NewWorkingSet() (WorkingSet, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return NewWorkingSet(sf.currentChainHeight, sf.dao, sf.rootHash(), sf.actionHandlers)
}
// Commit persists all changes in RunActions() into the DB
func (sf *factory) Commit(ws WorkingSet) error {
if ws == nil {
return errors.New("working set doesn't exist")
}
sf.mutex.Lock()
defer sf.mutex.Unlock()
timer := sf.timerFactory.NewTimer("Commit")
defer timer.End()
if sf.currentChainHeight != ws.Version() {
// another working set with correct version already committed, do nothing
return fmt.Errorf(
"current state height %d doesn't match working set version %d",
sf.currentChainHeight,
ws.Version(),
)
}
if err := ws.Commit(); err != nil {
return errors.Wrap(err, "failed to commit working set")
}
// Update chain height and root
sf.currentChainHeight = ws.Height()
h := ws.RootHash()
if err := sf.accountTrie.SetRootHash(h[:]); err != nil {
return errors.Wrap(err, "failed to commit working set")
}
return nil
}
//======================================
// Candidate functions
//======================================
// CandidatesByHeight returns array of Candidates in candidate pool of a given height
func (sf *factory) CandidatesByHeight(height uint64) ([]*state.Candidate, error) {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
var candidates state.CandidateList
// Load Candidates on the given height from underlying db
candidatesKey := candidatesutil.ConstructKey(height)
err := sf.state(candidatesKey, &candidates)
log.L().Debug(
"CandidatesByHeight",
zap.Uint64("height", height),
zap.Any("candidates", candidates),
zap.Error(err),
)
if errors.Cause(err) == nil {
if len(candidates) > 0 {
return candidates, nil
}
err = state.ErrStateNotExist
}
return nil, errors.Wrapf(
err,
"failed to get state of candidateList for height %d",
height,
)
}
// State returns a confirmed state in the state factory
func (sf *factory) State(addr hash.Hash160, state interface{}) error {
sf.mutex.RLock()
defer sf.mutex.RUnlock()
return sf.state(addr, state)
}
//======================================
// private trie constructor functions
//======================================
func (sf *factory) rootHash() hash.Hash256 {
return hash.BytesToHash256(sf.accountTrie.RootHash())
}
func (sf *factory) state(addr hash.Hash160, s interface{}) error {
data, err := sf.accountTrie.Get(addr[:])
if err != nil {
if errors.Cause(err) == trie.ErrNotExist {
return errors.Wrapf(state.ErrStateNotExist, "state of %x doesn't exist", addr)
}
return errors.Wrapf(err, "error when getting the state of %x", addr)
}
if err := state.Deserialize(s, data); err != nil {
return errors.Wrapf(err, "error when deserializing state data into %T", s)
}
return nil
}
func (sf *factory) accountState(encodedAddr string) (*state.Account, error) {
// TODO: state db shouldn't serve this function
addr, err := address.FromString(encodedAddr)
if err != nil {
return nil, errors.Wrap(err, "error when getting the pubkey hash")
}
pkHash := hash.BytesToHash160(addr.Bytes())
var account state.Account
if err := sf.state(pkHash, &account); err != nil {
if errors.Cause(err) == state.ErrStateNotExist {
account = state.EmptyAccount()
return &account, nil
}
return nil, errors.Wrapf(err, "error when loading state of %x", pkHash)
}
return &account, nil
}
| 1 | 19,199 | `AccountMaxVersionPrefix` is a global variable (from `gochecknoglobals`) | iotexproject-iotex-core | go |
@@ -32,7 +32,6 @@ import (
apicommon "go.temporal.io/api/common/v1"
"go.temporal.io/api/enums/v1"
- apinamespace "go.temporal.io/api/namespace/v1"
apireplication "go.temporal.io/api/replication/v1"
"go.temporal.io/api/serviceerror"
"go.temporal.io/api/workflowservice/v1" | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package replication
import (
"context"
"errors"
"fmt"
"time"
apicommon "go.temporal.io/api/common/v1"
"go.temporal.io/api/enums/v1"
apinamespace "go.temporal.io/api/namespace/v1"
apireplication "go.temporal.io/api/replication/v1"
"go.temporal.io/api/serviceerror"
"go.temporal.io/api/workflowservice/v1"
"go.temporal.io/sdk/activity"
"go.temporal.io/sdk/temporal"
"go.temporal.io/sdk/workflow"
"go.temporal.io/server/api/historyservice/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/backoff"
"go.temporal.io/server/common/definition"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/namespace"
"go.temporal.io/server/common/persistence"
)
const (
forceReplicationWorkflowName = "force-replication"
namespaceHandoverWorkflowName = "namespace-handover"
listExecutionPageSize = 1000
minimumAllowedLaggingSeconds = 5
minimumHandoverTimeoutSeconds = 30
)
type (
ForceReplicationParams struct {
Namespace string
SkipAfterTime time.Time // skip workflows that are updated after this time
ConcurrentActivityCount int32
RemoteCluster string // remote cluster name
}
NamespaceHandoverParams struct {
Namespace string
RemoteCluster string
// how far behind on replication is allowed for remote cluster before handover is initiated
AllowedLaggingSeconds int
// how long to wait for handover to complete before rollback
HandoverTimeoutSeconds int
}
activities struct {
historyShardCount int32
executionManager persistence.ExecutionManager
namespaceRegistry namespace.Registry
historyClient historyservice.HistoryServiceClient
frontendClient workflowservice.WorkflowServiceClient
logger log.Logger
}
genReplicationForShardRange struct {
BeginShardID int32 // inclusive
EndShardID int32 // inclusive
NamespaceID string // only generate replication tasks for workflows in this namespace
SkipAfterTime time.Time // skip workflows whose LastUpdateTime is after this time
}
genReplicationForShard struct {
ShardID int32
NamespaceID string
SkipAfterTime time.Time
PageToken []byte
Index int
}
heartbeatProgress struct {
ShardID int32
PageToken []byte
Index int
}
metadataRequest struct {
Namespace string
}
metadataResponse struct {
ShardCount int32
NamespaceID string
}
replicationStatus struct {
MaxReplicationTaskIds map[int32]int64 // max replication task id for each shard.
}
waitReplicationRequest struct {
ShardCount int32
RemoteCluster string // remote cluster name
WaitForTaskIds map[int32]int64 // remote acked replication task needs to pass this id
AllowedLagging time.Duration // allowed remote acked lagging
}
updateStateRequest struct {
Namespace string // move this namespace into Handover state
NewState enums.NamespaceState
}
updateActiveClusterRequest struct {
Namespace string // move this namespace into Handover state
ActiveCluster string
}
waitHandoverRequest struct {
ShardCount int32
Namespace string
RemoteCluster string // remote cluster name
}
)
var (
historyServiceRetryPolicy = common.CreateHistoryServiceRetryPolicy()
persistenceRetryPolicy = common.CreateHistoryServiceRetryPolicy()
)
func ForceReplicationWorkflow(ctx workflow.Context, params ForceReplicationParams) error {
if len(params.Namespace) == 0 {
return errors.New("InvalidArgument: Namespace is required")
}
if len(params.RemoteCluster) == 0 {
return errors.New("InvalidArgument: RemoteCluster is required")
}
if params.ConcurrentActivityCount <= 0 {
params.ConcurrentActivityCount = 1
}
retryPolicy := &temporal.RetryPolicy{
InitialInterval: time.Second,
MaximumInterval: time.Second * 10,
}
// ** Step 1, Get cluster metadata **
ao := workflow.ActivityOptions{
StartToCloseTimeout: time.Second * 10,
RetryPolicy: retryPolicy,
}
ctx1 := workflow.WithActivityOptions(ctx, ao)
var a *activities
var metadataResp metadataResponse
metadataRequest := metadataRequest{Namespace: params.Namespace}
err := workflow.ExecuteActivity(ctx1, a.GetMetadata, metadataRequest).Get(ctx1, &metadataResp)
if err != nil {
return err
}
// ** Step 2, Force replication **
ao2 := workflow.ActivityOptions{
StartToCloseTimeout: time.Hour * 10,
HeartbeatTimeout: time.Second * 30,
RetryPolicy: retryPolicy,
}
ctx2 := workflow.WithActivityOptions(ctx, ao2)
concurrentCount := params.ConcurrentActivityCount
shardCount := metadataResp.ShardCount
skipAfter := params.SkipAfterTime
if skipAfter.IsZero() {
skipAfter = workflow.Now(ctx2)
}
var futures []workflow.Future
batchSize := (shardCount + concurrentCount - 1) / concurrentCount
for beginShardID := int32(1); beginShardID <= shardCount; beginShardID += batchSize {
endShardID := beginShardID + batchSize - 1
if endShardID > shardCount {
endShardID = shardCount
}
rangeRequest := genReplicationForShardRange{
BeginShardID: beginShardID,
EndShardID: endShardID,
NamespaceID: metadataResp.NamespaceID,
SkipAfterTime: skipAfter,
}
future := workflow.ExecuteActivity(ctx2, a.GenerateReplicationTasks, rangeRequest)
futures = append(futures, future)
}
for _, f := range futures {
if err := f.Get(ctx2, nil); err != nil {
return err
}
}
return nil
}
func NamespaceHandoverWorkflow(ctx workflow.Context, params NamespaceHandoverParams) error {
// validate input params
if len(params.Namespace) == 0 {
return errors.New("InvalidArgument: Namespace is required")
}
if len(params.RemoteCluster) == 0 {
return errors.New("InvalidArgument: RemoteCluster is required")
}
if params.AllowedLaggingSeconds <= minimumAllowedLaggingSeconds {
params.AllowedLaggingSeconds = minimumAllowedLaggingSeconds
}
if params.HandoverTimeoutSeconds <= minimumHandoverTimeoutSeconds {
params.HandoverTimeoutSeconds = minimumHandoverTimeoutSeconds
}
retryPolicy := &temporal.RetryPolicy{
InitialInterval: time.Second,
MaximumInterval: time.Second,
BackoffCoefficient: 1,
}
ao := workflow.ActivityOptions{
StartToCloseTimeout: time.Second * 10,
RetryPolicy: retryPolicy,
}
ctx = workflow.WithActivityOptions(ctx, ao)
var a *activities
// ** Step 1, Get cluster metadata **
var metadataResp metadataResponse
metadataRequest := metadataRequest{Namespace: params.Namespace}
err := workflow.ExecuteActivity(ctx, a.GetMetadata, metadataRequest).Get(ctx, &metadataResp)
if err != nil {
return err
}
// ** Step 2, get current replication status **
var repStatus replicationStatus
err = workflow.ExecuteActivity(ctx, a.GetMaxReplicationTaskIDs).Get(ctx, &repStatus)
if err != nil {
return err
}
// ** Step 3, wait remote cluster to catch up on replication tasks
ao3 := workflow.ActivityOptions{
StartToCloseTimeout: time.Hour,
HeartbeatTimeout: time.Second * 10,
RetryPolicy: retryPolicy,
}
ctx3 := workflow.WithActivityOptions(ctx, ao3)
waitRequest := waitReplicationRequest{
ShardCount: metadataResp.ShardCount,
RemoteCluster: params.RemoteCluster,
AllowedLagging: time.Duration(params.AllowedLaggingSeconds) * time.Second,
WaitForTaskIds: repStatus.MaxReplicationTaskIds,
}
err = workflow.ExecuteActivity(ctx3, a.WaitReplication, waitRequest).Get(ctx3, nil)
if err != nil {
return err
}
// ** Step 4, initiate handover
handoverRequest := updateStateRequest{
Namespace: params.Namespace,
NewState: enums.NAMESPACE_STATE_HANDOVER,
}
err = workflow.ExecuteActivity(ctx, a.UpdateNamespaceState, handoverRequest).Get(ctx, nil)
if err != nil {
return err
}
// ** Step 5, wait remote to ack handover task id
ao5 := workflow.ActivityOptions{
StartToCloseTimeout: time.Second * 30,
HeartbeatTimeout: time.Second * 10,
ScheduleToCloseTimeout: time.Second * time.Duration(params.HandoverTimeoutSeconds),
RetryPolicy: retryPolicy,
}
ctx5 := workflow.WithActivityOptions(ctx, ao5)
waitHandover := waitHandoverRequest{
ShardCount: metadataResp.ShardCount,
Namespace: params.Namespace,
RemoteCluster: params.RemoteCluster,
}
err5 := workflow.ExecuteActivity(ctx5, a.WaitHandover, waitHandover).Get(ctx5, nil)
if err5 == nil {
// ** Step 6, remote cluster is ready to take over, update Namespace to use remote cluster as active
updateRequest := updateActiveClusterRequest{
Namespace: params.Namespace,
ActiveCluster: params.RemoteCluster,
}
err = workflow.ExecuteActivity(ctx, a.UpdateActiveCluster, updateRequest).Get(ctx, nil)
if err != nil {
return err
}
}
// ** Step 7, reset namespace state from Handover -> Registered
resetStateRequest := updateStateRequest{
Namespace: params.Namespace,
NewState: enums.NAMESPACE_STATE_REGISTERED,
}
err = workflow.ExecuteActivity(ctx, a.UpdateNamespaceState, resetStateRequest).Get(ctx, nil)
if err != nil {
return err
}
return err5
}
// GetMetadata returns history shard count and namespaceID for requested namespace.
func (a *activities) GetMetadata(ctx context.Context, request metadataRequest) (*metadataResponse, error) {
nsEntry, err := a.namespaceRegistry.GetNamespace(namespace.Name(request.Namespace))
if err != nil {
return nil, err
}
return &metadataResponse{
ShardCount: a.historyShardCount,
NamespaceID: string(nsEntry.ID()),
}, nil
}
// GenerateReplicationTasks generates replication task for last history event for each workflow.
func (a *activities) GenerateReplicationTasks(ctx context.Context, request genReplicationForShardRange) error {
perShard := genReplicationForShard{
ShardID: request.BeginShardID,
NamespaceID: request.NamespaceID,
SkipAfterTime: request.SkipAfterTime,
}
var progress heartbeatProgress
if activity.HasHeartbeatDetails(ctx) {
if err := activity.GetHeartbeatDetails(ctx, &progress); err == nil {
perShard.ShardID = progress.ShardID
perShard.PageToken = progress.PageToken
perShard.Index = progress.Index
}
}
for ; perShard.ShardID <= request.EndShardID; perShard.ShardID++ {
if err := a.genReplicationTasks(ctx, perShard); err != nil {
return err
}
// heartbeat progress only apply for first shard
perShard.PageToken = nil
perShard.Index = 0
}
return nil
}
// GetMaxReplicationTaskIDs returns max replication task id per shard
func (a *activities) GetMaxReplicationTaskIDs(ctx context.Context) (*replicationStatus, error) {
resp, err := a.historyClient.GetReplicationStatus(ctx, &historyservice.GetReplicationStatusRequest{})
if err != nil {
return nil, err
}
result := &replicationStatus{MaxReplicationTaskIds: make(map[int32]int64)}
for _, shard := range resp.Shards {
result.MaxReplicationTaskIds[shard.ShardId] = shard.MaxReplicationTaskId
}
return result, nil
}
func (a *activities) WaitReplication(ctx context.Context, waitRequest waitReplicationRequest) error {
for {
done, err := a.checkReplicationOnce(ctx, waitRequest)
if err != nil {
return err
}
if done {
return nil
}
// keep waiting and check again
time.Sleep(time.Second)
activity.RecordHeartbeat(ctx, nil)
}
}
// Check if remote cluster has caught up on all shards on replication tasks
func (a *activities) checkReplicationOnce(ctx context.Context, waitRequest waitReplicationRequest) (bool, error) {
resp, err := a.historyClient.GetReplicationStatus(ctx, &historyservice.GetReplicationStatusRequest{
RemoteClusters: []string{waitRequest.RemoteCluster},
})
if err != nil {
return false, err
}
if int(waitRequest.ShardCount) != len(resp.Shards) {
return false, fmt.Errorf("GetReplicationStatus returns %d shards, expecting %d", len(resp.Shards), waitRequest.ShardCount)
}
// check that every shard has caught up
for _, shard := range resp.Shards {
clusterInfo, ok := shard.RemoteClusters[waitRequest.RemoteCluster]
if !ok {
return false, fmt.Errorf("GetReplicationStatus response for shard %d does not contains remote cluster %s", shard.ShardId, waitRequest.RemoteCluster)
}
if clusterInfo.AckedTaskId == shard.MaxReplicationTaskId {
continue // already caught up, continue to check next shard.
}
if clusterInfo.AckedTaskId < waitRequest.WaitForTaskIds[shard.ShardId] ||
shard.ShardLocalTime.Sub(*clusterInfo.AckedTaskVisibilityTime) > waitRequest.AllowedLagging {
a.logger.Info("Wait for remote ack",
tag.NewInt32("ShardId", shard.ShardId),
tag.NewInt64("AckedTaskId", clusterInfo.AckedTaskId),
tag.NewInt64("WaitForTaskId", waitRequest.WaitForTaskIds[shard.ShardId]),
tag.NewDurationTag("AllowedLagging", waitRequest.AllowedLagging),
tag.NewDurationTag("ActualLagging", shard.ShardLocalTime.Sub(*clusterInfo.AckedTaskVisibilityTime)),
tag.NewStringTag("RemoteCluster", waitRequest.RemoteCluster),
)
return false, nil
}
}
return true, nil
}
func (a *activities) WaitHandover(ctx context.Context, waitRequest waitHandoverRequest) error {
for {
done, err := a.checkHandoverOnce(ctx, waitRequest)
if err != nil {
return err
}
if done {
return nil
}
// keep waiting and check again
time.Sleep(time.Second)
activity.RecordHeartbeat(ctx, nil)
}
}
// Check if remote cluster has caught up on all shards on replication tasks
func (a *activities) checkHandoverOnce(ctx context.Context, waitRequest waitHandoverRequest) (bool, error) {
resp, err := a.historyClient.GetReplicationStatus(ctx, &historyservice.GetReplicationStatusRequest{
RemoteClusters: []string{waitRequest.RemoteCluster},
})
if err != nil {
return false, err
}
if int(waitRequest.ShardCount) != len(resp.Shards) {
return false, fmt.Errorf("GetReplicationStatus returns %d shards, expecting %d", len(resp.Shards), waitRequest.ShardCount)
}
// check that every shard is ready to handover
for _, shard := range resp.Shards {
clusterInfo, ok := shard.RemoteClusters[waitRequest.RemoteCluster]
if !ok {
return false, fmt.Errorf("GetReplicationStatus response for shard %d does not contains remote cluster %s", shard.ShardId, waitRequest.RemoteCluster)
}
handoverInfo, ok := shard.HandoverNamespaces[waitRequest.Namespace]
if !ok {
return false, fmt.Errorf("namespace %s on shard %d is not in handover state", waitRequest.Namespace, shard.ShardId)
}
if clusterInfo.AckedTaskId == shard.MaxReplicationTaskId && clusterInfo.AckedTaskId >= handoverInfo.HandoverReplicationTaskId {
continue // already caught up, continue to check next shard.
}
a.logger.Info("Wait for handover to be ready",
tag.NewInt32("ShardId", shard.ShardId),
tag.NewInt64("AckedTaskId", clusterInfo.AckedTaskId),
tag.NewInt64("HandoverTaskId", handoverInfo.HandoverReplicationTaskId),
tag.NewStringTag("Namespace", waitRequest.Namespace),
tag.NewStringTag("RemoteCluster", waitRequest.RemoteCluster),
)
return false, nil
}
return true, nil
}
func (a *activities) genReplicationTasks(ctx context.Context, request genReplicationForShard) error {
pageToken := request.PageToken
startIndex := request.Index
for {
var listResult *persistence.ListConcreteExecutionsResponse
op := func(ctx context.Context) error {
var err error
listResult, err = a.executionManager.ListConcreteExecutions(&persistence.ListConcreteExecutionsRequest{
ShardID: request.ShardID,
PageSize: listExecutionPageSize,
PageToken: pageToken,
})
return err
}
err := backoff.RetryContext(ctx, op, persistenceRetryPolicy, common.IsPersistenceTransientError)
if err != nil {
return err
}
for i := startIndex; i < len(listResult.States); i++ {
activity.RecordHeartbeat(ctx, heartbeatProgress{
ShardID: request.ShardID,
PageToken: pageToken,
Index: i,
})
ms := listResult.States[i]
if ms.ExecutionInfo.LastUpdateTime != nil && ms.ExecutionInfo.LastUpdateTime.After(request.SkipAfterTime) {
// workflow was updated after SkipAfterTime, no need to generate replication task
continue
}
if ms.ExecutionInfo.NamespaceId != request.NamespaceID {
// skip if not target namespace
continue
}
err := a.genReplicationTaskForOneWorkflow(ctx, definition.NewWorkflowKey(request.NamespaceID, ms.ExecutionInfo.WorkflowId, ms.ExecutionState.RunId))
if err != nil {
return err
}
}
pageToken = listResult.PageToken
startIndex = 0
if pageToken == nil {
break
}
}
return nil
}
func (a *activities) genReplicationTaskForOneWorkflow(ctx context.Context, wKey definition.WorkflowKey) error {
// will generate replication task
op := func(ctx context.Context) error {
var err error
ctx1, cancel := context.WithTimeout(ctx, time.Second*10)
defer cancel()
_, err = a.historyClient.GenerateLastHistoryReplicationTasks(ctx1, &historyservice.GenerateLastHistoryReplicationTasksRequest{
NamespaceId: wKey.NamespaceID,
Execution: &apicommon.WorkflowExecution{
WorkflowId: wKey.WorkflowID,
RunId: wKey.RunID,
},
})
return err
}
err := backoff.RetryContext(ctx, op, historyServiceRetryPolicy, common.IsServiceTransientError)
if err != nil {
if _, ok := err.(*serviceerror.NotFound); ok {
// ignore NotFound error
return nil
}
}
return err
}
func (a *activities) UpdateNamespaceState(ctx context.Context, req updateStateRequest) error {
descResp, err := a.frontendClient.DescribeNamespace(ctx, &workflowservice.DescribeNamespaceRequest{
Namespace: req.Namespace,
})
if err != nil {
return err
}
if descResp.NamespaceInfo.State == req.NewState {
return nil
}
_, err = a.frontendClient.UpdateNamespace(ctx, &workflowservice.UpdateNamespaceRequest{
Namespace: req.Namespace,
UpdateInfo: &apinamespace.UpdateNamespaceInfo{
State: req.NewState,
},
})
return err
}
func (a *activities) UpdateActiveCluster(ctx context.Context, req updateActiveClusterRequest) error {
descResp, err := a.frontendClient.DescribeNamespace(ctx, &workflowservice.DescribeNamespaceRequest{
Namespace: req.Namespace,
})
if err != nil {
return err
}
if descResp.ReplicationConfig.GetActiveClusterName() == req.ActiveCluster {
return nil
}
_, err = a.frontendClient.UpdateNamespace(ctx, &workflowservice.UpdateNamespaceRequest{
Namespace: req.Namespace,
ReplicationConfig: &apireplication.NamespaceReplicationConfig{
ActiveClusterName: req.ActiveCluster,
},
})
return err
}
| 1 | 13,619 | I see it is not part of this PR but I somehow overlooked it before. Can you use same import aliases as we use everywhere in the project: `apicommon` -> `commonpb` `apireplication` -> `replicationpb` `"go.temporal.io/api/enums/v1"` -> `enumspb "go.temporal.io/api/enums/v1"` | temporalio-temporal | go |
@@ -235,7 +235,7 @@ class Storage(StorageBase, MigratorMixin):
error_msg = (
"Cannot initialize empty resource timestamp " "when running in readonly."
)
- raise exceptions.BackendError(message=error_msg)
+ raise exceptions.ReadonlyError(message=error_msg)
obj = row
else:
create_result = conn.execute( | 1 | import logging
import os
import warnings
from collections import defaultdict
from kinto.core.decorators import deprecate_kwargs
from kinto.core.storage import (
DEFAULT_DELETED_FIELD,
DEFAULT_ID_FIELD,
DEFAULT_MODIFIED_FIELD,
MISSING,
StorageBase,
exceptions,
)
from kinto.core.storage.postgresql.client import create_from_config
from kinto.core.storage.postgresql.migrator import MigratorMixin
from kinto.core.utils import COMPARISON, json
logger = logging.getLogger(__name__)
HERE = os.path.dirname(__file__)
class Storage(StorageBase, MigratorMixin):
"""Storage backend using PostgreSQL.
Recommended in production (*requires PostgreSQL 9.4 or higher*).
Enable in configuration::
kinto.storage_backend = kinto.core.storage.postgresql
Database location URI can be customized::
kinto.storage_url = postgresql://user:[email protected]:5432/dbname
Alternatively, username and password could also rely on system user ident
or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).
.. note::
Some tables and indices are created when ``kinto migrate`` is run.
This requires some privileges on the database, or some error will
be raised.
**Alternatively**, the schema can be initialized outside the
python application, using the SQL file located in
:file:`kinto/core/storage/postgresql/schema.sql`. This allows to
distinguish schema manipulation privileges from schema usage.
A connection pool is enabled by default::
kinto.storage_pool_size = 10
kinto.storage_maxoverflow = 10
kinto.storage_max_backlog = -1
kinto.storage_pool_recycle = -1
kinto.storage_pool_timeout = 30
kinto.cache_poolclass =
kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog
The ``max_backlog`` limits the number of threads that can be in the queue
waiting for a connection. Once this limit has been reached, any further
attempts to acquire a connection will be rejected immediately, instead of
locking up all threads by keeping them waiting in the queue.
See `dedicated section in SQLAlchemy documentation
<http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_
for default values and behaviour.
.. note::
Using a `dedicated connection pool <http://pgpool.net>`_ is still
recommended to allow load balancing, replication or limit the number
of connections used in a multi-process deployment.
""" # NOQA
# MigratorMixin attributes.
name = "storage"
schema_version = 22
schema_file = os.path.join(HERE, "schema.sql")
migrations_directory = os.path.join(HERE, "migrations")
def __init__(self, client, max_fetch_size, *args, readonly=False, **kwargs):
super().__init__(*args, **kwargs)
self.client = client
self._max_fetch_size = max_fetch_size
self.readonly = readonly
def create_schema(self, dry_run=False):
"""Override create_schema to ensure DB encoding and TZ are OK."""
self._check_database_encoding()
self._check_database_timezone()
return super().create_schema(dry_run)
def initialize_schema(self, dry_run=False):
return self.create_or_migrate_schema(dry_run)
def _check_database_timezone(self):
# Make sure database has UTC timezone.
query = "SELECT current_setting('TIMEZONE') AS timezone;"
with self.client.connect() as conn:
result = conn.execute(query)
obj = result.fetchone()
timezone = obj["timezone"].upper()
if timezone != "UTC": # pragma: no cover
msg = f"Database timezone is not UTC ({timezone})"
warnings.warn(msg)
logger.warning(msg)
def _check_database_encoding(self):
# Make sure database is UTF-8.
query = """
SELECT pg_encoding_to_char(encoding) AS encoding
FROM pg_database
WHERE datname = current_database();
"""
with self.client.connect() as conn:
result = conn.execute(query)
obj = result.fetchone()
encoding = obj["encoding"].lower()
if encoding != "utf8": # pragma: no cover
raise AssertionError(f"Unexpected database encoding {encoding}")
def get_installed_version(self):
"""Return current version of schema or None if not any found."""
# Check for objects table, which definitely indicates a new
# DB. (metadata can exist if the permission schema ran first.)
table_exists_query = """
SELECT table_name
FROM information_schema.tables
WHERE table_name = '{}';
"""
schema_version_metadata_query = """
SELECT value AS version
FROM metadata
WHERE name = 'storage_schema_version'
ORDER BY LPAD(value, 3, '0') DESC;
"""
with self.client.connect() as conn:
result = conn.execute(table_exists_query.format("objects"))
objects_table_exists = result.rowcount > 0
result = conn.execute(table_exists_query.format("records"))
records_table_exists = result.rowcount > 0
if not objects_table_exists and not records_table_exists:
return
result = conn.execute(schema_version_metadata_query)
if result.rowcount > 0:
return int(result.fetchone()["version"])
# No storage_schema_version row.
# Perhaps it got flush()ed by a pre-8.1.2 Kinto (which
# would wipe the metadata table).
# Alternately, maybe we are working from a very early
# Cliquet version which never had a migration.
# Check for a created_at row. If this is gone, it's
# probably been flushed at some point.
query = "SELECT COUNT(*) FROM metadata WHERE name = 'created_at';"
result = conn.execute(query)
was_flushed = int(result.fetchone()[0]) == 0
if not was_flushed:
error_msg = "No schema history; assuming migration from Cliquet (version 1)."
logger.warning(error_msg)
return 1
# We have no idea what the schema is here. Migration
# is completely broken.
# Log an obsequious error message to the user and try
# to recover by assuming the last version where we had
# this bug.
logger.warning(UNKNOWN_SCHEMA_VERSION_MESSAGE)
# This is the last schema version where flushing the
# server would delete the schema version.
MAX_FLUSHABLE_SCHEMA_VERSION = 20
return MAX_FLUSHABLE_SCHEMA_VERSION
def flush(self):
"""Delete objects from tables without destroying schema.
This is used in test suites as well as in the flush plugin.
"""
query = """
DELETE FROM objects;
DELETE FROM timestamps;
"""
with self.client.connect(force_commit=True) as conn:
conn.execute(query)
logger.debug("Flushed PostgreSQL storage tables")
def resource_timestamp(self, resource_name, parent_id):
query_existing = """
WITH existing_timestamps AS (
-- Timestamp of latest object.
(
SELECT last_modified, as_epoch(last_modified) AS last_epoch
FROM objects
WHERE parent_id = :parent_id
AND resource_name = :resource_name
ORDER BY as_epoch(last_modified) DESC
LIMIT 1
)
-- Timestamp of empty resource.
UNION
(
SELECT last_modified, as_epoch(last_modified) AS last_epoch
FROM timestamps
WHERE parent_id = :parent_id
AND resource_name = :resource_name
)
)
SELECT MAX(last_modified) AS last_modified, MAX(last_epoch) AS last_epoch
FROM existing_timestamps
"""
create_if_missing = """
INSERT INTO timestamps (parent_id, resource_name, last_modified)
VALUES (:parent_id, :resource_name, COALESCE(:last_modified, clock_timestamp()::timestamp))
ON CONFLICT (parent_id, resource_name) DO NOTHING
RETURNING as_epoch(last_modified) AS last_epoch
"""
placeholders = dict(parent_id=parent_id, resource_name=resource_name)
with self.client.connect(readonly=False) as conn:
existing_ts = None
ts_result = conn.execute(query_existing, placeholders)
row = ts_result.fetchone() # Will return (None, None) when empty.
existing_ts = row["last_modified"]
# If the backend is readonly, we should not try to create the timestamp.
if self.readonly:
if existing_ts is None:
error_msg = (
"Cannot initialize empty resource timestamp " "when running in readonly."
)
raise exceptions.BackendError(message=error_msg)
obj = row
else:
create_result = conn.execute(
create_if_missing, dict(last_modified=existing_ts, **placeholders)
)
obj = create_result.fetchone() or row
return obj["last_epoch"]
@deprecate_kwargs({"collection_id": "resource_name", "record": "obj"})
def create(
self,
resource_name,
parent_id,
obj,
id_generator=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
):
id_generator = id_generator or self.id_generator
obj = {**obj}
if id_field in obj:
# Optimistically raise unicity error if object with same
# id already exists.
# Even if this check doesn't find one, be robust against
# conflicts because we could race with another thread.
# Still, this reduces write load because SELECTs are
# cheaper than INSERTs.
try:
existing = self.get(resource_name, parent_id, obj[id_field])
raise exceptions.UnicityError(id_field, existing)
except exceptions.ObjectNotFoundError:
pass
else:
obj[id_field] = id_generator()
# Remove redundancy in data field
query_object = {**obj}
query_object.pop(id_field, None)
query_object.pop(modified_field, None)
# If there is an object in the table and it is deleted = TRUE,
# we want to replace it. Otherwise, we want to do nothing and
# throw a UnicityError. Per
# https://stackoverflow.com/questions/15939902/is-select-or-insert-in-a-function-prone-to-race-conditions/15950324#15950324
# a WHERE clause in the DO UPDATE will lock the conflicting
# row whether it is true or not, so the subsequent SELECT is
# safe. We add a constant "inserted" field to know whether we
# need to throw or not.
query = """
INSERT INTO objects (id, parent_id, resource_name, data, last_modified, deleted)
VALUES (:object_id, :parent_id,
:resource_name, (:data)::JSONB,
from_epoch(:last_modified),
FALSE)
ON CONFLICT (id, parent_id, resource_name) DO UPDATE
SET last_modified = from_epoch(:last_modified),
data = (:data)::JSONB,
deleted = FALSE
WHERE objects.deleted = TRUE
RETURNING id, data, as_epoch(last_modified) AS last_modified;
"""
safe_holders = {}
placeholders = dict(
object_id=obj[id_field],
parent_id=parent_id,
resource_name=resource_name,
last_modified=obj.get(modified_field),
data=json.dumps(query_object),
)
with self.client.connect() as conn:
result = conn.execute(query % safe_holders, placeholders)
inserted = result.fetchone()
if not inserted:
raise exceptions.UnicityError(id_field)
obj[modified_field] = inserted["last_modified"]
return obj
@deprecate_kwargs({"collection_id": "resource_name"})
def get(
self,
resource_name,
parent_id,
object_id,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
):
query = """
SELECT as_epoch(last_modified) AS last_modified, data
FROM objects
WHERE id = :object_id
AND parent_id = :parent_id
AND resource_name = :resource_name
AND NOT deleted;
"""
placeholders = dict(object_id=object_id, parent_id=parent_id, resource_name=resource_name)
with self.client.connect(readonly=True) as conn:
result = conn.execute(query, placeholders)
if result.rowcount == 0:
raise exceptions.ObjectNotFoundError(object_id)
else:
existing = result.fetchone()
obj = existing["data"]
obj[id_field] = object_id
obj[modified_field] = existing["last_modified"]
return obj
@deprecate_kwargs({"collection_id": "resource_name", "record": "obj"})
def update(
self,
resource_name,
parent_id,
object_id,
obj,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
):
# Remove redundancy in data field
query_object = {**obj}
query_object.pop(id_field, None)
query_object.pop(modified_field, None)
query = """
INSERT INTO objects (id, parent_id, resource_name, data, last_modified, deleted)
VALUES (:object_id, :parent_id,
:resource_name, (:data)::JSONB,
from_epoch(:last_modified),
FALSE)
ON CONFLICT (id, parent_id, resource_name) DO UPDATE
SET data = (:data)::JSONB,
deleted = FALSE,
last_modified = GREATEST(from_epoch(:last_modified),
EXCLUDED.last_modified)
RETURNING as_epoch(last_modified) AS last_modified;
"""
placeholders = dict(
object_id=object_id,
parent_id=parent_id,
resource_name=resource_name,
last_modified=obj.get(modified_field),
data=json.dumps(query_object),
)
with self.client.connect() as conn:
result = conn.execute(query, placeholders)
updated = result.fetchone()
obj = {**obj, id_field: object_id}
obj[modified_field] = updated["last_modified"]
return obj
@deprecate_kwargs({"collection_id": "resource_name"})
def delete(
self,
resource_name,
parent_id,
object_id,
id_field=DEFAULT_ID_FIELD,
with_deleted=True,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
last_modified=None,
):
if with_deleted:
query = """
UPDATE objects
SET deleted=TRUE,
data=(:deleted_data)::JSONB,
last_modified=from_epoch(:last_modified)
WHERE id = :object_id
AND parent_id = :parent_id
AND resource_name = :resource_name
AND NOT deleted
RETURNING as_epoch(last_modified) AS last_modified;
"""
else:
query = """
DELETE FROM objects
WHERE id = :object_id
AND parent_id = :parent_id
AND resource_name = :resource_name
AND NOT deleted
RETURNING as_epoch(last_modified) AS last_modified;
"""
deleted_data = json.dumps(dict([(deleted_field, True)]))
placeholders = dict(
object_id=object_id,
parent_id=parent_id,
resource_name=resource_name,
last_modified=last_modified,
deleted_data=deleted_data,
)
with self.client.connect() as conn:
result = conn.execute(query, placeholders)
if result.rowcount == 0:
raise exceptions.ObjectNotFoundError(object_id)
updated = result.fetchone()
obj = {}
obj[modified_field] = updated["last_modified"]
obj[id_field] = object_id
obj[deleted_field] = True
return obj
@deprecate_kwargs({"collection_id": "resource_name"})
def delete_all(
self,
resource_name,
parent_id,
filters=None,
sorting=None,
pagination_rules=None,
limit=None,
id_field=DEFAULT_ID_FIELD,
with_deleted=True,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
):
if with_deleted:
query = """
WITH matching_objects AS (
SELECT id, parent_id, resource_name
FROM objects
WHERE {parent_id_filter}
{resource_name_filter}
AND NOT deleted
{conditions_filter}
{pagination_rules}
{sorting}
LIMIT :pagination_limit
FOR UPDATE
)
UPDATE objects
SET deleted=TRUE, data=(:deleted_data)::JSONB, last_modified=NULL
FROM matching_objects
WHERE objects.id = matching_objects.id
AND objects.parent_id = matching_objects.parent_id
AND objects.resource_name = matching_objects.resource_name
RETURNING objects.id, as_epoch(last_modified) AS last_modified;
"""
else:
query = """
WITH matching_objects AS (
SELECT id, parent_id, resource_name
FROM objects
WHERE {parent_id_filter}
{resource_name_filter}
AND NOT deleted
{conditions_filter}
{pagination_rules}
{sorting}
LIMIT :pagination_limit
FOR UPDATE
)
DELETE
FROM objects
USING matching_objects
WHERE objects.id = matching_objects.id
AND objects.parent_id = matching_objects.parent_id
AND objects.resource_name = matching_objects.resource_name
RETURNING objects.id, as_epoch(last_modified) AS last_modified;
"""
id_field = id_field or self.id_field
modified_field = modified_field or self.modified_field
deleted_data = json.dumps(dict([(deleted_field, True)]))
placeholders = dict(
parent_id=parent_id, resource_name=resource_name, deleted_data=deleted_data
)
# Safe strings
safeholders = defaultdict(str)
# Handle parent_id as a regex only if it contains *
if "*" in parent_id:
safeholders["parent_id_filter"] = "parent_id LIKE :parent_id"
placeholders["parent_id"] = parent_id.replace("*", "%")
else:
safeholders["parent_id_filter"] = "parent_id = :parent_id"
# If resource is None, remove it from query.
if resource_name is None:
safeholders["resource_name_filter"] = ""
else:
safeholders["resource_name_filter"] = "AND resource_name = :resource_name" # NOQA
if filters:
safe_sql, holders = self._format_conditions(filters, id_field, modified_field)
safeholders["conditions_filter"] = f"AND {safe_sql}"
placeholders.update(**holders)
if sorting:
sql, holders = self._format_sorting(sorting, id_field, modified_field)
safeholders["sorting"] = sql
placeholders.update(**holders)
if pagination_rules:
sql, holders = self._format_pagination(pagination_rules, id_field, modified_field)
safeholders["pagination_rules"] = f"AND ({sql})"
placeholders.update(**holders)
# Limit the number of results (pagination).
limit = min(self._max_fetch_size, limit) if limit else self._max_fetch_size
placeholders["pagination_limit"] = limit
with self.client.connect() as conn:
result = conn.execute(query.format_map(safeholders), placeholders)
deleted = result.fetchmany(self._max_fetch_size)
objects = []
for result in deleted:
obj = {}
obj[id_field] = result["id"]
obj[modified_field] = result["last_modified"]
obj[deleted_field] = True
objects.append(obj)
return objects
@deprecate_kwargs({"collection_id": "resource_name"})
def purge_deleted(
self,
resource_name,
parent_id,
before=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
):
delete_tombstones = """
DELETE
FROM objects
WHERE {parent_id_filter}
{resource_name_filter}
{conditions_filter}
"""
id_field = id_field or self.id_field
modified_field = modified_field or self.modified_field
placeholders = dict(parent_id=parent_id, resource_name=resource_name)
# Safe strings
safeholders = defaultdict(str)
# Handle parent_id as a regex only if it contains *
if "*" in parent_id:
safeholders["parent_id_filter"] = "parent_id LIKE :parent_id"
placeholders["parent_id"] = parent_id.replace("*", "%")
else:
safeholders["parent_id_filter"] = "parent_id = :parent_id"
# If resource is None, remove it from query.
if resource_name is None:
safeholders["resource_name_filter"] = ""
else:
safeholders["resource_name_filter"] = "AND resource_name = :resource_name" # NOQA
if before is not None:
safeholders["conditions_filter"] = "AND as_epoch(last_modified) < :before"
placeholders["before"] = before
with self.client.connect() as conn:
result = conn.execute(delete_tombstones.format_map(safeholders), placeholders)
deleted = result.rowcount
# If purging everything from a parent_id, then clear timestamps.
if resource_name is None and before is None:
delete_timestamps = """
DELETE
FROM timestamps
WHERE {parent_id_filter}
"""
conn.execute(delete_timestamps.format_map(safeholders), placeholders)
return deleted
def list_all(
self,
resource_name,
parent_id,
filters=None,
sorting=None,
pagination_rules=None,
limit=None,
include_deleted=False,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
):
query = """
SELECT id, as_epoch(last_modified) AS last_modified, data
FROM objects
WHERE {parent_id_filter}
AND resource_name = :resource_name
{conditions_deleted}
{conditions_filter}
{pagination_rules}
{sorting}
LIMIT :pagination_limit;
"""
rows = self._get_rows(
query,
resource_name,
parent_id,
filters=filters,
sorting=sorting,
pagination_rules=pagination_rules,
limit=limit,
include_deleted=include_deleted,
id_field=id_field,
modified_field=modified_field,
deleted_field=deleted_field,
)
if len(rows) == 0:
return []
records = []
for result in rows:
record = result["data"]
record[id_field] = result["id"]
record[modified_field] = result["last_modified"]
records.append(record)
return records
def count_all(
self,
resource_name,
parent_id,
filters=None,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
):
query = """
SELECT COUNT(*) AS total_count
FROM objects
WHERE {parent_id_filter}
AND resource_name = :resource_name
AND NOT deleted
{conditions_filter}
"""
rows = self._get_rows(
query,
resource_name,
parent_id,
filters=filters,
id_field=id_field,
modified_field=modified_field,
deleted_field=deleted_field,
)
return rows[0]["total_count"]
def _get_rows(
self,
query,
resource_name,
parent_id,
filters=None,
sorting=None,
pagination_rules=None,
limit=None,
include_deleted=False,
id_field=DEFAULT_ID_FIELD,
modified_field=DEFAULT_MODIFIED_FIELD,
deleted_field=DEFAULT_DELETED_FIELD,
):
# Unsafe strings escaped by PostgreSQL
placeholders = dict(parent_id=parent_id, resource_name=resource_name)
# Safe strings
safeholders = defaultdict(str)
# Handle parent_id as a regex only if it contains *
if "*" in parent_id:
safeholders["parent_id_filter"] = "parent_id LIKE :parent_id"
placeholders["parent_id"] = parent_id.replace("*", "%")
else:
safeholders["parent_id_filter"] = "parent_id = :parent_id"
if filters:
safe_sql, holders = self._format_conditions(filters, id_field, modified_field)
safeholders["conditions_filter"] = f"AND {safe_sql}"
placeholders.update(**holders)
if not include_deleted:
safeholders["conditions_deleted"] = "AND NOT deleted"
if sorting:
sql, holders = self._format_sorting(sorting, id_field, modified_field)
safeholders["sorting"] = sql
placeholders.update(**holders)
if pagination_rules:
sql, holders = self._format_pagination(pagination_rules, id_field, modified_field)
safeholders["pagination_rules"] = f"AND ({sql})"
placeholders.update(**holders)
# Limit the number of results (pagination).
limit = min(self._max_fetch_size + 1, limit) if limit else self._max_fetch_size
placeholders["pagination_limit"] = limit
with self.client.connect(readonly=True) as conn:
result = conn.execute(query.format_map(safeholders), placeholders)
return result.fetchmany(self._max_fetch_size + 1)
def _format_conditions(self, filters, id_field, modified_field, prefix="filters"):
"""Format the filters list in SQL, with placeholders for safe escaping.
.. note::
All conditions are combined using AND.
.. note::
Field name and value are escaped as they come from HTTP API.
:returns: A SQL string with placeholders, and a dict mapping
placeholders to actual values.
:rtype: tuple
"""
operators = {
COMPARISON.EQ: "=",
COMPARISON.NOT: "<>",
COMPARISON.IN: "IN",
COMPARISON.EXCLUDE: "NOT IN",
COMPARISON.LIKE: "ILIKE",
COMPARISON.CONTAINS: "@>",
}
conditions = []
holders = {}
for i, filtr in enumerate(filters):
value = filtr.value
is_like_query = filtr.operator == COMPARISON.LIKE
if filtr.field == id_field:
sql_field = "id"
if isinstance(value, int):
value = str(value)
elif filtr.field == modified_field:
sql_field = "as_epoch(last_modified)"
else:
column_name = "data"
# Subfields: ``person.name`` becomes ``data->person->>name``
subfields = filtr.field.split(".")
for j, subfield in enumerate(subfields):
# Safely escape field name
field_holder = f"{prefix}_field_{i}_{j}"
holders[field_holder] = subfield
# Use ->> to convert the last level to text if
# needed for LIKE query. (Other queries do JSONB comparison.)
column_name += "->>" if j == len(subfields) - 1 and is_like_query else "->"
column_name += f":{field_holder}"
sql_field = column_name
string_field = filtr.field in (id_field, modified_field) or is_like_query
if not string_field and value != MISSING:
# JSONB-ify the value.
if filtr.operator not in (
COMPARISON.IN,
COMPARISON.EXCLUDE,
COMPARISON.CONTAINS_ANY,
):
value = json.dumps(value)
else:
value = [json.dumps(v) for v in value]
if filtr.operator in (COMPARISON.IN, COMPARISON.EXCLUDE):
value = tuple(value)
# WHERE field IN (); -- Fails with syntax error.
if len(value) == 0:
value = (None,)
if is_like_query:
# Operand should be a string.
# Add implicit start/end wildcards if none is specified.
if "*" not in value:
value = f"*{value}*"
value = value.replace("*", "%")
if filtr.operator == COMPARISON.HAS:
operator = "IS NOT NULL" if filtr.value else "IS NULL"
cond = f"{sql_field} {operator}"
elif filtr.operator == COMPARISON.CONTAINS_ANY:
value_holder = f"{prefix}_value_{i}"
holders[value_holder] = value
# In case the field is not a sequence, we ignore the object.
is_json_sequence = f"jsonb_typeof({sql_field}) = 'array'"
# Postgres's && operator doesn't support jsonbs.
# However, it does support Postgres arrays of any
# type. Assume that the referenced field is a JSON
# array and convert it to a Postgres array.
data_as_array = f"""
(SELECT array_agg(elems) FROM jsonb_array_elements({sql_field}) elems)
"""
cond = f"{is_json_sequence} AND {data_as_array} && (:{value_holder})::jsonb[]"
elif value != MISSING:
# Safely escape value. MISSINGs get handled below.
value_holder = f"{prefix}_value_{i}"
holders[value_holder] = value
sql_operator = operators.setdefault(filtr.operator, filtr.operator.value)
cond = f"{sql_field} {sql_operator} :{value_holder}"
# If the field is missing, column_name will produce
# NULL. NULL has strange properties with comparisons
# in SQL -- NULL = anything => NULL, NULL <> anything => NULL.
# We generally want missing fields to be treated as a
# special value that compares as different from
# everything, including JSON null. Do this on a
# per-operator basis.
null_false_operators = (
# NULLs aren't EQ to anything (definitionally).
COMPARISON.EQ,
# So they can't match anything in an INCLUDE.
COMPARISON.IN,
# Nor can they be LIKE anything.
COMPARISON.LIKE,
# NULLs don't contain anything.
COMPARISON.CONTAINS,
COMPARISON.CONTAINS_ANY,
)
null_true_operators = (
# NULLs are automatically not equal to everything.
COMPARISON.NOT,
# Thus they can never be excluded.
COMPARISON.EXCLUDE,
# Match Postgres's default sort behavior
# (NULLS LAST) by allowing NULLs to
# automatically be greater than everything.
COMPARISON.GT,
COMPARISON.MIN,
)
if not (filtr.field == id_field or filtr.field == modified_field):
if value == MISSING:
# Handle MISSING values. The main use case for this is
# pagination, since there's no way to encode MISSING
# at the HTTP API level. Because we only need to cover
# pagination, we don't have to worry about any
# operators besides LT, LE, GT, GE, and EQ, and
# never worry about id_field or modified_field.
#
# Comparing a value against NULL is not the same
# as comparing a NULL against some other value, so
# we need another set of operators for which
# NULLs are OK.
if filtr.operator in (COMPARISON.EQ, COMPARISON.MIN):
# If a row is NULL, then it can be == NULL
# (for the purposes of pagination).
# >= NULL should only match rows that are
# NULL, since there's nothing higher.
cond = f"{sql_field} IS NULL"
elif filtr.operator == COMPARISON.LT:
# If we're looking for < NULL, match only
# non-nulls.
cond = f"{sql_field} IS NOT NULL"
elif filtr.operator == COMPARISON.MAX:
# <= NULL should include everything -- NULL
# because it's equal, and non-nulls because
# they're <.
cond = "TRUE"
elif filtr.operator == COMPARISON.GT:
# Nothing can be greater than NULL (that is,
# higher in search order).
cond = "FALSE"
else:
raise ValueError("Somehow we got a filter with MISSING value")
elif filtr.operator in null_false_operators:
cond = f"({sql_field} IS NOT NULL AND {cond})"
elif filtr.operator in null_true_operators:
cond = f"({sql_field} IS NULL OR {cond})"
else:
# No need to check for LT and MAX because NULL < foo
# is NULL, which is falsy in SQL.
pass
conditions.append(cond)
safe_sql = " AND ".join(conditions)
return safe_sql, holders
def _format_pagination(self, pagination_rules, id_field, modified_field):
"""Format the pagination rules in SQL, with placeholders for
safe escaping.
.. note::
All rules are combined using OR.
.. note::
Field names are escaped as they come from HTTP API.
:returns: A SQL string with placeholders, and a dict mapping
placeholders to actual values.
:rtype: tuple
"""
rules = []
placeholders = {}
for i, rule in enumerate(pagination_rules):
prefix = f"rules_{i}"
safe_sql, holders = self._format_conditions(
rule, id_field, modified_field, prefix=prefix
)
rules.append(safe_sql)
placeholders.update(**holders)
# Unsure how to convert to fstrings
safe_sql = " OR ".join([f"({r})" for r in rules])
return safe_sql, placeholders
def _format_sorting(self, sorting, id_field, modified_field):
"""Format the sorting in SQL, with placeholders for safe escaping.
.. note::
Field names are escaped as they come from HTTP API.
:returns: A SQL string with placeholders, and a dict mapping
placeholders to actual values.
:rtype: tuple
"""
sorts = []
holders = {}
for i, sort in enumerate(sorting):
if sort.field == id_field:
sql_field = "id"
elif sort.field == modified_field:
sql_field = "last_modified"
else:
# Subfields: ``person.name`` becomes ``data->person->name``
subfields = sort.field.split(".")
sql_field = "data"
for j, subfield in enumerate(subfields):
# Safely escape field name
field_holder = f"sort_field_{i}_{j}"
holders[field_holder] = subfield
sql_field += f"->(:{field_holder})"
sql_direction = "ASC" if sort.direction > 0 else "DESC"
sql_sort = f"{sql_field} {sql_direction}"
sorts.append(sql_sort)
safe_sql = f"ORDER BY {', '.join(sorts)}"
return safe_sql, holders
def load_from_config(config):
settings = config.get_settings()
max_fetch_size = int(settings["storage_max_fetch_size"])
readonly = settings.get("readonly", False)
client = create_from_config(config, prefix="storage_")
return Storage(client=client, max_fetch_size=max_fetch_size, readonly=readonly)
UNKNOWN_SCHEMA_VERSION_MESSAGE = """
Missing schema history. Perhaps at some point, this Kinto server was
flushed. Due to a bug in older Kinto versions (see
https://github.com/Kinto/kinto/issues/1460), flushing the server would
cause us to forget what version of the schema was in use. This means
automatic migration is impossible.
Historically, when this happened, Kinto would just assume that the
wiped server had the "current" schema, so you may have been missing a
schema version for quite some time.
To try to recover, we have assumed a schema version corresponding to
the last Kinto version with this bug (schema version 20). However, if
a migration fails, or most queries are broken, you may not actually be
running that schema. You can try to fix this by manually setting the
schema version in the database to what you think it should be using a
command like:
INSERT INTO metadata VALUES ('storage_schema_version', '19');
See https://github.com/Kinto/kinto/wiki/Schema-versions for more details.
""".strip()
| 1 | 12,709 | Nit: Maybe merge these into one string while you are here? | Kinto-kinto | py |
@@ -0,0 +1,15 @@
+package runtime
+
+// noret is a placeholder that can be used to indicate that an async function is not going to directly return here
+func noret() {}
+
+func getParentHandle() *task {
+ panic("NOPE")
+}
+
+func fakeCoroutine(dst **task) {
+ *dst = getCoroutine()
+ for {
+ yield()
+ }
+} | 1 | 1 | 7,520 | Why make a new file for this? I think keeping everything related to the scheduler in a single file increases readability. Especially when there is no API-boundary in between. Functions like `getCoroutine` and `yield` also live in the normal scheduler files so it doesn't seem consistent. | tinygo-org-tinygo | go |
|
@@ -132,9 +132,10 @@ func (c *endpointManagerCallbacks) InvokeRemoveWorkload(old *proto.WorkloadEndpo
// that fail are left in the pending state so they can be retried later.
type endpointManager struct {
// Config.
- ipVersion uint8
- wlIfacesRegexp *regexp.Regexp
- kubeIPVSSupportEnabled bool
+ ipVersion uint8
+ wlIfacesRegexp *regexp.Regexp
+ kubeIPVSSupportEnabled bool
+ ifaceConfigurationEnabled bool
// Our dependencies.
rawTable iptablesTable | 1 | // Copyright (c) 2016-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package intdataplane
import (
"fmt"
"io"
"net"
"os"
"reflect"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/ip"
"github.com/projectcalico/felix/iptables"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/routetable"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/libcalico-go/lib/set"
)
// routeTableSyncer is the interface used to manage data-sync of route table managers. This includes notification of
// interface state changes, hooks to queue a full resync and apply routing updates.
type routeTableSyncer interface {
OnIfaceStateChanged(string, ifacemonitor.State)
QueueResync()
Apply() error
}
// routeTable is the interface provided by the standard routetable module used to program the RIB.
type routeTable interface {
routeTableSyncer
SetRoutes(ifaceName string, targets []routetable.Target)
SetL2Routes(ifaceName string, targets []routetable.L2Target)
}
type hepListener interface {
OnHEPUpdate(hostIfaceToEpMap map[string]proto.HostEndpoint)
}
type endpointManagerCallbacks struct {
addInterface *AddInterfaceFuncs
removeInterface *RemoveInterfaceFuncs
updateInterface *UpdateInterfaceFuncs
updateHostEndpoint *UpdateHostEndpointFuncs
removeHostEndpoint *RemoveHostEndpointFuncs
updateWorkloadEndpoint *UpdateWorkloadEndpointFuncs
removeWorkloadEndpoint *RemoveWorkloadEndpointFuncs
}
func newEndpointManagerCallbacks(callbacks *callbacks, ipVersion uint8) endpointManagerCallbacks {
if ipVersion == 4 {
return endpointManagerCallbacks{
addInterface: callbacks.AddInterfaceV4,
removeInterface: callbacks.RemoveInterfaceV4,
updateInterface: callbacks.UpdateInterfaceV4,
updateHostEndpoint: callbacks.UpdateHostEndpointV4,
removeHostEndpoint: callbacks.RemoveHostEndpointV4,
updateWorkloadEndpoint: callbacks.UpdateWorkloadEndpointV4,
removeWorkloadEndpoint: callbacks.RemoveWorkloadEndpointV4,
}
} else {
return endpointManagerCallbacks{
addInterface: &AddInterfaceFuncs{},
removeInterface: &RemoveInterfaceFuncs{},
updateInterface: &UpdateInterfaceFuncs{},
updateHostEndpoint: &UpdateHostEndpointFuncs{},
removeHostEndpoint: &RemoveHostEndpointFuncs{},
updateWorkloadEndpoint: &UpdateWorkloadEndpointFuncs{},
removeWorkloadEndpoint: &RemoveWorkloadEndpointFuncs{},
}
}
}
func (c *endpointManagerCallbacks) InvokeInterfaceCallbacks(old, new map[string]proto.HostEndpointID) {
for ifaceName, oldEpID := range old {
if newEpID, ok := new[ifaceName]; ok {
if oldEpID != newEpID {
c.updateInterface.Invoke(ifaceName, newEpID)
}
} else {
c.removeInterface.Invoke(ifaceName)
}
}
for ifaceName, newEpID := range new {
if _, ok := old[ifaceName]; !ok {
c.addInterface.Invoke(ifaceName, newEpID)
}
}
}
func (c *endpointManagerCallbacks) InvokeUpdateHostEndpoint(hostEpID proto.HostEndpointID) {
c.updateHostEndpoint.Invoke(hostEpID)
}
func (c *endpointManagerCallbacks) InvokeRemoveHostEndpoint(hostEpID proto.HostEndpointID) {
c.removeHostEndpoint.Invoke(hostEpID)
}
func (c *endpointManagerCallbacks) InvokeUpdateWorkload(old, new *proto.WorkloadEndpoint) {
c.updateWorkloadEndpoint.Invoke(old, new)
}
func (c *endpointManagerCallbacks) InvokeRemoveWorkload(old *proto.WorkloadEndpoint) {
c.removeWorkloadEndpoint.Invoke(old)
}
// endpointManager manages the dataplane resources that belong to each endpoint as well as
// the "dispatch chains" that fan out packets to the right per-endpoint chain.
//
// It programs the relevant iptables chains (via the iptables.Table objects) along with
// per-endpoint routes (via the RouteTable).
//
// Since calculating the dispatch chains is fairly expensive, the main OnUpdate method
// simply records the pending state of each interface and defers the actual calculation
// to CompleteDeferredWork(). This is also the basis of our failure handling; updates
// that fail are left in the pending state so they can be retried later.
type endpointManager struct {
// Config.
ipVersion uint8
wlIfacesRegexp *regexp.Regexp
kubeIPVSSupportEnabled bool
// Our dependencies.
rawTable iptablesTable
mangleTable iptablesTable
filterTable iptablesTable
ruleRenderer rules.RuleRenderer
routeTable routeTable
writeProcSys procSysWriter
osStat func(path string) (os.FileInfo, error)
epMarkMapper rules.EndpointMarkMapper
// Pending updates, cleared in CompleteDeferredWork as the data is copied to the activeXYZ
// fields.
pendingWlEpUpdates map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint
pendingIfaceUpdates map[string]ifacemonitor.State
// Active state, updated in CompleteDeferredWork.
activeWlEndpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint
activeWlIfaceNameToID map[string]proto.WorkloadEndpointID
activeUpIfaces set.Set
activeWlIDToChains map[proto.WorkloadEndpointID][]*iptables.Chain
activeWlDispatchChains map[string]*iptables.Chain
activeEPMarkDispatchChains map[string]*iptables.Chain
// Workload endpoints that would be locally active but are 'shadowed' by other endpoints
// with the same interface name.
shadowedWlEndpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint
// wlIfaceNamesToReconfigure contains names of workload interfaces that need to have
// their configuration (sysctls etc.) refreshed.
wlIfaceNamesToReconfigure set.Set
// epIDsToUpdateStatus contains IDs of endpoints that we need to report status for.
// Mix of host and workload endpoint IDs.
epIDsToUpdateStatus set.Set
// hostIfaceToAddrs maps host interface name to the set of IPs on that interface (reported
	// from the dataplane).
hostIfaceToAddrs map[string]set.Set
// rawHostEndpoints contains the raw (i.e. not resolved to interface) host endpoints.
rawHostEndpoints map[proto.HostEndpointID]*proto.HostEndpoint
// hostEndpointsDirty is set to true when host endpoints are updated.
hostEndpointsDirty bool
// activeHostIfaceToChains maps host interface name to the chains that we've programmed.
activeHostIfaceToRawChains map[string][]*iptables.Chain
activeHostIfaceToFiltChains map[string][]*iptables.Chain
activeHostIfaceToMangleIngressChains map[string][]*iptables.Chain
activeHostIfaceToMangleEgressChains map[string][]*iptables.Chain
// Dispatch chains that we've programmed for host endpoints.
activeHostRawDispatchChains map[string]*iptables.Chain
activeHostFilterDispatchChains map[string]*iptables.Chain
activeHostMangleDispatchChains map[string]*iptables.Chain
// activeHostEpIDToIfaceNames records which interfaces we resolved each host endpoint to.
activeHostEpIDToIfaceNames map[proto.HostEndpointID][]string
// activeIfaceNameToHostEpID records which endpoint we resolved each host interface to.
activeIfaceNameToHostEpID map[string]proto.HostEndpointID
newIfaceNameToHostEpID map[string]proto.HostEndpointID
needToCheckDispatchChains bool
needToCheckEndpointMarkChains bool
// Callbacks
OnEndpointStatusUpdate EndpointStatusUpdateCallback
callbacks endpointManagerCallbacks
bpfEnabled bool
bpfEndpointManager hepListener
}
type EndpointStatusUpdateCallback func(ipVersion uint8, id interface{}, status string)
type procSysWriter func(path, value string) error
func newEndpointManager(
rawTable iptablesTable,
mangleTable iptablesTable,
filterTable iptablesTable,
ruleRenderer rules.RuleRenderer,
routeTable routeTable,
ipVersion uint8,
epMarkMapper rules.EndpointMarkMapper,
kubeIPVSSupportEnabled bool,
wlInterfacePrefixes []string,
onWorkloadEndpointStatusUpdate EndpointStatusUpdateCallback,
bpfEnabled bool,
bpfEndpointManager hepListener,
callbacks *callbacks,
) *endpointManager {
return newEndpointManagerWithShims(
rawTable,
mangleTable,
filterTable,
ruleRenderer,
routeTable,
ipVersion,
epMarkMapper,
kubeIPVSSupportEnabled,
wlInterfacePrefixes,
onWorkloadEndpointStatusUpdate,
writeProcSys,
os.Stat,
bpfEnabled,
bpfEndpointManager,
callbacks,
)
}
func newEndpointManagerWithShims(
rawTable iptablesTable,
mangleTable iptablesTable,
filterTable iptablesTable,
ruleRenderer rules.RuleRenderer,
routeTable routeTable,
ipVersion uint8,
epMarkMapper rules.EndpointMarkMapper,
kubeIPVSSupportEnabled bool,
wlInterfacePrefixes []string,
onWorkloadEndpointStatusUpdate EndpointStatusUpdateCallback,
procSysWriter procSysWriter,
osStat func(name string) (os.FileInfo, error),
bpfEnabled bool,
bpfEndpointManager hepListener,
callbacks *callbacks,
) *endpointManager {
wlIfacesPattern := "^(" + strings.Join(wlInterfacePrefixes, "|") + ").*"
wlIfacesRegexp := regexp.MustCompile(wlIfacesPattern)
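	// For example, with wlInterfacePrefixes = ["cali", "tap"] the pattern is
	// "^(cali|tap).*", matching any interface whose name starts with one of
	// those prefixes.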
return &endpointManager{
ipVersion: ipVersion,
wlIfacesRegexp: wlIfacesRegexp,
kubeIPVSSupportEnabled: kubeIPVSSupportEnabled,
bpfEnabled: bpfEnabled,
bpfEndpointManager: bpfEndpointManager,
rawTable: rawTable,
mangleTable: mangleTable,
filterTable: filterTable,
ruleRenderer: ruleRenderer,
routeTable: routeTable,
writeProcSys: procSysWriter,
osStat: osStat,
epMarkMapper: epMarkMapper,
// Pending updates, we store these up as OnUpdate is called, then process them
		// in CompleteDeferredWork and transfer the important data to the activeXYZ fields.
pendingWlEpUpdates: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{},
pendingIfaceUpdates: map[string]ifacemonitor.State{},
activeUpIfaces: set.New(),
activeWlEndpoints: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{},
activeWlIfaceNameToID: map[string]proto.WorkloadEndpointID{},
activeWlIDToChains: map[proto.WorkloadEndpointID][]*iptables.Chain{},
shadowedWlEndpoints: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{},
wlIfaceNamesToReconfigure: set.New(),
epIDsToUpdateStatus: set.New(),
hostIfaceToAddrs: map[string]set.Set{},
rawHostEndpoints: map[proto.HostEndpointID]*proto.HostEndpoint{},
hostEndpointsDirty: true,
activeHostIfaceToRawChains: map[string][]*iptables.Chain{},
activeHostIfaceToFiltChains: map[string][]*iptables.Chain{},
activeHostIfaceToMangleIngressChains: map[string][]*iptables.Chain{},
activeHostIfaceToMangleEgressChains: map[string][]*iptables.Chain{},
// Caches of the current dispatch chains indexed by chain name. We use these to
// calculate deltas when we need to update the chains.
activeWlDispatchChains: map[string]*iptables.Chain{},
activeHostFilterDispatchChains: map[string]*iptables.Chain{},
activeHostMangleDispatchChains: map[string]*iptables.Chain{},
activeHostRawDispatchChains: map[string]*iptables.Chain{},
activeEPMarkDispatchChains: map[string]*iptables.Chain{},
needToCheckDispatchChains: true, // Need to do start-of-day update.
needToCheckEndpointMarkChains: true, // Need to do start-of-day update.
OnEndpointStatusUpdate: onWorkloadEndpointStatusUpdate,
callbacks: newEndpointManagerCallbacks(callbacks, ipVersion),
}
}
func (m *endpointManager) OnUpdate(protoBufMsg interface{}) {
log.WithField("msg", protoBufMsg).Debug("Received message")
switch msg := protoBufMsg.(type) {
case *proto.WorkloadEndpointUpdate:
m.pendingWlEpUpdates[*msg.Id] = msg.Endpoint
case *proto.WorkloadEndpointRemove:
m.pendingWlEpUpdates[*msg.Id] = nil
case *proto.HostEndpointUpdate:
log.WithField("msg", msg).Debug("Host endpoint update")
m.callbacks.InvokeUpdateHostEndpoint(*msg.Id)
m.rawHostEndpoints[*msg.Id] = msg.Endpoint
m.hostEndpointsDirty = true
m.epIDsToUpdateStatus.Add(*msg.Id)
case *proto.HostEndpointRemove:
log.WithField("msg", msg).Debug("Host endpoint removed")
m.callbacks.InvokeRemoveHostEndpoint(*msg.Id)
delete(m.rawHostEndpoints, *msg.Id)
m.hostEndpointsDirty = true
m.epIDsToUpdateStatus.Add(*msg.Id)
case *ifaceUpdate:
log.WithField("update", msg).Debug("Interface state changed.")
m.pendingIfaceUpdates[msg.Name] = msg.State
case *ifaceAddrsUpdate:
log.WithField("update", msg).Debug("Interface addrs changed.")
if m.wlIfacesRegexp.MatchString(msg.Name) {
log.WithField("update", msg).Debug("Workload interface, ignoring.")
return
}
if msg.Addrs != nil {
m.hostIfaceToAddrs[msg.Name] = msg.Addrs
} else {
delete(m.hostIfaceToAddrs, msg.Name)
}
m.hostEndpointsDirty = true
}
}
func (m *endpointManager) ResolveUpdateBatch() error {
// Copy the pending interface state to the active set and mark any interfaces that have
// changed state for reconfiguration by resolveWorkload/HostEndpoints()
for ifaceName, state := range m.pendingIfaceUpdates {
if state == ifacemonitor.StateUp {
m.activeUpIfaces.Add(ifaceName)
if m.wlIfacesRegexp.MatchString(ifaceName) {
log.WithField("ifaceName", ifaceName).Info(
"Workload interface came up, marking for reconfiguration.")
m.wlIfaceNamesToReconfigure.Add(ifaceName)
}
} else {
m.activeUpIfaces.Discard(ifaceName)
}
// If this interface is linked to any already-existing endpoints, mark the endpoint
// status for recalculation. If the matching endpoint changes when we do
// resolveHostEndpoints() then that will mark old and new matching endpoints for
// update.
m.markEndpointStatusDirtyByIface(ifaceName)
// Clean up as we go...
delete(m.pendingIfaceUpdates, ifaceName)
}
if m.hostEndpointsDirty {
log.Debug("Host endpoints updated, resolving them.")
m.newIfaceNameToHostEpID = m.resolveHostEndpoints()
}
return nil
}
func (m *endpointManager) CompleteDeferredWork() error {
m.resolveWorkloadEndpoints()
if m.hostEndpointsDirty {
log.Debug("Host endpoints updated, resolving them.")
m.updateHostEndpoints()
m.hostEndpointsDirty = false
}
if m.kubeIPVSSupportEnabled && m.needToCheckEndpointMarkChains {
m.resolveEndpointMarks()
m.needToCheckEndpointMarkChains = false
}
// Now send any endpoint status updates.
m.updateEndpointStatuses()
return nil
}
func (m *endpointManager) GetRouteTableSyncers() []routeTableSyncer {
return []routeTableSyncer{m.routeTable}
}
func (m *endpointManager) markEndpointStatusDirtyByIface(ifaceName string) {
logCxt := log.WithField("ifaceName", ifaceName)
if epID, ok := m.activeWlIfaceNameToID[ifaceName]; ok {
logCxt.Info("Workload interface state changed; marking for status update.")
m.epIDsToUpdateStatus.Add(epID)
} else if epID, ok := m.activeIfaceNameToHostEpID[ifaceName]; ok {
logCxt.Info("Host interface state changed; marking for status update.")
m.epIDsToUpdateStatus.Add(epID)
} else {
// We don't know about this interface yet (or it's already been deleted).
// If the endpoint gets created, we'll do the update then. If it's been
// deleted, we've already cleaned it up.
logCxt.Debug("Ignoring interface state change for unknown interface.")
}
}
func (m *endpointManager) updateEndpointStatuses() {
log.WithField("dirtyEndpoints", m.epIDsToUpdateStatus).Debug("Reporting endpoint status.")
m.epIDsToUpdateStatus.Iter(func(item interface{}) error {
switch id := item.(type) {
case proto.WorkloadEndpointID:
status := m.calculateWorkloadEndpointStatus(id)
m.OnEndpointStatusUpdate(m.ipVersion, id, status)
case proto.HostEndpointID:
status := m.calculateHostEndpointStatus(id)
m.OnEndpointStatusUpdate(m.ipVersion, id, status)
}
return set.RemoveItem
})
}
func (m *endpointManager) calculateWorkloadEndpointStatus(id proto.WorkloadEndpointID) string {
logCxt := log.WithField("workloadEndpointID", id)
logCxt.Debug("Re-evaluating workload endpoint status")
var operUp, adminUp, failed bool
workload, known := m.activeWlEndpoints[id]
if known {
adminUp = workload.State == "active"
operUp = m.activeUpIfaces.Contains(workload.Name)
failed = m.wlIfaceNamesToReconfigure.Contains(workload.Name)
}
// Note: if endpoint is not known (i.e. has been deleted), status will be "", which signals
// a deletion.
var status string
if known {
if failed {
status = "error"
} else if operUp && adminUp {
status = "up"
} else {
status = "down"
}
}
logCxt = logCxt.WithFields(log.Fields{
"known": known,
"failed": failed,
"operUp": operUp,
"adminUp": adminUp,
"status": status,
})
logCxt.Info("Re-evaluated workload endpoint status")
return status
}
func (m *endpointManager) calculateHostEndpointStatus(id proto.HostEndpointID) (status string) {
logCxt := log.WithField("hostEndpointID", id)
logCxt.Debug("Re-evaluating host endpoint status")
var resolved, operUp bool
_, known := m.rawHostEndpoints[id]
// Note: if endpoint is not known (i.e. has been deleted), status will be "", which signals
// a deletion.
if known {
ifaceNames := m.activeHostEpIDToIfaceNames[id]
if len(ifaceNames) > 0 {
resolved = true
operUp = true
for _, ifaceName := range ifaceNames {
if ifaceName == allInterfaces {
// For * host endpoints we don't let particular interfaces
// impact their reported status, because it's unclear what
// the semantics would be, and we'd potentially have to look
// at every interface on the host.
continue
}
ifaceUp := m.activeUpIfaces.Contains(ifaceName)
logCxt.WithFields(log.Fields{
"ifaceName": ifaceName,
"ifaceUp": ifaceUp,
}).Debug("Status of matching interface.")
operUp = operUp && ifaceUp
}
}
if resolved && operUp {
status = "up"
} else if resolved {
status = "down"
} else {
// Known but failed to resolve, map that to error.
status = "error"
}
}
logCxt = logCxt.WithFields(log.Fields{
"known": known,
"resolved": resolved,
"operUp": operUp,
"status": status,
})
logCxt.Info("Re-evaluated host endpoint status")
return status
}
func (m *endpointManager) resolveWorkloadEndpoints() {
if len(m.pendingWlEpUpdates) > 0 {
// We're about to make endpoint updates, make sure we recheck the dispatch chains.
m.needToCheckDispatchChains = true
}
removeActiveWorkload := func(logCxt *log.Entry, oldWorkload *proto.WorkloadEndpoint, id proto.WorkloadEndpointID) {
m.callbacks.InvokeRemoveWorkload(oldWorkload)
m.filterTable.RemoveChains(m.activeWlIDToChains[id])
delete(m.activeWlIDToChains, id)
if oldWorkload != nil {
m.epMarkMapper.ReleaseEndpointMark(oldWorkload.Name)
// Remove any routes from the routing table. The RouteTable will remove any
// conntrack entries as a side-effect.
logCxt.Info("Workload removed, deleting old state.")
m.routeTable.SetRoutes(oldWorkload.Name, nil)
m.wlIfaceNamesToReconfigure.Discard(oldWorkload.Name)
delete(m.activeWlIfaceNameToID, oldWorkload.Name)
}
delete(m.activeWlEndpoints, id)
}
// Repeat the following loop until the pending update map is empty. Note that it's possible
// for an endpoint deletion to add a further update into the map (for a previously shadowed
// endpoint), so we cannot assume that a single iteration will always be enough.
for len(m.pendingWlEpUpdates) > 0 {
// Handle pending workload endpoint updates.
for id, workload := range m.pendingWlEpUpdates {
logCxt := log.WithField("id", id)
oldWorkload := m.activeWlEndpoints[id]
if workload != nil {
// Check if there is already an active workload endpoint with the same
// interface name.
if existingId, ok := m.activeWlIfaceNameToID[workload.Name]; ok && existingId != id {
// There is. We need to decide which endpoint takes preference.
// (We presume this is some kind of make before break logic, and the
// situation will shortly be resolved by one of the endpoints being
// removed. But in the meantime we must have predictable
// behaviour.)
logCxt.WithFields(log.Fields{
"interfaceName": workload.Name,
"existingId": existingId,
}).Info("New endpoint has same iface name as existing")
if wlIdsAscending(&existingId, &id) {
logCxt.Info("Existing endpoint takes preference")
m.shadowedWlEndpoints[id] = workload
delete(m.pendingWlEpUpdates, id)
continue
}
logCxt.Info("New endpoint takes preference; remove existing")
m.shadowedWlEndpoints[existingId] = m.activeWlEndpoints[existingId]
removeActiveWorkload(logCxt, m.activeWlEndpoints[existingId], existingId)
}
logCxt.Info("Updating per-endpoint chains.")
if oldWorkload != nil && oldWorkload.Name != workload.Name {
logCxt.Debug("Interface name changed, cleaning up old state")
m.epMarkMapper.ReleaseEndpointMark(oldWorkload.Name)
if !m.bpfEnabled {
m.filterTable.RemoveChains(m.activeWlIDToChains[id])
}
m.routeTable.SetRoutes(oldWorkload.Name, nil)
m.wlIfaceNamesToReconfigure.Discard(oldWorkload.Name)
delete(m.activeWlIfaceNameToID, oldWorkload.Name)
}
var ingressPolicyNames, egressPolicyNames []string
if len(workload.Tiers) > 0 {
ingressPolicyNames = workload.Tiers[0].IngressPolicies
egressPolicyNames = workload.Tiers[0].EgressPolicies
}
adminUp := workload.State == "active"
if !m.bpfEnabled {
chains := m.ruleRenderer.WorkloadEndpointToIptablesChains(
workload.Name,
m.epMarkMapper,
adminUp,
ingressPolicyNames,
egressPolicyNames,
workload.ProfileIds,
)
m.filterTable.UpdateChains(chains)
m.activeWlIDToChains[id] = chains
}
// Collect the IP prefixes that we want to route locally to this endpoint:
logCxt.Info("Updating endpoint routes.")
var (
ipStrings []string
natInfos []*proto.NatInfo
addrSuffix string
)
if m.ipVersion == 4 {
ipStrings = workload.Ipv4Nets
natInfos = workload.Ipv4Nat
addrSuffix = "/32"
} else {
ipStrings = workload.Ipv6Nets
natInfos = workload.Ipv6Nat
addrSuffix = "/128"
}
if len(natInfos) != 0 {
old := ipStrings
ipStrings = make([]string, len(old)+len(natInfos))
copy(ipStrings, old)
for ii, natInfo := range natInfos {
ipStrings[len(old)+ii] = natInfo.ExtIp + addrSuffix
}
}
var mac net.HardwareAddr
if workload.Mac != "" {
var err error
mac, err = net.ParseMAC(workload.Mac)
if err != nil {
logCxt.WithError(err).Error(
"Failed to parse endpoint's MAC address")
}
}
var routeTargets []routetable.Target
if adminUp {
logCxt.Debug("Endpoint up, adding routes")
for _, s := range ipStrings {
routeTargets = append(routeTargets, routetable.Target{
CIDR: ip.MustParseCIDROrIP(s),
DestMAC: mac,
})
}
} else {
logCxt.Debug("Endpoint down, removing routes")
}
m.routeTable.SetRoutes(workload.Name, routeTargets)
m.wlIfaceNamesToReconfigure.Add(workload.Name)
m.activeWlEndpoints[id] = workload
m.activeWlIfaceNameToID[workload.Name] = id
delete(m.pendingWlEpUpdates, id)
m.callbacks.InvokeUpdateWorkload(oldWorkload, workload)
} else {
logCxt.Info("Workload removed, deleting its chains.")
removeActiveWorkload(logCxt, oldWorkload, id)
delete(m.pendingWlEpUpdates, id)
delete(m.shadowedWlEndpoints, id)
if oldWorkload != nil {
// Check for another endpoint with the same interface name,
// that should now become active.
bestShadowedId := proto.WorkloadEndpointID{}
for sId, sWorkload := range m.shadowedWlEndpoints {
logCxt.Infof("Old workload %v", oldWorkload)
logCxt.Infof("Shadowed workload %v", sWorkload)
if sWorkload.Name == oldWorkload.Name {
if bestShadowedId.EndpointId == "" || wlIdsAscending(&sId, &bestShadowedId) {
bestShadowedId = sId
}
}
}
if bestShadowedId.EndpointId != "" {
m.pendingWlEpUpdates[bestShadowedId] = m.shadowedWlEndpoints[bestShadowedId]
delete(m.shadowedWlEndpoints, bestShadowedId)
}
}
}
// Update or deletion, make sure we update the interface status.
m.epIDsToUpdateStatus.Add(id)
}
}
if !m.bpfEnabled && m.needToCheckDispatchChains {
// Rewrite the dispatch chains if they've changed.
newDispatchChains := m.ruleRenderer.WorkloadDispatchChains(m.activeWlEndpoints)
m.updateDispatchChains(m.activeWlDispatchChains, newDispatchChains, m.filterTable)
m.needToCheckDispatchChains = false
// Set flag to update endpoint mark chains.
m.needToCheckEndpointMarkChains = true
}
m.wlIfaceNamesToReconfigure.Iter(func(item interface{}) error {
ifaceName := item.(string)
err := m.configureInterface(ifaceName)
if err != nil {
if exists, err := m.interfaceExistsInProcSys(ifaceName); err == nil && !exists {
// Suppress log spam if interface has been removed.
log.WithError(err).Debug("Failed to configure interface and it seems to be gone")
} else {
log.WithError(err).Warn("Failed to configure interface, will retry")
}
return nil
}
return set.RemoveItem
})
}
func wlIdsAscending(id1, id2 *proto.WorkloadEndpointID) bool {
if id1.OrchestratorId == id2.OrchestratorId {
// Need to compare WorkloadId.
if id1.WorkloadId == id2.WorkloadId {
// Need to compare EndpointId.
return id1.EndpointId < id2.EndpointId
}
return id1.WorkloadId < id2.WorkloadId
}
return id1.OrchestratorId < id2.OrchestratorId
}
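// For example, for two IDs that share OrchestratorId and WorkloadId, the one
// with EndpointId "eth0" orders before the one with EndpointId "eth1"; callers
// use this ordering to pick which of two clashing endpoints takes preference.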
func (m *endpointManager) resolveEndpointMarks() {
if m.bpfEnabled {
return
}
// Render endpoint mark chains for active workload and host endpoint.
newEndpointMarkDispatchChains := m.ruleRenderer.EndpointMarkDispatchChains(m.epMarkMapper, m.activeWlEndpoints, m.activeIfaceNameToHostEpID)
m.updateDispatchChains(m.activeEPMarkDispatchChains, newEndpointMarkDispatchChains, m.filterTable)
}
func (m *endpointManager) resolveHostEndpoints() map[string]proto.HostEndpointID {
// Host endpoint resolution
// ------------------------
//
// There is a set of non-workload interfaces on the local host, each possibly with
// IP addresses, that might be controlled by HostEndpoint resources in the Calico
// data model. The data model syntactically allows multiple HostEndpoint
// resources to match a given interface - for example, an interface 'eth1' might
// have address 10.240.0.34 and 172.19.2.98, and the data model might include:
//
// - HostEndpoint A with Name 'eth1'
//
// - HostEndpoint B with ExpectedIpv4Addrs including '10.240.0.34'
//
// - HostEndpoint C with ExpectedIpv4Addrs including '172.19.2.98'.
//
// but at runtime, at any given time, we only allow one HostEndpoint to govern
// that interface. That HostEndpoint becomes the active one, and the others
// remain inactive. (But if, for example, the active HostEndpoint resource was
// deleted, then one of the inactive ones could take over.) Given multiple
// matching HostEndpoint resources, the one that wins is the one with the
	// alphabetically earliest HostEndpointId.
//
// So the process here is not about 'resolving' a particular HostEndpoint on its
// own. Rather it is looking at the set of local non-workload interfaces and
// seeing which of them are matched by the current set of HostEndpoints as a
// whole.
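	// For instance, if HostEndpoints A, B and C above all matched eth1 at the
	// same time, the one whose HostEndpointId sorts first alphabetically would
	// become the active endpoint for eth1, and the other two would stay
	// inactive until it was removed.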
newIfaceNameToHostEpID := map[string]proto.HostEndpointID{}
for ifaceName, ifaceAddrs := range m.hostIfaceToAddrs {
ifaceCxt := log.WithFields(log.Fields{
"ifaceName": ifaceName,
"ifaceAddrs": ifaceAddrs,
})
bestHostEpId := proto.HostEndpointID{}
HostEpLoop:
for id, hostEp := range m.rawHostEndpoints {
logCxt := ifaceCxt.WithField("id", id)
if forAllInterfaces(hostEp) {
logCxt.Debug("Skip all-interfaces host endpoint")
continue
}
logCxt.WithField("bestHostEpId", bestHostEpId).Debug("See if HostEp matches interface")
if (bestHostEpId.EndpointId != "") && (bestHostEpId.EndpointId < id.EndpointId) {
// We already have a HostEndpointId that is better than
// this one, so no point looking any further.
logCxt.Debug("No better than existing match")
continue
}
if hostEp.Name == ifaceName {
// The HostEndpoint has an explicit name that matches the
// interface.
logCxt.Debug("Match on explicit iface name")
bestHostEpId = id
continue
} else if hostEp.Name != "" {
// The HostEndpoint has an explicit name that isn't this
// interface. Continue, so as not to allow it to match on
// an IP address instead.
logCxt.Debug("Rejected on explicit iface name")
continue
}
for _, wantedList := range [][]string{hostEp.ExpectedIpv4Addrs, hostEp.ExpectedIpv6Addrs} {
for _, wanted := range wantedList {
logCxt.WithField("wanted", wanted).Debug("Address wanted by HostEp")
if ifaceAddrs.Contains(wanted) {
// The HostEndpoint expects an IP address
// that is on this interface.
logCxt.Debug("Match on address")
bestHostEpId = id
continue HostEpLoop
}
}
}
}
if bestHostEpId.EndpointId != "" {
logCxt := log.WithFields(log.Fields{
"ifaceName": ifaceName,
"bestHostEpId": bestHostEpId,
})
logCxt.Debug("Got HostEp for interface")
newIfaceNameToHostEpID[ifaceName] = bestHostEpId
}
}
// Similar loop to find the best all-interfaces host endpoint.
bestHostEpId := proto.HostEndpointID{}
for id, hostEp := range m.rawHostEndpoints {
logCxt := log.WithField("id", id)
if !forAllInterfaces(hostEp) {
logCxt.Debug("Skip interface-specific host endpoint")
continue
}
if (bestHostEpId.EndpointId != "") && (bestHostEpId.EndpointId < id.EndpointId) {
// We already have a HostEndpointId that is better than
// this one, so no point looking any further.
logCxt.Debug("No better than existing match")
continue
}
logCxt.Debug("New best all-interfaces host endpoint")
bestHostEpId = id
}
if bestHostEpId.EndpointId != "" {
log.WithField("bestHostEpId", bestHostEpId).Debug("Got all interfaces HostEp")
newIfaceNameToHostEpID[allInterfaces] = bestHostEpId
}
if m.bpfEndpointManager != nil {
// Construct map of interface names to host endpoints, and pass to the BPF endpoint
// manager.
hostIfaceToEpMap := map[string]proto.HostEndpoint{}
for ifaceName, id := range newIfaceNameToHostEpID {
// Note, dereference the proto.HostEndpoint here so that the data lifetime
// is decoupled from the validity of the pointer here.
hostIfaceToEpMap[ifaceName] = *m.rawHostEndpoints[id]
}
m.bpfEndpointManager.OnHEPUpdate(hostIfaceToEpMap)
}
return newIfaceNameToHostEpID
}
func (m *endpointManager) updateHostEndpoints() {
// Calculate filtered name/id maps for untracked and pre-DNAT policy, and a reverse map from
// each active host endpoint to the interfaces it is in use for.
newIfaceNameToHostEpID := m.newIfaceNameToHostEpID
newPreDNATIfaceNameToHostEpID := map[string]proto.HostEndpointID{}
newUntrackedIfaceNameToHostEpID := map[string]proto.HostEndpointID{}
newHostEpIDToIfaceNames := map[proto.HostEndpointID][]string{}
for ifaceName, id := range newIfaceNameToHostEpID {
logCxt := log.WithField("id", id).WithField("ifaceName", ifaceName)
ep := m.rawHostEndpoints[id]
if len(ep.UntrackedTiers) > 0 {
// Optimisation: only add the endpoint chains to the raw (untracked)
// table if there's some untracked policy to apply. This reduces
// per-packet latency since every packet has to traverse the raw
// table.
logCxt.Debug("Endpoint has untracked policies.")
newUntrackedIfaceNameToHostEpID[ifaceName] = id
}
if len(ep.PreDnatTiers) > 0 {
// Similar optimisation (or neatness) for pre-DNAT policy.
logCxt.Debug("Endpoint has pre-DNAT policies.")
newPreDNATIfaceNameToHostEpID[ifaceName] = id
}
// Record that this host endpoint is in use, for status reporting.
newHostEpIDToIfaceNames[id] = append(
newHostEpIDToIfaceNames[id], ifaceName)
// Also determine endpoints for which we need to review status.
oldID, wasKnown := m.activeIfaceNameToHostEpID[ifaceName]
newID, isKnown := newIfaceNameToHostEpID[ifaceName]
if oldID != newID {
logCxt := logCxt.WithFields(log.Fields{
"oldID": m.activeIfaceNameToHostEpID[ifaceName],
"newID": newIfaceNameToHostEpID[ifaceName],
})
logCxt.Info("Endpoint matching interface changed")
if wasKnown {
logCxt.Debug("Endpoint was known, updating old endpoint status")
m.epIDsToUpdateStatus.Add(oldID)
}
if isKnown {
logCxt.Debug("Endpoint is known, updating new endpoint status")
m.epIDsToUpdateStatus.Add(newID)
}
}
}
if !m.bpfEnabled {
// Set up programming for the host endpoints that are now to be used.
newHostIfaceFiltChains := map[string][]*iptables.Chain{}
newHostIfaceMangleEgressChains := map[string][]*iptables.Chain{}
for ifaceName, id := range newIfaceNameToHostEpID {
log.WithField("id", id).Info("Updating host endpoint normal policy chains.")
hostEp := m.rawHostEndpoints[id]
// Update chains in the filter and mangle tables, for normal traffic.
var ingressPolicyNames, egressPolicyNames []string
var ingressForwardPolicyNames, egressForwardPolicyNames []string
if len(hostEp.Tiers) > 0 {
ingressPolicyNames = hostEp.Tiers[0].IngressPolicies
egressPolicyNames = hostEp.Tiers[0].EgressPolicies
}
if len(hostEp.ForwardTiers) > 0 {
ingressForwardPolicyNames = hostEp.ForwardTiers[0].IngressPolicies
egressForwardPolicyNames = hostEp.ForwardTiers[0].EgressPolicies
}
filtChains := m.ruleRenderer.HostEndpointToFilterChains(
ifaceName,
m.epMarkMapper,
ingressPolicyNames,
egressPolicyNames,
ingressForwardPolicyNames,
egressForwardPolicyNames,
hostEp.ProfileIds,
)
if !reflect.DeepEqual(filtChains, m.activeHostIfaceToFiltChains[ifaceName]) {
m.filterTable.UpdateChains(filtChains)
}
newHostIfaceFiltChains[ifaceName] = filtChains
delete(m.activeHostIfaceToFiltChains, ifaceName)
mangleChains := m.ruleRenderer.HostEndpointToMangleEgressChains(
ifaceName,
egressPolicyNames,
hostEp.ProfileIds,
)
if !reflect.DeepEqual(mangleChains, m.activeHostIfaceToMangleEgressChains[ifaceName]) {
m.mangleTable.UpdateChains(mangleChains)
}
newHostIfaceMangleEgressChains[ifaceName] = mangleChains
delete(m.activeHostIfaceToMangleEgressChains, ifaceName)
}
newHostIfaceMangleIngressChains := map[string][]*iptables.Chain{}
for ifaceName, id := range newPreDNATIfaceNameToHostEpID {
log.WithField("id", id).Info("Updating host endpoint mangle ingress chains.")
hostEp := m.rawHostEndpoints[id]
// Update the mangle table for preDNAT policy.
var ingressPolicyNames []string
if len(hostEp.PreDnatTiers) > 0 {
ingressPolicyNames = hostEp.PreDnatTiers[0].IngressPolicies
}
mangleChains := m.ruleRenderer.HostEndpointToMangleIngressChains(
ifaceName,
ingressPolicyNames,
)
if !reflect.DeepEqual(mangleChains, m.activeHostIfaceToMangleIngressChains[ifaceName]) {
m.mangleTable.UpdateChains(mangleChains)
}
newHostIfaceMangleIngressChains[ifaceName] = mangleChains
delete(m.activeHostIfaceToMangleIngressChains, ifaceName)
}
newHostIfaceRawChains := map[string][]*iptables.Chain{}
for ifaceName, id := range newUntrackedIfaceNameToHostEpID {
log.WithField("id", id).Info("Updating host endpoint raw chains.")
hostEp := m.rawHostEndpoints[id]
// Update the raw chain, for untracked traffic.
var ingressPolicyNames, egressPolicyNames []string
if len(hostEp.UntrackedTiers) > 0 {
ingressPolicyNames = hostEp.UntrackedTiers[0].IngressPolicies
egressPolicyNames = hostEp.UntrackedTiers[0].EgressPolicies
}
rawChains := m.ruleRenderer.HostEndpointToRawChains(
ifaceName,
ingressPolicyNames,
egressPolicyNames,
)
if !reflect.DeepEqual(rawChains, m.activeHostIfaceToRawChains[ifaceName]) {
m.rawTable.UpdateChains(rawChains)
}
newHostIfaceRawChains[ifaceName] = rawChains
delete(m.activeHostIfaceToRawChains, ifaceName)
}
// Remove programming for host endpoints that are not now in use.
for ifaceName, chains := range m.activeHostIfaceToFiltChains {
log.WithField("ifaceName", ifaceName).Info(
"Host interface no longer protected, deleting its normal chains.")
m.filterTable.RemoveChains(chains)
}
for ifaceName, chains := range m.activeHostIfaceToMangleEgressChains {
log.WithField("ifaceName", ifaceName).Info(
"Host interface no longer protected, deleting its mangle egress chains.")
m.mangleTable.RemoveChains(chains)
}
for ifaceName, chains := range m.activeHostIfaceToMangleIngressChains {
log.WithField("ifaceName", ifaceName).Info(
"Host interface no longer protected, deleting its preDNAT chains.")
m.mangleTable.RemoveChains(chains)
}
for ifaceName, chains := range m.activeHostIfaceToRawChains {
log.WithField("ifaceName", ifaceName).Info(
"Host interface no longer protected, deleting its untracked chains.")
m.rawTable.RemoveChains(chains)
}
m.callbacks.InvokeInterfaceCallbacks(m.activeIfaceNameToHostEpID, newIfaceNameToHostEpID)
m.activeHostIfaceToFiltChains = newHostIfaceFiltChains
m.activeHostIfaceToMangleEgressChains = newHostIfaceMangleEgressChains
m.activeHostIfaceToMangleIngressChains = newHostIfaceMangleIngressChains
m.activeHostIfaceToRawChains = newHostIfaceRawChains
}
// Remember the host endpoints that are now in use.
m.activeIfaceNameToHostEpID = newIfaceNameToHostEpID
m.activeHostEpIDToIfaceNames = newHostEpIDToIfaceNames
if m.bpfEnabled {
// Code after this point is for dispatch chains and IPVS endpoint marking, which
// aren't needed in BPF mode.
return
}
// Rewrite the filter dispatch chains if they've changed.
log.WithField("resolvedHostEpIds", newIfaceNameToHostEpID).Debug("Rewrite filter dispatch chains?")
defaultIfaceName := ""
if _, ok := newIfaceNameToHostEpID[allInterfaces]; ok {
// All-interfaces host endpoint is active. Arrange for it to be the default,
// instead of trying to dispatch to it directly based on the non-existent interface
// name *.
defaultIfaceName = allInterfaces
delete(newIfaceNameToHostEpID, allInterfaces)
}
newFilterDispatchChains := m.ruleRenderer.HostDispatchChains(newIfaceNameToHostEpID, defaultIfaceName, true)
newMangleEgressDispatchChains := m.ruleRenderer.ToHostDispatchChains(newIfaceNameToHostEpID, defaultIfaceName)
m.updateDispatchChains(m.activeHostFilterDispatchChains, newFilterDispatchChains, m.filterTable)
// Set flag to update endpoint mark chains.
m.needToCheckEndpointMarkChains = true
// Rewrite the mangle dispatch chains if they've changed.
log.WithField("resolvedHostEpIds", newPreDNATIfaceNameToHostEpID).Debug("Rewrite mangle dispatch chains?")
defaultIfaceName = ""
if _, ok := newPreDNATIfaceNameToHostEpID[allInterfaces]; ok {
// All-interfaces host endpoint is active. Arrange for it to be the
// default. This is handled the same as the filter dispatch chains above.
defaultIfaceName = allInterfaces
delete(newPreDNATIfaceNameToHostEpID, allInterfaces)
}
newMangleIngressDispatchChains := m.ruleRenderer.FromHostDispatchChains(newPreDNATIfaceNameToHostEpID, defaultIfaceName)
newMangleDispatchChains := append(newMangleIngressDispatchChains, newMangleEgressDispatchChains...)
m.updateDispatchChains(m.activeHostMangleDispatchChains, newMangleDispatchChains, m.mangleTable)
// Rewrite the raw dispatch chains if they've changed.
log.WithField("resolvedHostEpIds", newUntrackedIfaceNameToHostEpID).Debug("Rewrite raw dispatch chains?")
newRawDispatchChains := m.ruleRenderer.HostDispatchChains(newUntrackedIfaceNameToHostEpID, "", false)
m.updateDispatchChains(m.activeHostRawDispatchChains, newRawDispatchChains, m.rawTable)
log.Debug("Done resolving host endpoints.")
}
// updateDispatchChains updates one of the sets of dispatch chains. It sends the changes to the
// given iptables.Table and records the updates in the activeChains map.
//
// Calculating the minimum update prevents log spam and reduces the work needed in the Table.
func (m *endpointManager) updateDispatchChains(
activeChains map[string]*iptables.Chain,
newChains []*iptables.Chain,
table iptablesTable,
) {
seenChains := set.New()
for _, newChain := range newChains {
seenChains.Add(newChain.Name)
oldChain := activeChains[newChain.Name]
if !reflect.DeepEqual(newChain, oldChain) {
table.UpdateChain(newChain)
activeChains[newChain.Name] = newChain
}
}
for name := range activeChains {
if !seenChains.Contains(name) {
table.RemoveChainByName(name)
delete(activeChains, name)
}
}
}
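// For example, if activeChains currently holds chains {A, B} and newChains
// contains a modified A plus a new chain C, this calls UpdateChain for A and C,
// RemoveChainByName for B, and leaves activeChains as {A, C}.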
func (m *endpointManager) interfaceExistsInProcSys(name string) (bool, error) {
var directory string
if m.ipVersion == 4 {
directory = fmt.Sprintf("/proc/sys/net/ipv4/conf/%s", name)
} else {
directory = fmt.Sprintf("/proc/sys/net/ipv6/conf/%s", name)
}
_, err := m.osStat(directory)
if os.IsNotExist(err) {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}
func (m *endpointManager) configureInterface(name string) error {
if !m.activeUpIfaces.Contains(name) {
log.WithField("ifaceName", name).Info(
"Skipping configuration of interface because it is oper down.")
return nil
}
// Special case: for security, even if our IPv6 support is disabled, try to disable RAs on the interface.
acceptRAPath := fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/accept_ra", name)
err := m.writeProcSys(acceptRAPath, "0")
if err != nil {
if exists, err := m.interfaceExistsInProcSys(name); err == nil && !exists {
log.WithField("file", acceptRAPath).Debug(
"Failed to set accept_ra flag. Interface is missing in /proc/sys.")
} else {
log.WithField("ifaceName", name).Warnf("Could not set accept_ra: %v", err)
}
}
log.WithField("ifaceName", name).Info(
"Applying /proc/sys configuration to interface.")
if m.ipVersion == 4 {
// Enable routing to localhost. This is required to allow for NAT to the local
// host.
err := m.writeProcSys(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/route_localnet", name), "1")
if err != nil {
return err
}
// Normally, the kernel has a delay before responding to proxy ARP but we know
// that's not needed in a Calico network so we disable it.
err = m.writeProcSys(fmt.Sprintf("/proc/sys/net/ipv4/neigh/%s/proxy_delay", name), "0")
if err != nil {
return err
}
// Enable proxy ARP, this makes the host respond to all ARP requests with its own
// MAC. This has a couple of advantages:
//
// - In OpenStack, we're forced to configure the guest's networking using DHCP.
// Since DHCP requires a subnet and gateway, representing the Calico network
// in the natural way would lose a lot of IP addresses. For IPv4, we'd have to
// advertise a distinct /30 to each guest, which would use up 4 IPs per guest.
// Using proxy ARP, we can advertise the whole pool to each guest as its subnet
// but have the host respond to all ARP requests and route all the traffic whether
// it is on or off subnet.
//
// - For containers, we install explicit routes into the containers network
		// namespace and we use a link-local address for the gateway. Turning on proxy ARP
// means that we don't need to assign the link local address explicitly to each
// host side of the veth, which is one fewer thing to maintain and one fewer
// thing we may clash over.
err = m.writeProcSys(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/proxy_arp", name), "1")
if err != nil {
return err
}
// Enable IP forwarding of packets coming _from_ this interface. For packets to
// be forwarded in both directions we need this flag to be set on the fabric-facing
// interface too (or for the global default to be set).
err = m.writeProcSys(fmt.Sprintf("/proc/sys/net/ipv4/conf/%s/forwarding", name), "1")
if err != nil {
return err
}
} else {
// Enable proxy NDP, similarly to proxy ARP, described above.
err := m.writeProcSys(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/proxy_ndp", name), "1")
if err != nil {
return err
}
// Enable IP forwarding of packets coming _from_ this interface. For packets to
// be forwarded in both directions we need this flag to be set on the fabric-facing
// interface too (or for the global default to be set).
err = m.writeProcSys(fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/forwarding", name), "1")
if err != nil {
return err
}
}
return nil
}
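// In summary, configureInterface always tries to set accept_ra=0 on the
// interface, then for ipVersion 4 sets route_localnet=1, proxy_delay=0,
// proxy_arp=1 and forwarding=1, and for ipVersion 6 sets proxy_ndp=1 and
// forwarding=1.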
func writeProcSys(path, value string) error {
f, err := os.OpenFile(path, os.O_WRONLY, 0)
if err != nil {
return err
}
n, err := f.Write([]byte(value))
if err == nil && n < len(value) {
err = io.ErrShortWrite
}
if err1 := f.Close(); err == nil {
err = err1
}
return err
}
// The interface name that we use to mean "all interfaces". This is intentionally longer than
// IFNAMSIZ (16) characters, so that it can't possibly match a real interface name.
var allInterfaces = "any-interface-at-all"
// True if the given host endpoint is for all interfaces, as opposed to for a specific interface.
func forAllInterfaces(hep *proto.HostEndpoint) bool {
return hep.Name == "*"
}
// for implementing the endpointsSource interface
func (m *endpointManager) GetRawHostEndpoints() map[proto.HostEndpointID]*proto.HostEndpoint {
return m.rawHostEndpoints
}
| 1 | 19,088 | tiniest of nits: why would you abbreviate "interface" but not "configuration" ? | projectcalico-felix | c |
@@ -19,9 +19,6 @@ namespace Samples.HttpMessageHandler
private static string Url;
-#if NETFRAMEWORK
- [LoaderOptimization(LoaderOptimization.MultiDomainHost)]
-#endif
public static void Main(string[] args)
{
bool tracingDisabled = args.Any(arg => arg.Equals("TracingDisabled", StringComparison.OrdinalIgnoreCase)); | 1 | using System;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Datadog.Core.Tools;
namespace Samples.HttpMessageHandler
{
public static class Program
{
private const string RequestContent = "PING";
private const string ResponseContent = "PONG";
private static readonly Encoding Utf8 = Encoding.UTF8;
private static Thread listenerThread;
private static string Url;
#if NETFRAMEWORK
[LoaderOptimization(LoaderOptimization.MultiDomainHost)]
#endif
public static void Main(string[] args)
{
bool tracingDisabled = args.Any(arg => arg.Equals("TracingDisabled", StringComparison.OrdinalIgnoreCase));
Console.WriteLine($"TracingDisabled {tracingDisabled}");
bool useHttpClient = args.Any(arg => arg.Equals("HttpClient", StringComparison.OrdinalIgnoreCase));
Console.WriteLine($"HttpClient {useHttpClient}");
bool useWebClient = args.Any(arg => arg.Equals("WebClient", StringComparison.OrdinalIgnoreCase));
Console.WriteLine($"WebClient {useWebClient}");
string port = args.FirstOrDefault(arg => arg.StartsWith("Port="))?.Split('=')[1] ?? "9000";
Console.WriteLine($"Port {port}");
using (var listener = StartHttpListenerWithPortResilience(port))
{
Console.WriteLine();
Console.WriteLine($"Starting HTTP listener at {Url}");
if (args.Length == 0 || args.Any(arg => arg.Equals("HttpClient", StringComparison.OrdinalIgnoreCase)))
{
// send an http request using HttpClient
Console.WriteLine();
Console.WriteLine("Sending request with HttpClient.");
SendHttpClientRequestAsync(tracingDisabled).GetAwaiter().GetResult();
}
if (args.Length == 0 || args.Any(arg => arg.Equals("WebClient", StringComparison.OrdinalIgnoreCase)))
{
// send an http request using WebClient
Console.WriteLine();
Console.WriteLine("Sending request with WebClient.");
SendWebClientRequest(tracingDisabled);
}
Console.WriteLine();
Console.WriteLine("Stopping HTTP listener.");
listener.Stop();
}
// Force process to end, otherwise the background listener thread lives forever in .NET Core.
// Apparently listener.GetContext() doesn't throw an exception if listener.Stop() is called,
// like it does in .NET Framework.
Environment.Exit(0);
}
public static HttpListener StartHttpListenerWithPortResilience(string port, int retries = 5)
{
// try up to 5 consecutive ports before giving up
while (true)
{
Url = $"http://localhost:{port}/Samples.HttpMessageHandler/";
// seems like we can't reuse a listener if it fails to start,
// so create a new listener each time we retry
var listener = new HttpListener();
listener.Prefixes.Add(Url);
try
{
listener.Start();
listenerThread = new Thread(HandleHttpRequests);
listenerThread.Start(listener);
return listener;
}
catch (HttpListenerException) when (retries > 0)
{
// only catch the exception if there are retries left
port = TcpPortProvider.GetOpenPort().ToString();
retries--;
}
// always close listener if exception is thrown,
// whether it was caught or not
listener.Close();
}
}
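// For example, if the initial port is already in use, the loop asks
// TcpPortProvider.GetOpenPort() for a fresh port and tries again with a new
// HttpListener, until the retry budget is used up.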
private static async Task SendHttpClientRequestAsync(bool tracingDisabled)
{
Console.WriteLine($"[HttpClient] sending request to {Url}");
var clientRequestContent = new StringContent(RequestContent, Utf8);
using (var client = new HttpClient())
{
if (tracingDisabled)
{
client.DefaultRequestHeaders.Add("x-datadog-tracing-enabled", "false");
}
using (var responseMessage = await client.PostAsync(Url, clientRequestContent))
{
// read response content and headers
var responseContent = await responseMessage.Content.ReadAsStringAsync();
Console.WriteLine($"[HttpClient] response content: {responseContent}");
foreach (var header in responseMessage.Headers)
{
var name = header.Key;
var values = string.Join(",", header.Value);
Console.WriteLine($"[HttpClient] response header: {name}={values}");
}
}
}
#if NETCOREAPP
using (var client = new HttpClient(new SocketsHttpHandler()))
{
if (tracingDisabled)
{
client.DefaultRequestHeaders.Add("x-datadog-tracing-enabled", "false");
}
using (var responseMessage = await client.PostAsync(Url, clientRequestContent))
{
// read response content and headers
var responseContent = await responseMessage.Content.ReadAsStringAsync();
Console.WriteLine($"[HttpClient] response content: {responseContent}");
foreach (var header in responseMessage.Headers)
{
var name = header.Key;
var values = string.Join(",", header.Value);
Console.WriteLine($"[HttpClient] response header: {name}={values}");
}
}
}
#endif
}
private static void SendWebClientRequest(bool tracingDisabled)
{
Console.WriteLine($"[WebClient] sending request to {Url}");
using (var webClient = new WebClient())
{
webClient.Encoding = Utf8;
if (tracingDisabled)
{
webClient.Headers.Add("x-datadog-tracing-enabled", "false");
}
var responseContent = webClient.DownloadString(Url);
Console.WriteLine($"[WebClient] response content: {responseContent}");
foreach (string headerName in webClient.ResponseHeaders)
{
string headerValue = webClient.ResponseHeaders[headerName];
Console.WriteLine($"[WebClient] response header: {headerName}={headerValue}");
}
}
}
private static void HandleHttpRequests(object state)
{
var listener = (HttpListener)state;
while (listener.IsListening)
{
try
{
var context = listener.GetContext();
Console.WriteLine("[HttpListener] received request");
// read request content and headers
using (var reader = new StreamReader(context.Request.InputStream, context.Request.ContentEncoding))
{
string requestContent = reader.ReadToEnd();
Console.WriteLine($"[HttpListener] request content: {requestContent}");
foreach (string headerName in context.Request.Headers)
{
string headerValue = context.Request.Headers[headerName];
Console.WriteLine($"[HttpListener] request header: {headerName}={headerValue}");
}
}
// write response content
byte[] responseBytes = Utf8.GetBytes(ResponseContent);
context.Response.ContentEncoding = Utf8;
context.Response.ContentLength64 = responseBytes.Length;
context.Response.OutputStream.Write(responseBytes, 0, responseBytes.Length);
// we must close the response
context.Response.Close();
}
catch (HttpListenerException)
{
// listener was stopped,
// ignore to let the loop end and the method return
}
}
}
}
}
| 1 | 17,074 | Removing since all of the domain-neutral testing will be done in the new `Samples.MultiDomainHost.Runner` app | DataDog-dd-trace-dotnet | .cs |
@@ -50,6 +50,8 @@ def getpcmd(pid):
spid, scmd = line.strip().split(' ', 1)
if int(spid) == int(pid):
return scmd
+ # Fallback instead of None, for e.g. Cygwin where -o is an "unknown option" for the ps command:
+ return '[PROCESS_WITH_PID={}]'.format(pid)
def get_info(pid_dir, my_pid=None): | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Locking functionality when launching things from the command line.
Uses a pidfile.
This prevents multiple identical workflows from being launched simultaneously.
"""
from __future__ import print_function
import hashlib
import os
from luigi import six
def getpcmd(pid):
"""
Returns the command line of the process with the given pid.
:param pid:
"""
if os.name == "nt":
# Use wmic command instead of ps on Windows.
cmd = 'wmic path win32_process where ProcessID=%s get Commandline' % (pid, )
with os.popen(cmd, 'r') as p:
lines = [line for line in p.readlines() if line.strip("\r\n ") != ""]
if lines:
_, val = lines
return val
else:
cmd = 'ps -o pid,args'
with os.popen(cmd, 'r') as p:
# Skip the column titles
p.readline()
for line in p:
spid, scmd = line.strip().split(' ', 1)
if int(spid) == int(pid):
return scmd
def get_info(pid_dir, my_pid=None):
# Check the name and pid of this process
if my_pid is None:
my_pid = os.getpid()
my_cmd = getpcmd(my_pid)
if six.PY3:
cmd_hash = my_cmd.encode('utf8')
else:
cmd_hash = my_cmd
pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'
return my_pid, my_cmd, pid_file
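# For example, if my_cmd is "/usr/bin/my_process --foo bar", the lock file is
# <pid_dir>/<md5 hex digest of that command line>.pid, so identical command
# lines always resolve to the same pid file.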
def acquire_for(pid_dir, num_available=1, kill_signal=None):
"""
Makes sure the process is only run once at the same time with the same name.
Notice that since we check the process name, different parameters to the same
command can spawn multiple processes at the same time, i.e. running
"/usr/bin/my_process" does not prevent anyone from launching
"/usr/bin/my_process --foo bar".
"""
my_pid, my_cmd, pid_file = get_info(pid_dir)
# Check if there is a pid file corresponding to this name
if not os.path.exists(pid_dir):
os.mkdir(pid_dir)
os.chmod(pid_dir, 0o777)
pids = set()
pid_cmds = {}
if os.path.exists(pid_file):
# There is such a file - read the pid and look up its process name
pids.update(filter(None, map(str.strip, open(pid_file))))
pid_cmds = dict((pid, getpcmd(pid)) for pid in pids)
matching_pids = list(filter(lambda pid: pid_cmds[pid] == my_cmd, pids))
if kill_signal is not None:
for pid in map(int, matching_pids):
os.kill(pid, kill_signal)
elif len(matching_pids) >= num_available:
# We are already running under a different pid
print('Pid(s)', ', '.join(matching_pids), 'already running')
return False
else:
# There are fewer matching processes than num_available; we are allowed to run.
pass
pid_cmds[str(my_pid)] = my_cmd
# Write pids
pids.add(str(my_pid))
with open(pid_file, 'w') as f:
f.writelines('%s\n' % (pid, ) for pid in filter(pid_cmds.__getitem__, pids))
# Make the file writable by all
if os.name == 'nt':
pass
else:
s = os.stat(pid_file)
if os.getuid() == s.st_uid:
os.chmod(pid_file, s.st_mode | 0o777)
return True
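# Illustrative usage (the pid_dir value here is arbitrary):
#
#     if not acquire_for('/var/tmp/luigi'):
#         sys.exit(1)  # an identical command line already holds the lock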
| 1 | 14,328 | is there a better way to detect this rather than just always assuming it's cygwin if everything else fails? i think you can check `if sys.platform == 'cygwin'` | spotify-luigi | py |
@@ -410,6 +410,10 @@ def _build_internal(package, path, dry_run, env):
try:
_clone_git_repo(url, branch, tmpdir)
build_from_path(package, tmpdir, dry_run=dry_run, env=env)
+ except yaml.scanner.ScannerError as ex:
+ message_parts = str(ex).split('\n')
+ message_parts.insert(0, "Syntax error while reading {!r}".format(path))
+ raise CommandException('\n '.join(message_parts))
except Exception as exc:
msg = "attempting git clone raised exception: {exc}"
raise CommandException(msg.format(exc=exc)) | 1 | # -*- coding: utf-8 -*-
"""
Command line parsing and command dispatch
"""
from __future__ import print_function
from builtins import input # pylint:disable=W0622
from datetime import datetime
import gzip
import hashlib
import json
import os
import re
from shutil import copyfileobj, move, rmtree
import stat
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import yaml
from packaging.version import Version
import pandas as pd
import pkg_resources
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from six import iteritems, string_types
from six.moves.urllib.parse import urlparse, urlunparse
from tqdm import tqdm
from .build import (build_package, build_package_from_contents, generate_build_file,
generate_contents, BuildException)
from .const import DEFAULT_BUILDFILE, LATEST_TAG
from .core import (hash_contents, find_object_hashes, PackageFormat, TableNode, FileNode, GroupNode,
decode_node, encode_node, exec_yaml_python, CommandException, diff_dataframes,
load_yaml)
from .hashing import digest_file
from .store import PackageStore, parse_package, parse_package_extended
from .util import BASE_DIR, FileWithReadProgress, gzip_compress
from . import check_functions as qc
from .. import nodes
# pyOpenSSL and S3 don't play well together. pyOpenSSL is completely optional, but gets enabled by requests.
# So... We disable it. That's what boto does.
# https://github.com/boto/botocore/issues/760
# https://github.com/boto/botocore/pull/803
try:
from urllib3.contrib import pyopenssl
pyopenssl.extract_from_urllib3()
except ImportError:
pass
DEFAULT_REGISTRY_URL = 'https://pkg.quiltdata.com'
GIT_URL_RE = re.compile(r'(?P<url>http[s]?://[\w./~_-]+\.git)(?:@(?P<branch>[\w_-]+))?')
CHUNK_SIZE = 4096
PARALLEL_UPLOADS = 20
S3_CONNECT_TIMEOUT = 30
S3_READ_TIMEOUT = 30
CONTENT_RANGE_RE = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$')
LOG_TIMEOUT = 3 # 3 seconds
VERSION = pkg_resources.require('quilt')[0].version
_registry_url = None
def _load_config():
config_path = os.path.join(BASE_DIR, 'config.json')
if os.path.exists(config_path):
with open(config_path) as fd:
return json.load(fd)
return {}
def _save_config(cfg):
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
config_path = os.path.join(BASE_DIR, 'config.json')
with open(config_path, 'w') as fd:
json.dump(cfg, fd)
def get_registry_url():
global _registry_url
if _registry_url is not None:
return _registry_url
# Env variable; overrides the config.
url = os.environ.get('QUILT_PKG_URL')
if url is None:
# Config file (generated by `quilt config`).
cfg = _load_config()
url = cfg.get('registry_url', '')
# '' means default URL.
_registry_url = url or DEFAULT_REGISTRY_URL
return _registry_url
def config():
answer = input("Please enter the URL for your custom Quilt registry (ask your administrator),\n" +
"or leave this line blank to use the default registry: ")
if answer:
url = urlparse(answer.rstrip('/'))
if (url.scheme not in ['http', 'https'] or not url.netloc or
url.path or url.params or url.query or url.fragment):
raise CommandException("Invalid URL: %s" % answer)
canonical_url = urlunparse(url)
else:
# When saving the config, store '' instead of the actual URL in case we ever change it.
canonical_url = ''
cfg = _load_config()
cfg['registry_url'] = canonical_url
_save_config(cfg)
# Clear the cached URL.
global _registry_url
_registry_url = None
def get_auth_path():
url = get_registry_url()
if url == DEFAULT_REGISTRY_URL:
suffix = ''
else:
# Store different servers' auth in different files.
suffix = "-%.8s" % hashlib.md5(url.encode('utf-8')).hexdigest()
return os.path.join(BASE_DIR, 'auth%s.json' % suffix)
def _update_auth(refresh_token):
response = requests.post("%s/api/token" % get_registry_url(), data=dict(
refresh_token=refresh_token
))
if response.status_code != requests.codes.ok:
raise CommandException("Authentication error: %s" % response.status_code)
data = response.json()
error = data.get('error')
if error is not None:
raise CommandException("Failed to log in: %s" % error)
return dict(
refresh_token=data['refresh_token'],
access_token=data['access_token'],
expires_at=data['expires_at']
)
def _save_auth(auth):
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
file_path = get_auth_path()
with open(file_path, 'w') as fd:
os.chmod(file_path, stat.S_IRUSR | stat.S_IWUSR)
json.dump(auth, fd)
def _handle_response(resp, **kwargs):
_ = kwargs # unused pylint:disable=W0613
if resp.status_code == requests.codes.unauthorized:
raise CommandException("Authentication failed. Run `quilt login` again.")
elif not resp.ok:
try:
data = resp.json()
raise CommandException(data['message'])
except ValueError:
raise CommandException("Unexpected failure: error %s" % resp.status_code)
def _create_auth():
"""
Reads the credentials, updates the access token if necessary, and returns it.
"""
file_path = get_auth_path()
if os.path.exists(file_path):
with open(file_path) as fd:
auth = json.load(fd)
# If the access token expires within a minute, update it.
if auth['expires_at'] < time.time() + 60:
try:
auth = _update_auth(auth['refresh_token'])
except CommandException as ex:
raise CommandException(
"Failed to update the access token (%s). Run `quilt login` again." % ex
)
_save_auth(auth)
else:
# The auth file doesn't exist, probably because the
# user hasn't run quilt login yet.
auth = None
return auth
def _create_session(auth):
"""
Creates a session object to be used for `push`, `install`, etc.
"""
session = requests.Session()
session.hooks.update(dict(
response=_handle_response
))
session.headers.update({
"Content-Type": "application/json",
"Accept": "application/json",
"User-Agent": "quilt-cli/%s" % VERSION,
})
if auth is not None:
session.headers["Authorization"] = "Bearer %s" % auth['access_token']
return session
_session = None # pylint:disable=C0103
def _get_session():
"""
Creates a session or returns an existing session.
"""
global _session # pylint:disable=C0103
if _session is None:
auth = _create_auth()
_session = _create_session(auth)
return _session
def _clear_session():
global _session # pylint:disable=C0103
if _session is not None:
_session.close()
_session = None
def _open_url(url):
try:
if sys.platform == 'win32':
os.startfile(url) # pylint:disable=E1101
elif sys.platform == 'darwin':
with open(os.devnull, 'r+') as null:
subprocess.check_call(['open', url], stdin=null, stdout=null, stderr=null)
else:
with open(os.devnull, 'r+') as null:
subprocess.check_call(['xdg-open', url], stdin=null, stdout=null, stderr=null)
except Exception as ex: # pylint:disable=W0703
print("Failed to launch the browser: %s" % ex)
def _match_hash(session, owner, pkg, hash, raise_exception=True):
# short-circuit for exact length
if len(hash) == 64:
return hash
response = session.get(
"{url}/api/log/{owner}/{pkg}/".format(
url=get_registry_url(),
owner=owner,
pkg=pkg
)
)
for entry in reversed(response.json()['logs']):
# support short hashes
if entry['hash'].startswith(hash):
return entry['hash']
if raise_exception:
raise CommandException("Invalid hash for package {owner}/{pkg}: {hash}".format(
hash=hash, owner=owner, pkg=pkg))
return None
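# Illustrative note (not part of the original module): _match_hash accepts either a full
# 64-character package hash (returned unchanged) or a shorter prefix, which is resolved by
# scanning the package log on the registry for an entry whose hash starts with that prefix.
# A hypothetical call might look like:
#
#     full_hash = _match_hash(session, 'someuser', 'somepkg', 'a1b2c3')
#     # -> the matching entry's full 64-character hash, or CommandException if nothing matches.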
def login():
"""
Authenticate.
Launches a web browser and asks the user for a token.
"""
login_url = "%s/login" % get_registry_url()
print("Launching a web browser...")
print("If that didn't work, please visit the following URL: %s" % login_url)
_open_url(login_url)
print()
refresh_token = input("Enter the code from the webpage: ")
login_with_token(refresh_token)
def login_with_token(refresh_token):
"""
Authenticate using an existing token.
"""
# Get an access token and a new refresh token.
auth = _update_auth(refresh_token)
_save_auth(auth)
_clear_session()
def logout():
"""
Become anonymous. Useful for testing.
"""
auth_file = get_auth_path()
# TODO revoke refresh token (without logging out of web sessions)
if os.path.exists(auth_file):
os.remove(auth_file)
else:
print("Already logged out.")
_clear_session()
def generate(directory):
"""
Generate a build-file for quilt build from a directory of
source files.
"""
try:
buildfilepath = generate_build_file(directory)
except BuildException as builderror:
raise CommandException(str(builderror))
print("Generated build-file %s." % (buildfilepath))
def diff_node_dataframe(package, nodename, dataframe):
"""
compare two dataframes and print the result
WIP: find_node_by_name() doesn't work yet.
TODO: higher level API: diff_two_files(filepath1, filepath2)
TODO: higher level API: diff_node_file(file, package, nodename, filepath)
"""
owner, pkg = parse_package(package)
pkgobj = PackageStore.find_package(owner, pkg)
if pkgobj is None:
raise CommandException("Package {owner}/{pkg} not found.".format(owner=owner, pkg=pkg))
node = pkgobj.find_node_by_name(nodename)
if node is None:
raise CommandException("Node path not found: {}".format(nodename))
quilt_dataframe = pkgobj.get_obj(node)
return diff_dataframes(quilt_dataframe, dataframe)
def check(path=None, env='default'):
"""
Execute the checks: rules for a given build.yml file.
"""
# TODO: add files=<list of files> to check only a subset...
# also useful for 'quilt build' to exclude certain files?
# (if not, then require dry_run=True if files!=None/all)
build("dry_run/dry_run", path=path, dry_run=True, env=env)
def _clone_git_repo(url, branch, dest):
cmd = ['git', 'clone', '-q', '--depth=1']
if branch:
cmd += ['-b', branch]
cmd += [url, dest]
subprocess.check_call(cmd)
def _log(**kwargs):
# TODO(dima): Save logs to a file, then send them when we get a chance.
cfg = _load_config()
if cfg.get('disable_analytics'):
return
session = _get_session()
# Disable error handling.
orig_response_hooks = session.hooks.get('response')
session.hooks.update(dict(
response=None
))
try:
session.post(
"{url}/api/log".format(
url=get_registry_url(),
),
data=json.dumps([kwargs]),
timeout=LOG_TIMEOUT,
)
except requests.exceptions.RequestException:
# Ignore logging errors.
pass
# restore disabled error-handling
session.hooks['response'] = orig_response_hooks
def build(package, path=None, dry_run=False, env='default'):
"""
Compile a Quilt data package, either from a build file or an existing package node.
"""
package_hash = hashlib.md5(package.encode('utf-8')).hexdigest()
try:
_build_internal(package, path, dry_run, env)
except Exception as ex:
_log(type='build', package=package_hash, dry_run=dry_run, env=env, error=str(ex))
raise
_log(type='build', package=package_hash, dry_run=dry_run, env=env)
def _build_internal(package, path, dry_run, env):
# we may have a path, git URL, PackageNode, or None
if isinstance(path, string_types):
# is this a git url?
is_git_url = GIT_URL_RE.match(path)
if is_git_url:
tmpdir = tempfile.mkdtemp()
url = is_git_url.group('url')
branch = is_git_url.group('branch')
try:
_clone_git_repo(url, branch, tmpdir)
build_from_path(package, tmpdir, dry_run=dry_run, env=env)
except Exception as exc:
msg = "attempting git clone raised exception: {exc}"
raise CommandException(msg.format(exc=exc))
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
else:
build_from_path(package, path, dry_run=dry_run, env=env)
elif isinstance(path, nodes.PackageNode):
assert not dry_run # TODO?
build_from_node(package, path)
elif path is None:
assert not dry_run # TODO?
build_empty(package)
else:
raise ValueError("Expected a PackageNode, path or git URL, but got %r" % path)
def build_empty(package):
"""
Create an empty package for convenient editing of de novo packages
"""
owner, pkg = parse_package(package)
store = PackageStore()
new = store.create_package(owner, pkg)
new.save_contents()
def build_from_node(package, node):
"""
Compile a Quilt data package from an existing package node.
"""
owner, pkg = parse_package(package)
# deliberate access of protected member
store = node._package.get_store()
package_obj = store.create_package(owner, pkg)
def _process_node(node, path=''):
if isinstance(node, nodes.GroupNode):
for key, child in node._items():
_process_node(child, path + '/' + key)
elif isinstance(node, nodes.DataNode):
core_node = node._node
metadata = core_node.metadata or {}
if isinstance(core_node, TableNode):
dataframe = node._data()
package_obj.save_df(dataframe, path, metadata.get('q_path'), metadata.get('q_ext'),
'pandas', PackageFormat.default)
elif isinstance(core_node, FileNode):
src_path = node._data()
package_obj.save_file(src_path, path, metadata.get('q_path'))
else:
assert False, "Unexpected core node type: %r" % core_node
else:
assert False, "Unexpected node type: %r" % node
_process_node(node)
package_obj.save_contents()
def build_from_path(package, path, dry_run=False, env='default'):
"""
Compile a Quilt data package from a build file.
Path can be a directory, in which case the build file will be generated automatically.
"""
owner, pkg = parse_package(package)
if not os.path.exists(path):
raise CommandException("%s does not exist." % path)
try:
if os.path.isdir(path):
buildpath = os.path.join(path, DEFAULT_BUILDFILE)
if os.path.exists(buildpath):
raise CommandException(
"Build file already exists. Run `quilt build %r` instead." % buildpath
)
contents = generate_contents(path, DEFAULT_BUILDFILE)
build_package_from_contents(owner, pkg, path, contents, dry_run=dry_run, env=env)
else:
build_package(owner, pkg, path, dry_run=dry_run, env=env)
if not dry_run:
print("Built %s/%s successfully." % (owner, pkg))
except BuildException as ex:
raise CommandException("Failed to build the package: %s" % ex)
def log(package):
"""
List all of the changes to a package on the server.
"""
owner, pkg = parse_package(package)
session = _get_session()
response = session.get(
"{url}/api/log/{owner}/{pkg}/".format(
url=get_registry_url(),
owner=owner,
pkg=pkg
)
)
format_str = "%-64s %-19s %s"
print(format_str % ("Hash", "Pushed", "Author"))
for entry in reversed(response.json()['logs']):
ugly = datetime.fromtimestamp(entry['created'])
nice = ugly.strftime("%Y-%m-%d %H:%M:%S")
print(format_str % (entry['hash'], nice, entry['author']))
def push(package, public=False, reupload=False):
"""
Push a Quilt data package to the server
"""
owner, pkg = parse_package(package)
session = _get_session()
pkgobj = PackageStore.find_package(owner, pkg)
if pkgobj is None:
raise CommandException("Package {owner}/{pkg} not found.".format(owner=owner, pkg=pkg))
pkghash = pkgobj.get_hash()
def _push_package(dry_run=False):
data = json.dumps(dict(
dry_run=dry_run,
public=public,
contents=pkgobj.get_contents(),
description="" # TODO
), default=encode_node)
compressed_data = gzip_compress(data.encode('utf-8'))
return session.put(
"{url}/api/package/{owner}/{pkg}/{hash}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
hash=pkghash
),
data=compressed_data,
headers={
'Content-Encoding': 'gzip'
}
)
print("Fetching upload URLs from the registry...")
resp = _push_package(dry_run=True)
upload_urls = resp.json()['upload_urls']
obj_queue = sorted(set(find_object_hashes(pkgobj.get_contents())), reverse=True)
total = len(obj_queue)
total_bytes = 0
for obj_hash in obj_queue:
total_bytes += os.path.getsize(pkgobj.get_store().object_path(obj_hash))
uploaded = []
lock = Lock()
headers = {
'Content-Encoding': 'gzip'
}
print("Uploading %d fragments (%d bytes before compression)..." % (total, total_bytes))
with tqdm(total=total_bytes, unit='B', unit_scale=True) as progress:
def _worker_thread():
with requests.Session() as s3_session:
# Retry 500s.
retries = Retry(total=3,
backoff_factor=.5,
status_forcelist=[500, 502, 503, 504])
s3_session.mount('https://', HTTPAdapter(max_retries=retries))
while True:
with lock:
if not obj_queue:
break
obj_hash = obj_queue.pop()
try:
obj_urls = upload_urls[obj_hash]
original_size = os.path.getsize(pkgobj.get_store().object_path(obj_hash))
if reupload or not s3_session.head(obj_urls['head']).ok:
# Create a temporary gzip'ed file.
with pkgobj.tempfile(obj_hash) as temp_file:
temp_file.seek(0, 2)
compressed_size = temp_file.tell()
temp_file.seek(0)
# Workaround for non-local variables in Python 2.7
class Context:
compressed_read = 0
original_last_update = 0
def _progress_cb(count):
Context.compressed_read += count
original_read = Context.compressed_read * original_size // compressed_size
with lock:
progress.update(original_read - Context.original_last_update)
Context.original_last_update = original_read
with FileWithReadProgress(temp_file, _progress_cb) as fd:
url = obj_urls['put']
response = s3_session.put(url, data=fd, headers=headers)
response.raise_for_status()
else:
with lock:
tqdm.write("Fragment %s already uploaded; skipping." % obj_hash)
progress.update(original_size)
with lock:
uploaded.append(obj_hash)
except requests.exceptions.RequestException as ex:
message = "Upload failed for %s:\n" % obj_hash
if ex.response is not None:
message += "URL: %s\nStatus code: %s\nResponse: %r\n" % (
ex.request.url, ex.response.status_code, ex.response.text
)
else:
message += "%s\n" % ex
with lock:
tqdm.write(message)
threads = [
Thread(target=_worker_thread, name="upload-worker-%d" % i)
for i in range(PARALLEL_UPLOADS)
]
for thread in threads:
thread.daemon = True
thread.start()
for thread in threads:
thread.join()
if len(uploaded) != total:
raise CommandException("Failed to upload fragments")
print("Uploading package metadata...")
_push_package()
print("Updating the 'latest' tag...")
session.put(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
tag=LATEST_TAG
),
data=json.dumps(dict(
hash=pkghash
))
)
url = "https://quiltdata.com/package/%s/%s" % (owner, pkg)
print("Push complete. %s/%s is live:\n%s" % (owner, pkg, url))
def version_list(package):
"""
List the versions of a package.
"""
owner, pkg = parse_package(package)
session = _get_session()
response = session.get(
"{url}/api/version/{owner}/{pkg}/".format(
url=get_registry_url(),
owner=owner,
pkg=pkg
)
)
for version in response.json()['versions']:
print("%s: %s" % (version['version'], version['hash']))
def version_add(package, version, pkghash, force=False):
"""
Add a new version for a given package hash.
Version format needs to follow PEP 440.
Versions are permanent - once created, they cannot be modified or deleted.
"""
owner, pkg = parse_package(package)
session = _get_session()
try:
Version(version)
except ValueError:
url = "https://www.python.org/dev/peps/pep-0440/#examples-of-compliant-version-schemes"
raise CommandException(
"Invalid version format; see %s" % url
)
if not force:
answer = input("Versions cannot be modified or deleted; are you sure? (y/n) ")
if answer.lower() != 'y':
return
session.put(
"{url}/api/version/{owner}/{pkg}/{version}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
version=version
),
data=json.dumps(dict(
hash=_match_hash(session, owner, pkg, pkghash)
))
)
def tag_list(package):
"""
List the tags of a package.
"""
owner, pkg = parse_package(package)
session = _get_session()
response = session.get(
"{url}/api/tag/{owner}/{pkg}/".format(
url=get_registry_url(),
owner=owner,
pkg=pkg
)
)
for tag in response.json()['tags']:
print("%s: %s" % (tag['tag'], tag['hash']))
def tag_add(package, tag, pkghash):
"""
Add a new tag for a given package hash.
Unlike versions, tags can have an arbitrary format, and can be modified
and deleted.
When a package is pushed, it gets the "latest" tag.
"""
owner, pkg = parse_package(package)
session = _get_session()
session.put(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
tag=tag
),
data=json.dumps(dict(
hash=_match_hash(session, owner, pkg, pkghash)
))
)
def tag_remove(package, tag):
"""
Delete a tag.
"""
owner, pkg = parse_package(package)
session = _get_session()
session.delete(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
tag=tag
)
)
def install_via_requirements(requirements_str, force=False):
"""
    Download multiple Quilt data packages via a YAML requirements file.
"""
if requirements_str[0] == '@':
path = requirements_str[1:]
if os.path.isfile(path):
yaml_data = load_yaml(path)
else:
raise CommandException("Requirements file not found: {filename}".format(filename=path))
else:
yaml_data = yaml.load(requirements_str)
for pkginfo in yaml_data['packages']:
owner, pkg, subpath, hash, version, tag = parse_package_extended(pkginfo)
package = owner + '/' + pkg
if subpath is not None:
package += '/' + "/".join(subpath)
install(package, hash, version, tag, force=force)
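# Illustrative note (not part of the original module): the requirements data parsed above is
# YAML with a top-level 'packages' list, each entry using the package syntax handled by
# parse_package_extended (owner/package, optional subpath, and optional hash/version/tag
# qualifiers). A hypothetical requirements file might look like:
#
#     packages:
#       - someuser/somepkg
#       - someuser/otherpkg/some/subpath
#
# The exact qualifier syntax is defined by parse_package_extended in store.py.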
def install(package, hash=None, version=None, tag=None, force=False):
"""
Download a Quilt data package from the server and install locally.
At most one of `hash`, `version`, or `tag` can be given. If none are
given, `tag` defaults to "latest".
"""
if hash is version is tag is None:
tag = LATEST_TAG
# @filename ==> read from file
# newline = multiple lines ==> multiple requirements
package = package.strip()
if len(package) == 0:
raise CommandException("package name is empty.")
if package[0] == '@' or '\n' in package:
return install_via_requirements(package, force=force)
assert [hash, version, tag].count(None) == 2
owner, pkg, subpath = parse_package(package, allow_subpath=True)
session = _get_session()
store = PackageStore()
existing_pkg = store.get_package(owner, pkg)
if version is not None:
response = session.get(
"{url}/api/version/{owner}/{pkg}/{version}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
version=version
)
)
pkghash = response.json()['hash']
elif tag is not None:
response = session.get(
"{url}/api/tag/{owner}/{pkg}/{tag}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
tag=tag
)
)
pkghash = response.json()['hash']
else:
pkghash = _match_hash(session, owner, pkg, hash)
assert pkghash is not None
response = session.get(
"{url}/api/package/{owner}/{pkg}/{hash}".format(
url=get_registry_url(),
owner=owner,
pkg=pkg,
hash=pkghash
),
params=dict(
subpath='/'.join(subpath)
)
)
assert response.ok # other responses handled by _handle_response
if existing_pkg is not None and not force:
print("{owner}/{pkg} already installed.".format(owner=owner, pkg=pkg))
overwrite = input("Overwrite? (y/n) ")
if overwrite.lower() != 'y':
return
dataset = response.json(object_hook=decode_node)
response_urls = dataset['urls']
response_contents = dataset['contents']
# Verify contents hash
if pkghash != hash_contents(response_contents):
raise CommandException("Mismatched hash. Try again.")
pkgobj = store.install_package(owner, pkg, response_contents)
with requests.Session() as s3_session:
total = len(response_urls)
for idx, (download_hash, url) in enumerate(sorted(iteritems(response_urls))):
print("Downloading %s (%d/%d)..." % (download_hash, idx + 1, total))
local_filename = store.object_path(download_hash)
if os.path.exists(local_filename):
file_hash = digest_file(local_filename)
if file_hash == download_hash:
print("Fragment already installed; skipping.")
continue
else:
print("Fragment already installed, but has the wrong hash (%s); re-downloading." %
file_hash)
temp_path_gz = store.temporary_object_path(download_hash + '.gz')
with open(temp_path_gz, 'ab') as output_file:
starting_length = output_file.tell()
response = s3_session.get(
url,
headers={
'Range': 'bytes=%d-' % starting_length
},
stream=True,
timeout=(S3_CONNECT_TIMEOUT, S3_READ_TIMEOUT)
)
# RANGE_NOT_SATISFIABLE means, we already have the whole file.
if response.status_code != requests.codes.RANGE_NOT_SATISFIABLE:
if not response.ok:
message = "Download failed for %s:\nURL: %s\nStatus code: %s\nResponse: %r\n" % (
download_hash, response.request.url, response.status_code, response.text
)
raise CommandException(message)
# Fragments have the 'Content-Encoding: gzip' header set to make requests ungzip them
# automatically - but that turned out to be a bad idea because it makes resuming downloads
# impossible.
# HACK: For now, just delete the header. Eventually, update the data in S3.
response.raw.headers.pop('Content-Encoding', None)
# Make sure we're getting the expected range.
content_range = response.headers.get('Content-Range', '')
match = CONTENT_RANGE_RE.match(content_range)
if not match or not int(match.group(1)) == starting_length:
raise CommandException("Unexpected Content-Range: %s" % content_range)
total_length = int(match.group(3))
with tqdm(initial=starting_length, total=total_length, unit='B', unit_scale=True) as progress:
for chunk in response.iter_content(CHUNK_SIZE):
output_file.write(chunk)
progress.update(len(chunk))
# Ungzip the downloaded fragment.
temp_path = store.temporary_object_path(download_hash)
try:
with gzip.open(temp_path_gz, 'rb') as f_in, open(temp_path, 'wb') as f_out:
copyfileobj(f_in, f_out)
finally:
# Delete the file unconditionally - in case it's corrupted and cannot be ungzipped.
os.remove(temp_path_gz)
# Check the hash of the result.
file_hash = digest_file(temp_path)
if file_hash != download_hash:
os.remove(temp_path)
raise CommandException("Fragment hashes do not match: expected %s, got %s." %
(download_hash, file_hash))
move(temp_path, local_filename)
pkgobj.save_contents()
def _setup_env(env, files):
""" process data distribution. """
# TODO: build.yml is not saved in the package system, so re-load it here
with open('build.yml') as fd:
buildfile = next(yaml.load_all(fd), None)
environments = buildfile.get('environments', {})
if env != 'default' and (env not in environments):
raise CommandException(
"environment %s not found in environments: section of build.yml" % env)
if len(environments) == 0:
return files
if env == 'default' and 'default' not in environments:
return files
# TODO: this should be done during quilt push, not during install/import
# (requires server support)
# TODO: add a way to dry-run dataset checking
print('processing environment %s: checking data...' % (env))
environment = environments[env]
dataset = environment.get('dataset')
for key, val in files.items():
# TODO: debug mode, where we can see which files were skipped
if isinstance(val, pd.DataFrame):
before_len = len(val)
res = exec_yaml_python(dataset, val, key, '('+key+')')
if not res and res is not None:
raise BuildException("error creating dataset for environment: %s on file %s" % (
env, key))
print('%s: %s=>%s recs' % (key, before_len, len(qc.data)))
files[key] = qc.data
# TODO: should be done on the server during quilt install
# (requires server support)
print('processing environment %s: slicing data...' % (env))
instance_data = environment.get('instance_data')
for key, val in files.items():
# TODO: debug mode, where we can see which files were skipped
if type(val) == pd.core.frame.DataFrame:
before_len = len(val)
# TODO: pass instance identifier, e.g. instance number N of M
val['.qchash'] = val.apply(lambda x: abs(hash(tuple(x))), axis = 1)
res = exec_yaml_python(instance_data, val, key, '('+key+')')
if res == False:
raise BuildException("error assigning data to instance in environment: %s on file %s" % (
env, key))
print('%s: %s=>%s recs' % (key, before_len, len(qc.data)))
files[key] = qc.data
return files
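# Illustrative note (not part of the original module): _setup_env reads build.yml from the
# current directory and looks for an optional 'environments:' section keyed by environment
# name, where each environment may define 'dataset' and 'instance_data' entries that are
# evaluated against each DataFrame via exec_yaml_python. A hypothetical, schema-level shape:
#
#     environments:
#       default:
#         dataset: <checks consumed by exec_yaml_python>
#         instance_data: <checks consumed by exec_yaml_python>
#
# The exact contents of 'dataset' and 'instance_data' are defined by exec_yaml_python in core.py.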
def access_list(package):
"""
Print list of users who can access a package.
"""
owner, pkg = parse_package(package)
session = _get_session()
lookup_url = "{url}/api/access/{owner}/{pkg}".format(url=get_registry_url(), owner=owner, pkg=pkg)
response = session.get(lookup_url)
data = response.json()
users = data['users']
print('\n'.join(users))
def access_add(package, user):
"""
Add access
"""
owner, pkg = parse_package(package)
session = _get_session()
session.put("%s/api/access/%s/%s/%s" % (get_registry_url(), owner, pkg, user))
def access_remove(package, user):
"""
Remove access
"""
owner, pkg = parse_package(package)
session = _get_session()
session.delete("%s/api/access/%s/%s/%s" % (get_registry_url(), owner, pkg, user))
def delete(package):
"""
Delete a package from the server.
Irreversibly deletes the package along with its history, tags, versions, etc.
"""
owner, pkg = parse_package(package)
answer = input(
"Are you sure you want to delete this package and its entire history? " +
"Type '%s/%s' to confirm: " % (owner, pkg)
)
if answer != '%s/%s' % (owner, pkg):
print("Not deleting.")
return 1
session = _get_session()
session.delete("%s/api/package/%s/%s/" % (get_registry_url(), owner, pkg))
print("Deleted.")
def search(query):
"""
Search for packages
"""
session = _get_session()
response = session.get("%s/api/search/" % get_registry_url(), params=dict(q=query))
packages = response.json()['packages']
for pkg in packages:
print("%(owner)s/%(name)s" % pkg)
def ls(): # pylint:disable=C0103
"""
List all installed Quilt data packages
"""
for pkg_dir in PackageStore.find_store_dirs():
print("%s" % pkg_dir)
packages = PackageStore(pkg_dir).ls_packages()
for idx, (package, tag, pkghash) in enumerate(packages):
tag = "" if tag is None else tag
print("{0:30} {1:20} {2}".format(package, tag, pkghash))
def inspect(package):
"""
Inspect package details
"""
owner, pkg = parse_package(package)
pkgobj = PackageStore.find_package(owner, pkg)
if pkgobj is None:
raise CommandException("Package {owner}/{pkg} not found.".format(owner=owner, pkg=pkg))
def _print_children(children, prefix, path):
for idx, (name, child) in enumerate(children):
if idx == len(children) - 1:
new_prefix = u"└─"
new_child_prefix = u" "
else:
new_prefix = u"├─"
new_child_prefix = u"│ "
_print_node(child, prefix + new_prefix, prefix + new_child_prefix, name, path)
def _print_node(node, prefix, child_prefix, name, path):
name_prefix = u"─ "
if isinstance(node, GroupNode):
children = list(node.children.items())
if children:
name_prefix = u"┬ "
print(prefix + name_prefix + name)
_print_children(children, child_prefix, path + name)
elif isinstance(node, TableNode):
df = pkgobj.get_obj(node)
assert isinstance(df, pd.DataFrame)
info = "shape %s, type \"%s\"" % (df.shape, df.dtypes)
            print(prefix + name_prefix + name + ": " + info)
elif isinstance(node, FileNode):
print(prefix + name_prefix + name)
else:
assert False, "node=%s type=%s" % (node, type(node))
print(pkgobj.get_path())
_print_children(children=pkgobj.get_contents().children.items(), prefix='', path='')
| 1 | 15,378 | duplicate code - can you move into build_from_path() ? | quiltdata-quilt | py |
@@ -9,3 +9,6 @@ def toto(): #pylint: disable=C0102,R1711
# +1: [missing-function-docstring]
def test_enabled_by_id_msg(): #pylint: enable=C0111
pass
+
+def baz(): #pylint: disable=blacklisted-name
+ return 1 | 1 | # -*- encoding=utf-8 -*-
#pylint: disable=C0111
def foo(): #pylint: disable=C0102
return 1
def toto(): #pylint: disable=C0102,R1711
return
# +1: [missing-function-docstring]
def test_enabled_by_id_msg(): #pylint: enable=C0111
pass
| 1 | 12,634 | very important test as we want to make sure the old name still work. | PyCQA-pylint | py |
@@ -1191,5 +1191,5 @@ This code is executed if a gain focus event is received by this object.
Fetches the number of descendants currently selected.
For performance, this method will only count up to the given maxCount number, and if there is one more above that, then sys.maxint is returned stating that many items are selected.
"""
- return 0
+ return 1
| 1 | # -*- coding: UTF-8 -*-
#NVDAObjects/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2017 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Patrick Zajda, Babbage B.V., Davy Kager
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""Module that contains the base NVDA object type"""
from new import instancemethod
import time
import re
import weakref
from logHandler import log
import review
import eventHandler
from displayModel import DisplayModelTextInfo
import baseObject
import documentBase
import speech
import ui
import api
import textInfos.offsets
import config
import controlTypes
import appModuleHandler
import treeInterceptorHandler
import braille
import globalPluginHandler
import brailleInput
class NVDAObjectTextInfo(textInfos.offsets.OffsetsTextInfo):
"""A default TextInfo which is used to enable text review of information about widgets that don't support text content.
The L{NVDAObject.basicText} attribute is used as the text to expose.
"""
locationText=None
def _get_unit_mouseChunk(self):
return textInfos.UNIT_STORY
def _getStoryText(self):
return self.obj.basicText
def _getStoryLength(self):
return len(self._getStoryText())
def _getTextRange(self,start,end):
text=self._getStoryText()
return text[start:end]
class InvalidNVDAObject(RuntimeError):
"""Raised by NVDAObjects during construction to inform that this object is invalid.
In this case, for the purposes of NVDA, the object should be considered non-existent.
Therefore, L{DynamicNVDAObjectType} will return C{None} if this exception is raised.
"""
class DynamicNVDAObjectType(baseObject.ScriptableObject.__class__):
_dynamicClassCache={}
def __call__(self,chooseBestAPI=True,**kwargs):
if chooseBestAPI:
APIClass=self.findBestAPIClass(kwargs)
if not APIClass: return None
else:
APIClass=self
# Instantiate the requested class.
try:
obj=APIClass.__new__(APIClass,**kwargs)
obj.APIClass=APIClass
if isinstance(obj,self):
obj.__init__(**kwargs)
except InvalidNVDAObject as e:
log.debugWarning("Invalid NVDAObject: %s" % e, exc_info=True)
return None
clsList = []
if "findOverlayClasses" in APIClass.__dict__:
obj.findOverlayClasses(clsList)
else:
clsList.append(APIClass)
# Allow app modules to choose overlay classes.
appModule=obj.appModule
# optimisation: The base implementation of chooseNVDAObjectOverlayClasses does nothing,
# so only call this method if it's been overridden.
if appModule and not hasattr(appModule.chooseNVDAObjectOverlayClasses, "_isBase"):
appModule.chooseNVDAObjectOverlayClasses(obj, clsList)
# Allow global plugins to choose overlay classes.
for plugin in globalPluginHandler.runningPlugins:
if "chooseNVDAObjectOverlayClasses" in plugin.__class__.__dict__:
plugin.chooseNVDAObjectOverlayClasses(obj, clsList)
# Determine the bases for the new class.
bases=[]
for index in xrange(len(clsList)):
# A class doesn't need to be a base if it is already implicitly included by being a superclass of a previous base.
if index==0 or not issubclass(clsList[index-1],clsList[index]):
bases.append(clsList[index])
# Construct the new class.
if len(bases) == 1:
# We only have one base, so there's no point in creating a dynamic type.
newCls=bases[0]
else:
bases=tuple(bases)
newCls=self._dynamicClassCache.get(bases,None)
if not newCls:
name="Dynamic_%s"%"".join([x.__name__ for x in clsList])
newCls=type(name,bases,{})
self._dynamicClassCache[bases]=newCls
oldMro=frozenset(obj.__class__.__mro__)
# Mutate obj into the new class.
obj.__class__=newCls
# Initialise the overlay classes.
for cls in reversed(newCls.__mro__):
if cls in oldMro:
# This class was part of the initially constructed object, so its constructor would have been called.
continue
initFunc=cls.__dict__.get("initOverlayClass")
if initFunc:
initFunc(obj)
# Bind gestures specified on the class.
try:
obj.bindGestures(getattr(cls, "_%s__gestures" % cls.__name__))
except AttributeError:
pass
# Allow app modules to make minor tweaks to the instance.
if appModule and hasattr(appModule,"event_NVDAObject_init"):
appModule.event_NVDAObject_init(obj)
return obj
@classmethod
def clearDynamicClassCache(cls):
"""Clear the dynamic class cache.
This should be called when a plugin is unloaded so that any used overlay classes in the unloaded plugin can be garbage collected.
"""
cls._dynamicClassCache.clear()
class NVDAObject(documentBase.TextContainerObject,baseObject.ScriptableObject):
"""NVDA's representation of a single control/widget.
Every widget, regardless of how it is exposed by an application or the operating system, is represented by a single NVDAObject instance.
This allows NVDA to work with all widgets in a uniform way.
An NVDAObject provides information about the widget (e.g. its name, role and value),
as well as functionality to manipulate it (e.g. perform an action or set focus).
Events for the widget are handled by special event methods on the object.
Commands triggered by input from the user can also be handled by special methods called scripts.
See L{ScriptableObject} for more details.
The only attribute that absolutely must be provided is L{processID}.
However, subclasses should provide at least the L{name} and L{role} attributes in order for the object to be meaningful to the user.
Attributes such as L{parent}, L{firstChild}, L{next} and L{previous} link an instance to other NVDAObjects in the hierarchy.
In order to facilitate access to text exposed by a widget which supports text content (e.g. an editable text control),
a L{textInfos.TextInfo} should be implemented and the L{TextInfo} attribute should specify this class.
There are two main types of NVDAObject classes:
* API classes, which provide the core functionality to work with objects exposed using a particular API (e.g. MSAA/IAccessible).
* Overlay classes, which supplement the core functionality provided by an API class to handle a specific widget or type of widget.
Most developers need only be concerned with overlay classes.
The overlay classes to be used for an instance are determined using the L{findOverlayClasses} method on the API class.
An L{AppModule} can also choose overlay classes for an instance using the L{AppModule.chooseNVDAObjectOverlayClasses} method.
"""
__metaclass__=DynamicNVDAObjectType
cachePropertiesByDefault = True
#: The TextInfo class this object should use to provide access to text.
#: @type: type; L{textInfos.TextInfo}
TextInfo=NVDAObjectTextInfo
#: Indicates if the text selection is anchored at the start.
#: The anchored position is the end that doesn't move when extending or shrinking the selection.
#: For example, if you have no selection and you press shift+rightArrow to select the next character,
#: this will be True.
#: In contrast, if you have no selection and you press shift+leftArrow to select the previous character,
#: this will be False.
#: If the selection is anchored at the end or there is no information this is C{False}.
#: @type: bool
isTextSelectionAnchoredAtStart=True
@classmethod
def findBestAPIClass(cls,kwargs,relation=None):
"""
		Finds out the highest-level APIClass this object can get to given these kwargs, updates the kwargs, and returns the APIClass.
		@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
		@type relation: string
@param kwargs: the arguments necessary to construct an object of the class this method was called on.
@type kwargs: dictionary
@returns: the new APIClass
@rtype: DynamicNVDAObjectType
"""
newAPIClass=cls
if 'getPossibleAPIClasses' in newAPIClass.__dict__:
for possibleAPIClass in newAPIClass.getPossibleAPIClasses(kwargs,relation=relation):
if 'kwargsFromSuper' not in possibleAPIClass.__dict__:
log.error("possible API class %s does not implement kwargsFromSuper"%possibleAPIClass)
continue
if possibleAPIClass.kwargsFromSuper(kwargs,relation=relation):
return possibleAPIClass.findBestAPIClass(kwargs,relation=relation)
return newAPIClass if newAPIClass is not NVDAObject else None
@classmethod
def getPossibleAPIClasses(cls,kwargs,relation=None):
"""
Provides a generator which can generate all the possible API classes (in priority order) that inherit directly from the class it was called on.
@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
		@type relation: string
@param kwargs: the arguments necessary to construct an object of the class this method was called on.
@type kwargs: dictionary
@returns: a generator
@rtype: generator
"""
import NVDAObjects.window
yield NVDAObjects.window.Window
@classmethod
def kwargsFromSuper(cls,kwargs,relation=None):
"""
		Finds out if this class can be instantiated from the given super kwargs.
		If so, it updates the kwargs to contain everything it will need to instantiate this class, and returns True.
		If this class cannot be instantiated, it returns False and kwargs is not touched.
		@param relation: why is this class being instantiated? parent, focus, foreground etc...
@type relation: string
@param kwargs: the kwargs for constructing this class's super class.
@type kwargs: dict
@rtype: boolean
"""
raise NotImplementedError
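	# Illustrative sketch (not part of the original module): an API class participates in
	# findBestAPIClass by being yielded from its superclass's getPossibleAPIClasses and by
	# implementing kwargsFromSuper itself. A hypothetical implementation might look roughly like:
	#
	#     @classmethod
	#     def kwargsFromSuper(cls, kwargs, relation=None):
	#         handle = kwargs.get('windowHandle')
	#         nativeObject = fetchNativeObject(handle)  # hypothetical helper
	#         if not nativeObject:
	#             return False  # leave kwargs untouched so findBestAPIClass stays on the super class
	#         kwargs['nativeObject'] = nativeObject
	#         return True
	#
	# Returning True signals that construction can descend to this class with the updated kwargs.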
def findOverlayClasses(self, clsList):
"""Chooses overlay classes which should be added to this object's class structure after the object has been initially instantiated.
After an NVDAObject class (normally an API-level class) is instantiated, this method is called on the instance to choose appropriate overlay classes.
This method may use properties, etc. on the instance to make this choice.
The object's class structure is then mutated to contain these classes.
L{initOverlayClass} is then called for each class which was not part of the initially instantiated object.
This process allows an NVDAObject to be dynamically created using the most appropriate NVDAObject subclass at each API level.
		Classes should be listed with subclasses first. That is, subclasses should generally append their own classes to the list and then call super.
		For example, called on an IAccessible NVDAObject, the list might contain DialogIaccessible (a subclass of IAccessible) and Edit (a subclass of Window).
@param clsList: The list of classes, which will be modified by this method if appropriate.
@type clsList: list of L{NVDAObject}
"""
clsList.append(NVDAObject)
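	# Illustrative sketch (not part of the original module): an API class typically overrides
	# findOverlayClasses to append widget-specific overlay classes and its own class first,
	# then calls super so that more general classes land later in the list. A hypothetical override:
	#
	#     def findOverlayClasses(self, clsList):
	#         if self.windowClassName == "SomeWidgetClass":  # hypothetical condition
	#             clsList.append(SomeWidgetOverlay)  # hypothetical overlay class
	#         clsList.append(ThisAPIClass)  # hypothetical API class
	#         super(ThisAPIClass, self).findOverlayClasses(clsList)
	#
	# initOverlayClass is then called by DynamicNVDAObjectType.__call__ for each class that was
	# not part of the initially constructed object.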
beTransparentToMouse=False #:If true then NVDA will never consider the mouse to be on this object, rather it will be on an ancestor.
@staticmethod
	def objectFromPoint(x,y):
		"""Retrieves an NVDAObject instance representing a control in the Operating System at the given x and y coordinates.
		@param x: the x coordinate.
		@type x: int
		@param y: the y coordinate.
		@type y: int
@return: The object at the given x and y coordinates.
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation=(x,y))
return APIClass(chooseBestAPI=False,**kwargs) if APIClass else None
@staticmethod
	def objectWithFocus():
		"""Retrieves the object representing the control currently with focus in the Operating System. This differs from NVDA's focus object, as this object is the real focus object according to the Operating System, not according to NVDA.
@return: the object with focus.
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation="focus")
if not APIClass:
return None
obj=APIClass(chooseBestAPI=False,**kwargs)
if not obj:
return None
focusRedirect=obj.focusRedirect
if focusRedirect:
obj=focusRedirect
return obj
@staticmethod
	def objectInForeground():
		"""Retrieves the object representing the current foreground control according to the Operating System. This differs from NVDA's foreground object, as this object is the real foreground object according to the Operating System, not according to NVDA.
@return: the foreground object
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation="foreground")
return APIClass(chooseBestAPI=False,**kwargs) if APIClass else None
def __init__(self):
super(NVDAObject,self).__init__()
self._mouseEntered=False #:True if the mouse has entered this object (for use in L{event_mouseMoved})
self.textRepresentationLineLength=None #:If an integer greater than 0 then lines of text in this object are always this long.
def _isEqual(self,other):
"""Calculates if this object is equal to another object. Used by L{NVDAObject.__eq__}.
@param other: the other object to compare with.
@type other: L{NVDAObject}
@return: True if equal, false otherwise.
@rtype: boolean
"""
return True
	def __eq__(self,other):
		"""Compares the objects' memory addresses and their type, and uses L{NVDAObject._isEqual} to see if they are equal.
"""
if self is other:
return True
if type(self) is not type(other):
return False
return self._isEqual(other)
def __ne__(self,other):
"""The opposite to L{NVDAObject.__eq__}
"""
return not self.__eq__(other)
	focusRedirect=None #: Another object which should be treated as the focus if focus is ever given to this object.
def _get_treeInterceptorClass(self):
"""
If this NVDAObject should use a treeInterceptor, then this property provides the L{treeInterceptorHandler.TreeInterceptor} class it should use.
		If not, then this property should not be implemented.
"""
raise NotImplementedError
#: Whether to create a tree interceptor for this object.
#: This is only relevant if L{treeInterceptorClass} is valid.
#: Normally, this should be C{True}.
#: However, for some objects (e.g. ARIA applications), a tree interceptor shouldn't be used by default,
#: but the user may wish to override this.
#: In this case, this can be set to C{False} and updated later.
#: @type: bool
shouldCreateTreeInterceptor = True
	def _get_treeInterceptor(self):
		"""Retrieves the treeInterceptor associated with this object.
If a treeInterceptor has not been specifically set, the L{treeInterceptorHandler} is asked if it can find a treeInterceptor containing this object.
@return: the treeInterceptor
@rtype: L{treeInterceptorHandler.TreeInterceptor}
"""
if hasattr(self,'_treeInterceptor'):
ti=self._treeInterceptor
if isinstance(ti,weakref.ref):
ti=ti()
if ti and ti in treeInterceptorHandler.runningTable:
return ti
else:
self._treeInterceptor=None
return None
else:
ti=treeInterceptorHandler.getTreeInterceptor(self)
if ti:
self._treeInterceptor=weakref.ref(ti)
return ti
def _set_treeInterceptor(self,obj):
"""Specifically sets a treeInterceptor to be associated with this object.
"""
if obj:
self._treeInterceptor=weakref.ref(obj)
else: #We can't point a weakref to None, so just set the private variable to None, it can handle that
self._treeInterceptor=None
	def _get_appModule(self):
		"""Retrieves the appModule representing the application this object is a part of by asking L{appModuleHandler}.
@return: the appModule
@rtype: L{appModuleHandler.AppModule}
"""
if not hasattr(self,'_appModuleRef'):
a=appModuleHandler.getAppModuleForNVDAObject(self)
if a:
self._appModuleRef=weakref.ref(a)
return a
else:
return self._appModuleRef()
def _get_name(self):
"""The name or label of this object (example: the text of a button).
@rtype: basestring
"""
return ""
def _get_role(self):
"""The role or type of control this object represents (example: button, list, dialog).
@return: a ROLE_* constant from L{controlTypes}
@rtype: int
"""
return controlTypes.ROLE_UNKNOWN
def _get_roleText(self):
"""
		A custom role string for this object, used for braille and speech presentation, which overrides the standard label for this object's role property.
No string is provided by default, meaning that NVDA will fall back to using role.
Examples of where this property might be overridden are shapes in Powerpoint, or ARIA role descriptions.
"""
return None
def _get_value(self):
"""The value of this object (example: the current percentage of a scrollbar, the selected option in a combo box).
@rtype: basestring
"""
return ""
def _get_description(self):
"""The description or help text of this object.
@rtype: basestring
"""
return ""
	def _get_controllerFor(self):
		"""Retrieves the object(s) that this object controls."""
return []
	def _get_actionCount(self):
		"""Retrieves the number of actions supported by this object."""
return 0
	def getActionName(self,index=None):
		"""Retrieves the name of an action supported by this object.
If index is not given then the default action will be used if it exists.
@param index: the optional 0-based index of the wanted action.
@type index: int
@return: the action's name
@rtype: basestring
"""
raise NotImplementedError
def doAction(self,index=None):
"""Performs an action supported by this object.
If index is not given then the default action will be used if it exists.
"""
raise NotImplementedError
	def _get_defaultActionIndex(self):
		"""Retrieves the index of the action that is the default."""
return 0
	def _get_keyboardShortcut(self):
		"""The shortcut key that activates this object (example: alt+t).
@rtype: basestring
"""
return ""
def _get_isInForeground(self):
"""
Finds out if this object is currently within the foreground.
"""
raise NotImplementedError
	def _get_states(self):
		"""Retrieves the current states of this object (example: selected, focused).
@return: a set of STATE_* constants from L{controlTypes}.
@rtype: set of int
"""
return set()
def _get_location(self):
"""The location of this object on the screen.
@return: left, top, width and height of the object.
@rtype: tuple of int
"""
raise NotImplementedError
def _get_locationText(self):
"""A message that explains the location of the object in friendly terms."""
location=self.location
if not location:
return None
(left,top,width,height)=location
deskLocation=api.getDesktopObject().location
(deskLeft,deskTop,deskWidth,deskHeight)=deskLocation
percentFromLeft=(float(left-deskLeft)/deskWidth)*100
percentFromTop=(float(top-deskTop)/deskHeight)*100
percentWidth=(float(width)/deskWidth)*100
percentHeight=(float(height)/deskHeight)*100
# Translators: Reports navigator object's dimensions (example output: object edges positioned 20 per cent from left edge of screen, 10 per cent from top edge of screen, width is 40 per cent of screen, height is 50 per cent of screen).
return _("Object edges positioned {left:.1f} per cent from left edge of screen, {top:.1f} per cent from top edge of screen, width is {width:.1f} per cent of screen, height is {height:.1f} per cent of screen").format(left=percentFromLeft,top=percentFromTop,width=percentWidth,height=percentHeight)
	def _get_parent(self):
		"""Retrieves this object's parent (the object that contains this object).
@return: the parent object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_container(self):
"""
		Exactly like parent, however another object at this same sibling level may be retrieved first (e.g. a groupbox). Mostly used when presenting context such as focus ancestry.
"""
# Cache parent.
parent = self.parent
self.parent = parent
return parent
	def _get_next(self):
		"""Retrieves the object directly after this object with the same parent.
@return: the next object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
	def _get_previous(self):
		"""Retrieves the object directly before this object with the same parent.
@return: the previous object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
	def _get_firstChild(self):
		"""Retrieves the first object that this object contains.
@return: the first child object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
	def _get_lastChild(self):
		"""Retrieves the last object that this object contains.
@return: the last child object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
	def _get_children(self):
		"""Retrieves a list of all the objects directly contained by this object (whose parent is this object).
@rtype: list of L{NVDAObject}
"""
children=[]
child=self.firstChild
while child:
children.append(child)
child=child.next
return children
def getChild(self, index):
"""Retrieve a child by index.
@note: Subclasses may override this if they have an efficient way to retrieve a single, arbitrary child.
The base implementation uses L{children}.
@param index: The 0-based index of the child to retrieve.
@type index: int
@return: The child.
@rtype: L{NVDAObject}
"""
return self.children[index]
	def _get_rowNumber(self):
		"""Retrieves the row number of this object if it is in a table.
@rtype: int
"""
raise NotImplementedError
	def _get_columnNumber(self):
		"""Retrieves the column number of this object if it is in a table.
@rtype: int
"""
raise NotImplementedError
def _get_cellCoordsText(self):
"""
An alternative text representation of cell coordinates e.g. "a1". Will override presentation of rowNumber and columnNumber.
Only implement if the representation is really different.
"""
return None
	def _get_rowCount(self):
		"""Retrieves the number of rows this object contains if it is a table.
@rtype: int
"""
raise NotImplementedError
	def _get_columnCount(self):
		"""Retrieves the number of columns this object contains if it is a table.
@rtype: int
"""
raise NotImplementedError
def _get_rowSpan(self):
"""The number of rows spanned by this cell.
@rtype: int
"""
raise NotImplementedError
def _get_rowHeaderText(self):
"""The text of the row headers for this cell.
@rtype: str
"""
raise NotImplementedError
def _get_columnSpan(self):
"""The number of columns spanned by this cell.
@rtype: int
"""
raise NotImplementedError
def _get_columnHeaderText(self):
"""The text of the column headers for this cell.
@rtype: str
"""
raise NotImplementedError
	def _get_table(self):
		"""Retrieves the object that represents the table that this object is contained in, if this object is a table cell.
@rtype: L{NVDAObject}
"""
raise NotImplementedError
def _get_tableID(self):
"""The identifier of the table associated with this object if it is a table cell.
This identifier must distinguish this table from other tables.
If this is not implemented, table cell information will still be reported,
but row and column information will always be reported
even if the user moves to a cell in the same row/column.
"""
raise NotImplementedError
def _get_recursiveDescendants(self):
"""Recursively traverse and return the descendants of this object.
This is a depth-first forward traversal.
@return: The recursive descendants of this object.
@rtype: generator of L{NVDAObject}
"""
for child in self.children:
yield child
for recursiveChild in child.recursiveDescendants:
yield recursiveChild
presType_unavailable="unavailable"
presType_layout="layout"
presType_content="content"
def _get_presentationType(self):
states=self.states
if controlTypes.STATE_INVISIBLE in states or controlTypes.STATE_UNAVAILABLE in states:
return self.presType_unavailable
role=self.role
		#Static text should be content only if it really has usable text
if role==controlTypes.ROLE_STATICTEXT:
text=self.makeTextInfo(textInfos.POSITION_ALL).text
return self.presType_content if text and not text.isspace() else self.presType_layout
if role in (controlTypes.ROLE_UNKNOWN, controlTypes.ROLE_PANE, controlTypes.ROLE_TEXTFRAME, controlTypes.ROLE_ROOTPANE, controlTypes.ROLE_LAYEREDPANE, controlTypes.ROLE_SCROLLPANE, controlTypes.ROLE_SPLITPANE, controlTypes.ROLE_SECTION, controlTypes.ROLE_PARAGRAPH, controlTypes.ROLE_TITLEBAR, controlTypes.ROLE_LABEL, controlTypes.ROLE_WHITESPACE,controlTypes.ROLE_BORDER):
return self.presType_layout
name = self.name
description = self.description
if not name and not description:
if role in (controlTypes.ROLE_WINDOW,controlTypes.ROLE_PANEL, controlTypes.ROLE_PROPERTYPAGE, controlTypes.ROLE_TEXTFRAME, controlTypes.ROLE_GROUPING,controlTypes.ROLE_OPTIONPANE,controlTypes.ROLE_INTERNALFRAME,controlTypes.ROLE_FORM,controlTypes.ROLE_TABLEBODY):
return self.presType_layout
if role == controlTypes.ROLE_TABLE and not config.conf["documentFormatting"]["reportTables"]:
return self.presType_layout
if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN,controlTypes.ROLE_TABLECELL) and (not config.conf["documentFormatting"]["reportTables"] or not config.conf["documentFormatting"]["reportTableCellCoords"]):
return self.presType_layout
return self.presType_content
def _get_simpleParent(self):
obj=self.parent
while obj and obj.presentationType!=self.presType_content:
obj=obj.parent
return obj
def _findSimpleNext(self,useChild=False,useParent=True,goPrevious=False):
nextPrevAttrib="next" if not goPrevious else "previous"
firstLastChildAttrib="firstChild" if not goPrevious else "lastChild"
found=None
if useChild:
child=getattr(self,firstLastChildAttrib)
childPresType=child.presentationType if child else None
if childPresType==self.presType_content:
found=child
elif childPresType==self.presType_layout:
found=child._findSimpleNext(useChild=True,useParent=False,goPrevious=goPrevious)
elif child:
found=child._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if found:
return found
next=getattr(self,nextPrevAttrib)
nextPresType=next.presentationType if next else None
if nextPresType==self.presType_content:
found=next
elif nextPresType==self.presType_layout:
found=next._findSimpleNext(useChild=True,useParent=False,goPrevious=goPrevious)
elif next:
found=next._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if found:
return found
parent=self.parent if useParent else None
while parent and parent.presentationType!=self.presType_content:
next=parent._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if next:
return next
parent=parent.parent
def _get_simpleNext(self):
return self._findSimpleNext()
def _get_simplePrevious(self):
return self._findSimpleNext(goPrevious=True)
def _get_simpleFirstChild(self):
child=self.firstChild
if not child:
return None
presType=child.presentationType
if presType!=self.presType_content: return child._findSimpleNext(useChild=(presType!=self.presType_unavailable),useParent=False)
return child
def _get_simpleLastChild(self):
child=self.lastChild
if not child:
return None
presType=child.presentationType
if presType!=self.presType_content: return child._findSimpleNext(useChild=(presType!=self.presType_unavailable),useParent=False,goPrevious=True)
return child
	def _get_childCount(self):
		"""Retrieves the number of children this object contains.
@rtype: int
"""
return len(self.children)
	def _get_activeChild(self):
		"""Retrieves the child of this object that currently has, or contains, the focus.
@return: the active child if it has one else None
@rtype: L{NVDAObject} or None
"""
return None
def _get_isFocusable(self):
"""Whether this object is focusable.
@rtype: bool
"""
return controlTypes.STATE_FOCUSABLE in self.states
def _get_hasFocus(self):
"""Whether this object has focus.
@rtype: bool
"""
return controlTypes.STATE_FOCUSED in self.states
def setFocus(self):
"""
Tries to force this object to take the focus.
"""
pass
def scrollIntoView(self):
"""Scroll this object into view on the screen if possible.
"""
raise NotImplementedError
	def _get_labeledBy(self):
		"""Retrieves the object that this object is labeled by (example: the static text label beside an edit field).
@return: the label object if it has one else None.
@rtype: L{NVDAObject} or None
"""
return None
	def _get_positionInfo(self):
		"""Retrieves position information for this object, such as its level, its index within a group, and the number of items in that group.
@return: a dictionary containing any of level, groupIndex and similarItemsInGroup.
@rtype: dict
"""
return {}
	def _get_processID(self):
		"""Retrieves an identifier of the process this object is a part of.
@rtype: int
"""
raise NotImplementedError
def _get_isProtected(self):
"""
@return: True if this object is protected (hides its input for passwords), or false otherwise
@rtype: boolean
"""
# Objects with the protected state, or with a role of passWordEdit should always be protected.
isProtected=(controlTypes.STATE_PROTECTED in self.states or self.role==controlTypes.ROLE_PASSWORDEDIT)
# #7908: If this object is currently protected, keep it protected for the rest of its lifetime.
# The most likely reason it would lose its protected state is because the object is dying.
# In this case it is much more secure to assume it is still protected, thus the end of PIN codes will not be accidentally reported.
if isProtected:
self.isProtected=isProtected
return isProtected
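	# Editor's note: an illustrative sketch, not part of the original NVDA source.
	# Because the getter above caches a True result on the instance, a PIN edit
	# field that once exposed STATE_PROTECTED keeps answering
	#   obj.isProtected == True
	# even if the dying control later drops the state, so the tail of the PIN is
	# never accidentally spoken.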
def _get_indexInParent(self):
"""The index of this object in its parent object.
@return: The 0 based index, C{None} if there is no parent.
@rtype: int
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_flowsTo(self):
"""The object to which content flows from this object.
@return: The object to which this object flows, C{None} if none.
@rtype: L{NVDAObject}
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_flowsFrom(self):
"""The object from which content flows to this object.
@return: The object from which this object flows, C{None} if none.
@rtype: L{NVDAObject}
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_isPresentableFocusAncestor(self):
"""Determine if this object should be presented to the user in the focus ancestry.
@return: C{True} if it should be presented in the focus ancestry, C{False} if not.
@rtype: bool
"""
if self.presentationType in (self.presType_layout, self.presType_unavailable):
return False
if self.role in (controlTypes.ROLE_TREEVIEWITEM, controlTypes.ROLE_LISTITEM, controlTypes.ROLE_PROGRESSBAR, controlTypes.ROLE_EDITABLETEXT):
return False
return True
def _get_statusBar(self):
"""Finds the closest status bar in relation to this object.
@return: the found status bar else None
@rtype: L{NVDAObject} or None
"""
return None
def _get_isCurrent(self):
"""Gets the value that indicates whether this object is the current element in a set of related
elements. This maps to aria-current. Normally returns None. If this object is current
it will return one of the following values: "true", "page", "step", "location", "date", "time"
"""
return None
def _get_shouldAcceptShowHideCaretEvent(self):
"""Some objects/applications send show/hide caret events when we don't expect it, such as when the cursor is blinking.
@return: if show/hide caret events should be accepted for this object.
@rtype: Boolean
"""
return True
def reportFocus(self):
"""Announces this object in a way suitable such that it gained focus.
"""
speech.speakObject(self,reason=controlTypes.REASON_FOCUS)
def _get_placeholder(self):
"""If it exists for this object get the value of the placeholder text.
For example this might be the aria-placeholder text for a field in a web page.
@return: the placeholder text else None
@rtype: String or None
"""
log.debug("Potential unimplemented child class: %r" %self)
return None
def _get_landmark(self):
"""If this object represents an ARIA landmark, fetches the ARIA landmark role.
@return: ARIA landmark role else None
@rtype: String or None
"""
return None
def _reportErrorInPreviousWord(self):
try:
# self might be a descendant of the text control; e.g. Symphony.
# We want to deal with the entire text, so use the caret object.
info = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
# This gets called for characters which might end a word; e.g. space.
# The character before the caret is the word end.
# The one before that is the last of the word, which is what we want.
info.move(textInfos.UNIT_CHARACTER, -2)
info.expand(textInfos.UNIT_CHARACTER)
fields = info.getTextWithFields()
except RuntimeError:
return
except:
# Focus probably moved.
log.debugWarning("Error fetching last character of previous word", exc_info=True)
return
for command in fields:
if isinstance(command, textInfos.FieldCommand) and command.command == "formatChange" and command.field.get("invalid-spelling"):
break
else:
# No error.
return
import nvwave
nvwave.playWaveFile(r"waves\textError.wav")
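	# Editor's note: an illustrative sketch, not part of the original NVDA source.
	# The helper above is driven from event_typedCharacter below: when a
	# word-ending character such as a space is typed, it inspects the last
	# character of the word just completed and plays waves\textError.wav only if
	# that character's formatChange field reports invalid-spelling.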
def event_liveRegionChange(self):
"""
A base implementation for live region change events.
"""
name=self.name
if name:
ui.message(name)
def event_typedCharacter(self,ch):
if config.conf["documentFormatting"]["reportSpellingErrors"] and config.conf["keyboard"]["alertForSpellingErrors"] and (
# Not alpha, apostrophe or control.
ch.isspace() or (ch >= u" " and ch not in u"'\x7f" and not ch.isalpha())
):
# Reporting of spelling errors is enabled and this character ends a word.
self._reportErrorInPreviousWord()
speech.speakTypedCharacters(ch)
import winUser
if config.conf["keyboard"]["beepForLowercaseWithCapslock"] and ch.islower() and winUser.getKeyState(winUser.VK_CAPITAL)&1:
import tones
tones.beep(3000,40)
def event_mouseMove(self,x,y):
if not self._mouseEntered and config.conf['mouse']['reportObjectRoleOnMouseEnter']:
speech.cancelSpeech()
speech.speakObjectProperties(self,role=True)
speechWasCanceled=True
else:
speechWasCanceled=False
self._mouseEntered=True
try:
info=self.makeTextInfo(textInfos.Point(x,y))
except NotImplementedError:
info=NVDAObjectTextInfo(self,textInfos.POSITION_FIRST)
except LookupError:
return
if config.conf["reviewCursor"]["followMouse"]:
api.setReviewPosition(info)
info.expand(info.unit_mouseChunk)
oldInfo=getattr(self,'_lastMouseTextInfoObject',None)
self._lastMouseTextInfoObject=info
if not oldInfo or info.__class__!=oldInfo.__class__ or info.compareEndPoints(oldInfo,"startToStart")!=0 or info.compareEndPoints(oldInfo,"endToEnd")!=0:
text=info.text
notBlank=False
if text:
for ch in text:
if not ch.isspace() and ch!=u'\ufffc':
notBlank=True
if notBlank:
if not speechWasCanceled:
speech.cancelSpeech()
speech.speakText(text)
def event_stateChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self,states=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_focusEntered(self):
if self.role in (controlTypes.ROLE_MENUBAR,controlTypes.ROLE_POPUPMENU,controlTypes.ROLE_MENUITEM):
speech.cancelSpeech()
return
if self.isPresentableFocusAncestor:
speech.speakObject(self,reason=controlTypes.REASON_FOCUSENTERED)
def event_gainFocus(self):
"""
This code is executed if a gain focus event is received by this object.
"""
self.reportFocus()
braille.handler.handleGainFocus(self)
brailleInput.handler.handleGainFocus(self)
def event_foreground(self):
"""Called when the foreground window changes.
This method should only perform tasks specific to the foreground window changing.
L{event_focusEntered} or L{event_gainFocus} will be called for this object, so this method should not speak/braille the object, etc.
"""
speech.cancelSpeech()
def event_becomeNavigatorObject(self, isFocus=False):
"""Called when this object becomes the navigator object.
@param isFocus: true if the navigator object was set due to a focus change.
@type isFocus: bool
"""
# When the navigator object follows the focus and braille is auto tethered to review,
# we should not update braille with the new review position as a tether to focus is due.
if braille.handler.shouldAutoTether and isFocus:
return
braille.handler.handleReviewMove(shouldAutoTether=not isFocus)
def event_valueChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, value=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_nameChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, name=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_descriptionChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, description=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_caret(self):
if self is api.getFocusObject() and not eventHandler.isPendingEvents("gainFocus"):
braille.handler.handleCaretMove(self)
brailleInput.handler.handleCaretMove(self)
review.handleCaretMove(self)
def _get_flatReviewPosition(self):
"""Locates a TextInfo positioned at this object, in the closest flat review."""
parent=self.simpleParent
while parent:
ti=parent.treeInterceptor
if ti and self in ti and ti.rootNVDAObject==parent:
return ti.makeTextInfo(self)
if issubclass(parent.TextInfo,DisplayModelTextInfo):
try:
return parent.makeTextInfo(api.getReviewPosition().pointAtStart)
except (NotImplementedError,LookupError):
pass
try:
return parent.makeTextInfo(self)
except (NotImplementedError,RuntimeError):
pass
return parent.makeTextInfo(textInfos.POSITION_FIRST)
parent=parent.simpleParent
def _get_basicText(self):
newTime=time.time()
oldTime=getattr(self,'_basicTextTime',0)
if newTime-oldTime>0.5:
self._basicText=u" ".join([x for x in self.name, self.value, self.description if isinstance(x, basestring) and len(x) > 0 and not x.isspace()])
if len(self._basicText)==0:
self._basicText=u""
else:
self._basicTextTime=newTime
return self._basicText
def _get__isTextEmpty(self):
"""
@return C{True} if the text contained in the object is considered empty by the underlying implementation. In most cases this will match {isCollapsed}, however some implementations may consider a single space or line feed as an empty range.
"""
ti = self.makeTextInfo(textInfos.POSITION_FIRST)
ti.move(textInfos.UNIT_CHARACTER, 1, endPoint="end")
return ti.isCollapsed
@staticmethod
def _formatLongDevInfoString(string, truncateLen=250):
"""Format a potentially long string value for inclusion in devInfo.
This should be used for arbitrary string values which aren't usually useful in debugging past a certain length.
If the string is too long to be useful, it will be truncated.
This string should be included as returned. There is no need to call repr.
@param string: The string to format.
		@type string: basestring
@param truncateLen: The length at which to truncate the string.
@type truncateLen: int
@return: The formatted string.
@rtype: basestring
"""
if isinstance(string, basestring) and len(string) > truncateLen:
return "%r (truncated)" % string[:truncateLen]
return repr(string)
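	# Editor's note: an illustrative sketch, not part of the original NVDA source.
	# devInfo values below are formatted with the helper above, e.g.
	#   NVDAObject._formatLongDevInfoString(u"x" * 300)
	# yields something like "u'xxx...' (truncated)", while short or non-string
	# values come back as their plain repr.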
def _get_devInfo(self):
"""Information about this object useful to developers.
Subclasses may extend this, calling the superclass property first.
@return: A list of text strings providing information about this object useful to developers.
@rtype: list of str
"""
info = []
try:
ret = repr(self.name)
except Exception as e:
ret = "exception: %s" % e
info.append("name: %s" % ret)
try:
ret = self.role
for name, const in controlTypes.__dict__.iteritems():
if name.startswith("ROLE_") and ret == const:
ret = name
break
except Exception as e:
ret = "exception: %s" % e
info.append("role: %s" % ret)
try:
stateConsts = dict((const, name) for name, const in controlTypes.__dict__.iteritems() if name.startswith("STATE_"))
ret = ", ".join(
stateConsts.get(state) or str(state)
for state in self.states)
except Exception as e:
ret = "exception: %s" % e
info.append("states: %s" % ret)
try:
ret = repr(self.isFocusable)
except Exception as e:
ret = "exception: %s" % e
info.append("isFocusable: %s" % ret)
try:
ret = repr(self.hasFocus)
except Exception as e:
ret = "exception: %s" % e
info.append("hasFocus: %s" % ret)
try:
ret = repr(self)
except Exception as e:
ret = "exception: %s" % e
info.append("Python object: %s" % ret)
try:
ret = repr(self.__class__.__mro__)
except Exception as e:
ret = "exception: %s" % e
info.append("Python class mro: %s" % ret)
try:
ret = repr(self.description)
except Exception as e:
ret = "exception: %s" % e
info.append("description: %s" % ret)
try:
ret = repr(self.location)
except Exception as e:
ret = "exception: %s" % e
info.append("location: %s" % ret)
formatLong = self._formatLongDevInfoString
try:
ret = formatLong(self.value)
except Exception as e:
ret = "exception: %s" % e
info.append("value: %s" % ret)
try:
ret = repr(self.appModule)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule: %s" % ret)
try:
ret = repr(self.appModule.productName)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule.productName: %s" % ret)
try:
ret = repr(self.appModule.productVersion)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule.productVersion: %s" % ret)
try:
ret = repr(self.TextInfo)
except Exception as e:
ret = "exception: %s" % e
info.append("TextInfo: %s" % ret)
return info
def _get_sleepMode(self):
"""Whether NVDA should sleep for this object (e.g. it is self-voicing).
If C{True}, all events and script requests for this object are silently dropped.
@rtype: bool
"""
if self.appModule:
return self.appModule.sleepMode
return False
# Don't cache sleepMode, as it is derived from a property which might change
# and we want the changed value immediately.
_cache_sleepMode = False
def _get_mathMl(self):
"""Obtain the MathML markup for an object containing math content.
This will only be called (and thus only needs to be implemented) for
objects with a role of L{controlTypes.ROLE_MATH}.
@raise LookupError: If MathML can't be retrieved for this object.
"""
raise NotImplementedError
#: The language/locale of this object.
#: @type: basestring
language = None
def _get__hasNavigableText(self):
# The generic NVDAObjectTextInfo by itself is never enough to be navigable
if self.TextInfo is NVDAObjectTextInfo:
return False
role = self.role
states = self.states
if role in (controlTypes.ROLE_EDITABLETEXT,controlTypes.ROLE_TERMINAL,controlTypes.ROLE_DOCUMENT):
# Edit fields, terminals and documents are always navigable
return True
elif controlTypes.STATE_EDITABLE in states:
# Anything that is specifically editable is navigable
return True
else:
return False
def _get_selectionContainer(self):
""" An ancestor NVDAObject which manages the selection for this object and other descendants."""
return None
def getSelectedItemsCount(self,maxCount=2):
"""
Fetches the number of descendants currently selected.
		For performance, this method will only count up to the given maxCount number; if there is at least one more beyond that, sys.maxint is returned to indicate that many items are selected.
"""
return 0
| 1 | 23,765 | I think returning 1 should some how be moved in to specific support for LibreOffice.. | nvaccess-nvda | py |
@@ -85,6 +85,8 @@ byte *app_sysenter_instr_addr = NULL;
static bool sysenter_hook_failed = false;
#endif
+bool d_r_avx512_code_in_use = false;
+
/* static functions forward references */
static byte *
emit_ibl_routines(dcontext_t *dcontext, generated_code_t *code, byte *pc, | 1 | /* **********************************************************
* Copyright (c) 2010-2019 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/*
* arch.c - x86 architecture specific routines
*/
#include "../globals.h"
#include "../link.h"
#include "../fragment.h"
#include "arch.h"
#include "instr.h"
#include "instr_create.h"
#include "decode.h"
#include "decode_fast.h"
#include "../fcache.h"
#include "proc.h"
#include "instrument.h"
#if defined(DEBUG) || defined(INTERNAL)
# include "disassemble.h"
#endif
/* in interp.c */
void
interp_init(void);
void
interp_exit(void);
/* Thread-shared generated routines.
* We don't allocate the shared_code statically so that we can mark it
* executable.
*/
generated_code_t *shared_code = NULL;
#if defined(X86) && defined(X64)
/* PR 282576: For WOW64 processes we need context switches that swap between 64-bit
* mode and 32-bit mode when executing 32-bit code cache code, as well as
* 32-bit-targeted IBL routines for performance.
*/
generated_code_t *shared_code_x86 = NULL;
/* In x86_to_x64 we can use the extra registers as scratch space.
* The IBL routines are 64-bit and they use r8-r10 freely.
*/
generated_code_t *shared_code_x86_to_x64 = NULL;
#endif
static int syscall_method = SYSCALL_METHOD_UNINITIALIZED;
byte *app_sysenter_instr_addr = NULL;
#ifdef LINUX
static bool sysenter_hook_failed = false;
#endif
/* static functions forward references */
static byte *
emit_ibl_routines(dcontext_t *dcontext, generated_code_t *code, byte *pc,
byte *fcache_return_pc, ibl_source_fragment_type_t source_fragment_type,
bool thread_shared, bool target_trace_table, ibl_code_t ibl_code[]);
static byte *
emit_syscall_routines(dcontext_t *dcontext, generated_code_t *code, byte *pc,
bool thread_shared);
int
reg_spill_tls_offs(reg_id_t reg)
{
switch (reg) {
case SCRATCH_REG0: return TLS_REG0_SLOT;
case SCRATCH_REG1: return TLS_REG1_SLOT;
case SCRATCH_REG2: return TLS_REG2_SLOT;
case SCRATCH_REG3: return TLS_REG3_SLOT;
#ifdef AARCH64
case SCRATCH_REG4: return TLS_REG4_SLOT;
case SCRATCH_REG5: return TLS_REG5_SLOT;
#endif
}
/* don't assert if another reg passed: used on random regs looking for spills */
return -1;
}
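/* Editor's note: an illustrative sketch, not part of the original source.
 * Callers can probe an arbitrary register and treat -1 as "not a TLS spill slot":
 *
 *     int offs = reg_spill_tls_offs(reg);
 *     if (offs != -1)
 *         val = *(reg_t *)(tls_base + offs);  // tls_base is hypothetical here
 */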
/* For Thumb, we store all the entry points with LSB=0 and rely on anyone
* targeting them to use PC_AS_JMP_TGT().
*/
#ifdef INTERNAL
/* routine can be used for dumping both thread private and the thread shared routines */
static void
dump_emitted_routines(dcontext_t *dcontext, file_t file, const char *code_description,
generated_code_t *code, byte *emitted_pc)
{
byte *last_pc;
/* FIXME i#1551: merge w/ GENCODE_IS_X86 below */
# if defined(X86) && defined(X64)
if (GENCODE_IS_X86(code->gencode_mode)) {
/* parts of x86 gencode are 64-bit but it's hard to know which here
* so we dump all as x86
*/
set_x86_mode(dcontext, true /*x86*/);
}
# endif
print_file(file, "%s routines created:\n", code_description);
{
last_pc = code->gen_start_pc;
do {
const char *ibl_brtype;
const char *ibl_name = get_ibl_routine_name(dcontext, last_pc, &ibl_brtype);
# ifdef WINDOWS
/* must test first, as get_ibl_routine_name will think "bb_ibl_indjmp" */
if (last_pc == code->unlinked_shared_syscall)
print_file(file, "unlinked_shared_syscall:\n");
else if (last_pc == code->shared_syscall)
print_file(file, "shared_syscall:\n");
else
# endif
if (ibl_name)
print_file(file, "%s_%s:\n", ibl_name, ibl_brtype);
else if (last_pc == code->fcache_enter)
print_file(file, "fcache_enter:\n");
else if (last_pc == code->fcache_return)
print_file(file, "fcache_return:\n");
else if (last_pc == code->do_syscall)
print_file(file, "do_syscall:\n");
# ifdef ARM
else if (last_pc == code->fcache_enter_gonative)
print_file(file, "fcache_enter_gonative:\n");
# endif
# ifdef WINDOWS
else if (last_pc == code->fcache_enter_indirect)
print_file(file, "fcache_enter_indirect:\n");
else if (last_pc == code->do_callback_return)
print_file(file, "do_callback_return:\n");
# else
else if (last_pc == code->do_int_syscall)
print_file(file, "do_int_syscall:\n");
else if (last_pc == code->do_int81_syscall)
print_file(file, "do_int81_syscall:\n");
else if (last_pc == code->do_int82_syscall)
print_file(file, "do_int82_syscall:\n");
else if (last_pc == code->do_clone_syscall)
print_file(file, "do_clone_syscall:\n");
# ifdef VMX86_SERVER
else if (last_pc == code->do_vmkuw_syscall)
print_file(file, "do_vmkuw_syscall:\n");
# endif
# endif
# ifdef UNIX
else if (last_pc == code->new_thread_dynamo_start)
print_file(file, "new_thread_dynamo_start:\n");
# endif
# ifdef TRACE_HEAD_CACHE_INCR
else if (last_pc == code->trace_head_incr)
print_file(file, "trace_head_incr:\n");
# endif
else if (last_pc == code->reset_exit_stub)
print_file(file, "reset_exit_stub:\n");
else if (last_pc == code->fcache_return_coarse)
print_file(file, "fcache_return_coarse:\n");
else if (last_pc == code->trace_head_return_coarse)
print_file(file, "trace_head_return_coarse:\n");
# ifdef CLIENT_INTERFACE
else if (last_pc == code->special_ibl_xfer[CLIENT_IBL_IDX])
print_file(file, "client_ibl_xfer:\n");
# endif
# ifdef UNIX
else if (last_pc == code->special_ibl_xfer[NATIVE_PLT_IBL_IDX])
print_file(file, "native_plt_ibl_xfer:\n");
else if (last_pc == code->special_ibl_xfer[NATIVE_RET_IBL_IDX])
print_file(file, "native_ret_ibl_xfer:\n");
# endif
else if (last_pc == code->clean_call_save)
print_file(file, "clean_call_save:\n");
else if (last_pc == code->clean_call_restore)
print_file(file, "clean_call_restore:\n");
last_pc = disassemble_with_bytes(dcontext, last_pc, file);
} while (last_pc < emitted_pc);
print_file(file, "%s routines size: " SSZFMT " / " SSZFMT "\n\n",
code_description, emitted_pc - code->gen_start_pc,
code->commit_end_pc - code->gen_start_pc);
}
# if defined(X86) && defined(X64)
if (GENCODE_IS_X86(code->gencode_mode))
set_x86_mode(dcontext, false /*x64*/);
# endif
}
void
dump_emitted_routines_to_file(dcontext_t *dcontext, const char *filename,
const char *label, generated_code_t *code, byte *stop_pc)
{
file_t file = open_log_file(filename, NULL, 0);
if (file != INVALID_FILE) {
/* FIXME: we currently miss later patches for table & mask, but
* that only changes a few immeds
*/
dump_emitted_routines(dcontext, file, label, code, stop_pc);
close_log_file(file);
} else
ASSERT_NOT_REACHED();
}
#endif /* INTERNAL */
/*** functions exported to src directory ***/
static byte *
code_align_forward(dr_isa_mode_t isa_mode, byte *pc, size_t alignment)
{
byte *new_pc = (byte *)ALIGN_FORWARD(pc, alignment);
DOCHECK(1, { SET_TO_NOPS(isa_mode, vmcode_get_writable_addr(pc), new_pc - pc); });
return new_pc;
}
static byte *
move_to_start_of_cache_line(dr_isa_mode_t isa_mode, byte *pc)
{
return code_align_forward(isa_mode, pc, proc_get_cache_line_size());
}
/* The real size of generated code we need varies by cache line size and
* options like inlining of ibl code. We also generate different routines
* for thread-private and thread-shared. So, we dynamically extend the size
* as we generate. Currently our max is under 5 pages.
*/
#define GENCODE_RESERVE_SIZE (5 * PAGE_SIZE)
#define GENCODE_COMMIT_SIZE \
((size_t)(ALIGN_FORWARD(sizeof(generated_code_t), PAGE_SIZE) + PAGE_SIZE))
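/* Editor's note: an illustrative sketch, not part of the original source.
 * Only GENCODE_COMMIT_SIZE (the header rounded up to a page, plus one page of
 * code) is committed up front out of the GENCODE_RESERVE_SIZE (5 pages) of
 * reserved address space; check_size_and_cache_line() below commits one more
 * page at a time as emission approaches commit_end_pc, and release_final_page()
 * returns whatever whole pages remain unused once emission finishes.
 */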
static byte *
check_size_and_cache_line(dr_isa_mode_t isa_mode, generated_code_t *code, byte *pc)
{
/* Assumption: no single emit uses more than a page.
* We keep an extra page at all times and release it at the end.
*/
byte *next_pc = move_to_start_of_cache_line(isa_mode, pc);
if ((byte *)ALIGN_FORWARD(pc, PAGE_SIZE) + PAGE_SIZE > code->commit_end_pc) {
ASSERT(code->commit_end_pc + PAGE_SIZE <=
vmcode_get_executable_addr((byte *)code) + GENCODE_RESERVE_SIZE);
heap_mmap_extend_commitment(code->commit_end_pc, PAGE_SIZE,
VMM_SPECIAL_MMAP | VMM_REACHABLE);
code->commit_end_pc += PAGE_SIZE;
}
return next_pc;
}
static void
release_final_page(generated_code_t *code)
{
/* FIXME: have heap_mmap not allocate a guard page, and use our
* extra for that page, to use one fewer total page of address space.
*/
size_t leftover =
(ptr_uint_t)code->commit_end_pc - ALIGN_FORWARD(code->gen_end_pc, PAGE_SIZE);
ASSERT(code->commit_end_pc >= (byte *)ALIGN_FORWARD(code->gen_end_pc, PAGE_SIZE));
ASSERT(ALIGNED(code->commit_end_pc, PAGE_SIZE));
ASSERT(ALIGNED(leftover, PAGE_SIZE));
if (leftover > 0) {
heap_mmap_retract_commitment(code->commit_end_pc - leftover, leftover,
VMM_SPECIAL_MMAP | VMM_REACHABLE);
code->commit_end_pc -= leftover;
}
LOG(THREAD_GET, LOG_EMIT, 1,
"Generated code " PFX ": %d header, " SZFMT " gen, " SZFMT " commit/%d reserve\n",
code, sizeof(*code), code->gen_end_pc - code->gen_start_pc,
(ptr_uint_t)code->commit_end_pc - (ptr_uint_t)code, GENCODE_RESERVE_SIZE);
}
static void
shared_gencode_emit(generated_code_t *gencode _IF_X86_64(bool x86_mode))
{
byte *pc;
/* As ARM mode switches are inexpensive, we do not need separate gencode
* versions and stick with Thumb for all our gencode.
*/
dr_isa_mode_t isa_mode = dr_get_isa_mode(GLOBAL_DCONTEXT);
pc = gencode->gen_start_pc;
/* Temporarily set this so that ibl queries work during generation */
gencode->gen_end_pc = gencode->commit_end_pc;
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->fcache_enter = pc;
pc = emit_fcache_enter_shared(GLOBAL_DCONTEXT, gencode, pc);
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->fcache_return = pc;
pc = emit_fcache_return_shared(GLOBAL_DCONTEXT, gencode, pc);
gencode->fcache_return_end = pc;
if (DYNAMO_OPTION(coarse_units)) {
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->fcache_return_coarse = pc;
pc = emit_fcache_return_coarse(GLOBAL_DCONTEXT, gencode, pc);
gencode->fcache_return_coarse_end = pc;
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->trace_head_return_coarse = pc;
pc = emit_trace_head_return_coarse(GLOBAL_DCONTEXT, gencode, pc);
}
#ifdef WINDOWS_PC_SAMPLE
gencode->fcache_enter_return_end = pc;
#endif
/* PR 244737: thread-private uses shared gencode on x64.
* Should we set the option instead? */
if (USE_SHARED_TRACE_IBL()) {
/* expected to be false for private trace IBL routine */
pc = emit_ibl_routines(GLOBAL_DCONTEXT, gencode, pc, gencode->fcache_return,
DYNAMO_OPTION(shared_traces)
? IBL_TRACE_SHARED
: IBL_TRACE_PRIVATE, /* source type */
true, /* thread_shared */
true, /* target_trace_table */
gencode->trace_ibl);
}
if (USE_SHARED_BB_IBL()) {
pc = emit_ibl_routines(GLOBAL_DCONTEXT, gencode, pc, gencode->fcache_return,
IBL_BB_SHARED, /* source_fragment_type */
/* thread_shared */
IF_X86_64_ELSE(true, SHARED_FRAGMENTS_ENABLED()),
!DYNAMO_OPTION(bb_ibl_targets), /* target_trace_table */
gencode->bb_ibl);
}
if (DYNAMO_OPTION(coarse_units)) {
pc = emit_ibl_routines(GLOBAL_DCONTEXT, gencode, pc,
/* ibl routines use regular fcache_return */
gencode->fcache_return,
IBL_COARSE_SHARED, /* source_fragment_type */
/* thread_shared */
IF_X86_64_ELSE(true, SHARED_FRAGMENTS_ENABLED()),
!DYNAMO_OPTION(bb_ibl_targets), /*target_trace_table*/
gencode->coarse_ibl);
}
#ifdef WINDOWS_PC_SAMPLE
gencode->ibl_routines_end = pc;
#endif
#if defined(WINDOWS) && !defined(X64)
/* no dispatch needed on x64 since syscall routines are thread-shared */
if (DYNAMO_OPTION(shared_fragment_shared_syscalls)) {
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->shared_syscall = pc;
pc = emit_shared_syscall_dispatch(GLOBAL_DCONTEXT, pc);
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->unlinked_shared_syscall = pc;
pc = emit_unlinked_shared_syscall_dispatch(GLOBAL_DCONTEXT, pc);
LOG(GLOBAL, LOG_EMIT, 3,
"shared_syscall_dispatch: linked " PFX ", unlinked " PFX "\n",
gencode->shared_syscall, gencode->unlinked_shared_syscall);
}
#endif
#ifdef UNIX
/* must create before emit_do_clone_syscall() in emit_syscall_routines() */
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->new_thread_dynamo_start = pc;
pc = emit_new_thread_dynamo_start(GLOBAL_DCONTEXT, pc);
#endif
#ifdef ARM
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->fcache_enter_gonative = pc;
pc = emit_fcache_enter_gonative(GLOBAL_DCONTEXT, gencode, pc);
#endif
#if defined(X86) && defined(X64)
# ifdef WINDOWS
/* plain fcache_enter indirects through edi, and next_tag is in tls,
* so we don't need a separate routine for callback return
*/
gencode->fcache_enter_indirect = gencode->fcache_enter;
# endif
/* i#821/PR 284029: for now we assume there are no syscalls in x86 code */
if (IF_X64_ELSE(!x86_mode, true)) {
/* PR 244737: syscall routines are all shared */
pc = emit_syscall_routines(GLOBAL_DCONTEXT, gencode, pc, true /*thread-shared*/);
}
#elif defined(UNIX) && defined(HAVE_TLS)
/* PR 212570: we need a thread-shared do_syscall for our vsyscall hook */
/* PR 361894: we don't support sysenter if no TLS */
ASSERT(gencode->do_syscall == NULL || dynamo_initialized /*re-gen*/);
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->do_syscall = pc;
pc = emit_do_syscall(GLOBAL_DCONTEXT, gencode, pc, gencode->fcache_return,
true /*shared*/, 0, &gencode->do_syscall_offs);
# ifdef AARCHXX
/* ARM has no thread-private gencode, so our clone syscall is shared */
gencode->do_clone_syscall = pc;
pc = emit_do_clone_syscall(GLOBAL_DCONTEXT, gencode, pc, gencode->fcache_return,
true /*shared*/, &gencode->do_clone_syscall_offs);
# endif
#endif
if (USE_SHARED_GENCODE_ALWAYS()) {
fragment_t *fragment;
/* make reset stub shared */
gencode->reset_exit_stub = pc;
fragment = linkstub_fragment(GLOBAL_DCONTEXT, (linkstub_t *)get_reset_linkstub());
#ifdef X86_64
if (GENCODE_IS_X86(gencode->gencode_mode))
fragment = empty_fragment_mark_x86(fragment);
#endif
/* reset exit stub should look just like a direct exit stub */
pc += insert_exit_stub_other_flags(GLOBAL_DCONTEXT, fragment,
(linkstub_t *)get_reset_linkstub(), pc,
LINK_DIRECT);
}
#ifdef TRACE_HEAD_CACHE_INCR
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->trace_head_incr = pc;
pc = emit_trace_head_incr_shared(GLOBAL_DCONTEXT, pc, gencode->fcache_return);
#endif
if (!special_ibl_xfer_is_thread_private()) {
#ifdef CLIENT_INTERFACE
gencode->special_ibl_xfer[CLIENT_IBL_IDX] = pc;
pc = emit_client_ibl_xfer(GLOBAL_DCONTEXT, pc, gencode);
#endif
#ifdef UNIX
/* i#1238: native exec optimization */
if (DYNAMO_OPTION(native_exec_opt)) {
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->special_ibl_xfer[NATIVE_PLT_IBL_IDX] = pc;
pc = emit_native_plt_ibl_xfer(GLOBAL_DCONTEXT, pc, gencode);
/* native ret */
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->special_ibl_xfer[NATIVE_RET_IBL_IDX] = pc;
pc = emit_native_ret_ibl_xfer(GLOBAL_DCONTEXT, pc, gencode);
}
#endif
}
if (!client_clean_call_is_thread_private()) {
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->clean_call_save = pc;
pc = emit_clean_call_save(GLOBAL_DCONTEXT, pc, gencode);
pc = check_size_and_cache_line(isa_mode, gencode, pc);
gencode->clean_call_restore = pc;
pc = emit_clean_call_restore(GLOBAL_DCONTEXT, pc, gencode);
gencode->clean_call_restore_end = pc;
}
ASSERT(pc < gencode->commit_end_pc);
gencode->gen_end_pc = pc;
machine_cache_sync(gencode->gen_start_pc, gencode->gen_end_pc, true);
}
static void
shared_gencode_init(IF_X86_64_ELSE(gencode_mode_t gencode_mode, void))
{
generated_code_t *gencode;
ibl_branch_type_t branch_type;
#if defined(X86) && defined(X64)
bool x86_mode = false;
bool x86_to_x64_mode = false;
#endif
gencode = heap_mmap_reserve(GENCODE_RESERVE_SIZE, GENCODE_COMMIT_SIZE,
MEMPROT_EXEC | MEMPROT_READ | MEMPROT_WRITE,
VMM_SPECIAL_MMAP | VMM_REACHABLE);
/* we would return gencode and let caller assign, but emit routines
* that this routine calls query the shared vars so we set here
*/
#if defined(X86) && defined(X64)
switch (gencode_mode) {
case GENCODE_X64: shared_code = gencode; break;
case GENCODE_X86:
/* we do not call set_x86_mode() b/c much of the gencode may be
         * 64-bit: it's up to the gencode to mark each instr that's 32-bit.
*/
shared_code_x86 = gencode;
x86_mode = true;
break;
case GENCODE_X86_TO_X64:
shared_code_x86_to_x64 = gencode;
x86_to_x64_mode = true;
break;
default: ASSERT_NOT_REACHED();
}
#else
shared_code = gencode;
#endif
generated_code_t *gencode_writable =
(generated_code_t *)vmcode_get_writable_addr((byte *)gencode);
memset(gencode_writable, 0, sizeof(*gencode));
/* Generated code immediately follows struct */
gencode_writable->gen_start_pc = ((byte *)gencode) + sizeof(*gencode);
gencode_writable->commit_end_pc = ((byte *)gencode) + GENCODE_COMMIT_SIZE;
/* Now switch to the writable one. We assume no further code examines the address
* of the struct.
*/
gencode = gencode_writable;
gencode->thread_shared = true;
IF_X86_64(gencode->gencode_mode = gencode_mode);
for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END;
branch_type++) {
gencode->trace_ibl[branch_type].initialized = false;
gencode->bb_ibl[branch_type].initialized = false;
gencode->coarse_ibl[branch_type].initialized = false;
#if defined(X86) && defined(X64)
/* cache the mode so we can pass just the ibl_code_t around */
gencode->trace_ibl[branch_type].x86_mode = x86_mode;
gencode->trace_ibl[branch_type].x86_to_x64_mode = x86_to_x64_mode;
gencode->bb_ibl[branch_type].x86_mode = x86_mode;
gencode->bb_ibl[branch_type].x86_to_x64_mode = x86_to_x64_mode;
gencode->coarse_ibl[branch_type].x86_mode = x86_mode;
gencode->coarse_ibl[branch_type].x86_to_x64_mode = x86_to_x64_mode;
#endif
}
#if defined(X86) && defined(X64) && defined(WINDOWS)
gencode->shared_syscall_code.x86_mode = x86_mode;
gencode->shared_syscall_code.x86_to_x64_mode = x86_to_x64_mode;
#endif
shared_gencode_emit(gencode _IF_X86_64(x86_mode));
release_final_page(gencode);
DOLOG(3, LOG_EMIT, {
dump_emitted_routines(
GLOBAL_DCONTEXT, GLOBAL,
IF_X86_64_ELSE(x86_mode ? "thread-shared x86" : "thread-shared",
"thread-shared"),
gencode, gencode->gen_end_pc);
});
#ifdef INTERNAL
if (INTERNAL_OPTION(gendump)) {
dump_emitted_routines_to_file(
GLOBAL_DCONTEXT, "gencode-shared",
IF_X86_64_ELSE(x86_mode ? "thread-shared x86" : "thread-shared",
"thread-shared"),
gencode, gencode->gen_end_pc);
}
#endif
#ifdef WINDOWS_PC_SAMPLE
if (dynamo_options.profile_pcs && dynamo_options.prof_pcs_gencode >= 2 &&
dynamo_options.prof_pcs_gencode <= 32) {
gencode->profile = create_profile(gencode->gen_start_pc, gencode->gen_end_pc,
dynamo_options.prof_pcs_gencode, NULL);
start_profile(gencode->profile);
} else
gencode->profile = NULL;
#endif
gencode->writable = true;
protect_generated_code(gencode, READONLY);
}
#ifdef AARCHXX
/* Called during a reset when all threads are suspended */
void
arch_reset_stolen_reg(void)
{
/* We have no per-thread gencode. We simply re-emit on top of the existing
* shared_code, which means we do not need to update each thread's pointers
* to gencode stored in TLS.
*/
dr_isa_mode_t old_mode;
dcontext_t *dcontext;
# ifdef AARCH64
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
# endif
if (DR_REG_R0 + INTERNAL_OPTION(steal_reg_at_reset) == dr_reg_stolen)
return;
SYSLOG_INTERNAL_INFO("swapping stolen reg from %s to %s", reg_names[dr_reg_stolen],
reg_names[DR_REG_R0 + INTERNAL_OPTION(steal_reg_at_reset)]);
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
dr_set_isa_mode(dcontext, DR_ISA_ARM_THUMB, &old_mode);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
dr_reg_stolen = DR_REG_R0 + INTERNAL_OPTION(steal_reg_at_reset);
ASSERT(dr_reg_stolen >= DR_REG_STOLEN_MIN && dr_reg_stolen <= DR_REG_STOLEN_MAX);
shared_gencode_emit(shared_code);
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
dr_set_isa_mode(dcontext, old_mode, NULL);
DOLOG(3, LOG_EMIT, {
dump_emitted_routines(GLOBAL_DCONTEXT, GLOBAL, "swap stolen reg", shared_code,
shared_code->gen_end_pc);
});
}
void
arch_mcontext_reset_stolen_reg(dcontext_t *dcontext, priv_mcontext_t *mc)
{
/* Put the app value in the old stolen reg */
*(reg_t *)(((byte *)mc) +
opnd_get_reg_dcontext_offs(DR_REG_R0 + INTERNAL_OPTION(steal_reg))) =
dcontext->local_state->spill_space.reg_stolen;
    /* Put the TLS base into the new stolen reg */
set_stolen_reg_val(mc, (reg_t)os_get_dr_tls_base(dcontext));
}
#endif /* AARCHXX */
#if defined(X86) && defined(X64)
/* Sets other-mode ibl targets, for mixed-mode and x86_to_x64 mode */
static void
far_ibl_set_targets(ibl_code_t src_ibl[], ibl_code_t tgt_ibl[])
{
ibl_branch_type_t branch_type;
for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END;
branch_type++) {
if (src_ibl[branch_type].initialized) {
/* selector was set in emit_far_ibl (but at that point we didn't have
* the other mode's ibl ready for the target)
*/
ASSERT(CHECK_TRUNCATE_TYPE_uint(
(ptr_uint_t)tgt_ibl[branch_type].indirect_branch_lookup_routine));
ASSERT(CHECK_TRUNCATE_TYPE_uint(
(ptr_uint_t)tgt_ibl[branch_type].unlinked_ibl_entry));
src_ibl[branch_type].far_jmp_opnd.pc =
(uint)(ptr_uint_t)tgt_ibl[branch_type].indirect_branch_lookup_routine;
src_ibl[branch_type].far_jmp_unlinked_opnd.pc =
(uint)(ptr_uint_t)tgt_ibl[branch_type].unlinked_ibl_entry;
}
}
}
#endif
/* arch-specific initializations */
void
d_r_arch_init(void)
{
ASSERT(sizeof(opnd_t) == EXPECTED_SIZEOF_OPND);
IF_X86(ASSERT(CHECK_TRUNCATE_TYPE_byte(OPSZ_LAST)));
/* This ensures that DR_REG_ enums that may be used as opnd_size_t fit its size.
* Only DR_REG_ enums covered by types listed in template_optype_is_reg can fall
* into this category.
*/
IF_X86(ASSERT(CHECK_TRUNCATE_TYPE_byte(DR_REG_MAX_AS_OPSZ)));
/* ensure our flag sharing is done properly */
ASSERT((uint)LINK_FINAL_INSTR_SHARED_FLAG < (uint)INSTR_FIRST_NON_LINK_SHARED_FLAG);
ASSERT_TRUNCATE(byte, byte, OPSZ_LAST_ENUM);
ASSERT(DR_ISA_ARM_A32 + 1 == DR_ISA_ARM_THUMB); /* ibl relies on this */
#ifdef X86_64
/* We rely on contiguous ranges when computing AVX-512 registers. */
ASSERT(DR_REG_XMM16 == DR_REG_XMM15 + 1);
ASSERT(DR_REG_YMM16 == DR_REG_YMM15 + 1);
ASSERT(DR_REG_ZMM16 == DR_REG_ZMM15 + 1);
#endif
/* Verify that the structures used for a register spill area and to hold IBT
* table addresses & masks for IBL code are laid out as expected. We expect
* the spill area to be at offset 0 within the container struct and for the
* table address/mask pair array to follow immediately after the spill area.
*/
/* FIXME These can be converted into compile-time checks as follows:
*
* lookup_table_access_t table[
* (offsetof(local_state_extended_t, spill_space) == 0 &&
* offsetof(local_state_extended_t, table_space) ==
* sizeof(spill_state_t)) ? IBL_BRANCH_TYPE_END : -1 ];
*
* This isn't self-descriptive, though, so it's not being used right now
* (xref case 7097).
*/
ASSERT(offsetof(local_state_extended_t, spill_space) == 0);
ASSERT(offsetof(local_state_extended_t, table_space) == sizeof(spill_state_t));
#ifdef WINDOWS
/* syscalls_init() should have already set the syscall_method so go ahead
     * and create the global_do_syscall now */
ASSERT(syscall_method != SYSCALL_METHOD_UNINITIALIZED);
#endif
#ifdef AARCHXX
dr_reg_stolen = DR_REG_R0 + DYNAMO_OPTION(steal_reg);
    ASSERT(dr_reg_stolen >= DR_REG_STOLEN_MIN && dr_reg_stolen <= DR_REG_STOLEN_MAX);
#endif
/* Ensure we have no unexpected padding inside structs that include
* priv_mcontext_t (app_state_at_intercept_t and dcontext_t) */
IF_X86(ASSERT(offsetof(priv_mcontext_t, pc) + sizeof(byte *) + PRE_XMM_PADDING ==
offsetof(priv_mcontext_t, simd)));
ASSERT(offsetof(app_state_at_intercept_t, mc) ==
offsetof(app_state_at_intercept_t, start_pc) + sizeof(void *));
/* Try to catch errors in x86.asm offsets for dcontext_t */
ASSERT(sizeof(unprotected_context_t) ==
sizeof(priv_mcontext_t) + IF_WINDOWS_ELSE(IF_X64_ELSE(8, 4), 8) +
IF_CLIENT_INTERFACE_ELSE(5 * sizeof(reg_t), 0));
interp_init();
#ifdef CHECK_RETURNS_SSE2
if (proc_has_feature(FEATURE_SSE2)) {
FATAL_USAGE_ERROR(CHECK_RETURNS_SSE2_REQUIRES_SSE2, 2, get_application_name(),
get_application_pid());
}
#endif
if (USE_SHARED_GENCODE()) {
/* thread-shared generated code */
/* Assumption: no single emit uses more than a page.
* We keep an extra page at all times and release it at the end.
* FIXME: have heap_mmap not allocate a guard page, and use our
* extra for that page, to use one fewer total page of address space.
*/
ASSERT(GENCODE_COMMIT_SIZE < GENCODE_RESERVE_SIZE);
shared_gencode_init(IF_X86_64(GENCODE_X64));
#if defined(X86) && defined(X64)
/* FIXME i#49: usually LOL64 has only 32-bit code (kernel has 32-bit syscall
* interface) but for mixed modes how would we know? We'd have to make
* this be initialized lazily on first occurrence.
*/
if (mixed_mode_enabled()) {
generated_code_t *shared_code_opposite_mode;
shared_gencode_init(IF_X64(GENCODE_X86));
if (DYNAMO_OPTION(x86_to_x64)) {
shared_gencode_init(IF_X64(GENCODE_X86_TO_X64));
shared_code_opposite_mode = shared_code_x86_to_x64;
} else
shared_code_opposite_mode = shared_code_x86;
/* Now link the far_ibl for each type to the corresponding regular
* ibl of the opposite mode.
*/
far_ibl_set_targets(shared_code->trace_ibl,
shared_code_opposite_mode->trace_ibl);
far_ibl_set_targets(shared_code->bb_ibl, shared_code_opposite_mode->bb_ibl);
far_ibl_set_targets(shared_code->coarse_ibl,
shared_code_opposite_mode->coarse_ibl);
far_ibl_set_targets(shared_code_opposite_mode->trace_ibl,
shared_code->trace_ibl);
far_ibl_set_targets(shared_code_opposite_mode->bb_ibl, shared_code->bb_ibl);
far_ibl_set_targets(shared_code_opposite_mode->coarse_ibl,
shared_code->coarse_ibl);
}
#endif
}
mangle_init();
}
#ifdef WINDOWS_PC_SAMPLE
static void
arch_extract_profile(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode))
{
generated_code_t *tpc = get_emitted_routines_code(dcontext _IF_X86_64(mode));
thread_id_t tid = dcontext == GLOBAL_DCONTEXT ? 0 : dcontext->owning_thread;
/* we may not have x86 gencode */
ASSERT(tpc != NULL IF_X86_64(|| mode == GENCODE_X86));
if (tpc != NULL && tpc->profile != NULL) {
ibl_branch_type_t branch_type;
int sum;
protect_generated_code(tpc, WRITABLE);
stop_profile(tpc->profile);
d_r_mutex_lock(&profile_dump_lock);
/* Print the thread id so even if it has no hits we can
* count the # total threads. */
print_file(profile_file, "Profile for thread " TIDFMT "\n", tid);
sum = sum_profile_range(tpc->profile, tpc->fcache_enter,
tpc->fcache_enter_return_end);
if (sum > 0) {
print_file(profile_file,
"\nDumping cache enter/exit code profile "
"(thread " TIDFMT ")\n%d hits\n",
tid, sum);
dump_profile_range(profile_file, tpc->profile, tpc->fcache_enter,
tpc->fcache_enter_return_end);
}
/* Break out the IBL code by trace/BB and opcode types.
* Not worth showing far_ibl hits since should be quite rare.
*/
for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END;
branch_type++) {
byte *start;
byte *end;
if (tpc->trace_ibl[branch_type].initialized) {
start = tpc->trace_ibl[branch_type].indirect_branch_lookup_routine;
end = start + tpc->trace_ibl[branch_type].ibl_routine_length;
sum = sum_profile_range(tpc->profile, start, end);
if (sum > 0) {
print_file(profile_file,
"\nDumping trace IBL code %s profile "
"(thread " TIDFMT ")\n%d hits\n",
get_branch_type_name(branch_type), tid, sum);
dump_profile_range(profile_file, tpc->profile, start, end);
}
}
if (tpc->bb_ibl[branch_type].initialized) {
start = tpc->bb_ibl[branch_type].indirect_branch_lookup_routine;
end = start + tpc->bb_ibl[branch_type].ibl_routine_length;
sum = sum_profile_range(tpc->profile, start, end);
if (sum > 0) {
print_file(profile_file,
"\nDumping BB IBL code %s profile "
"(thread " TIDFMT ")\n%d hits\n",
get_branch_type_name(branch_type), tid, sum);
dump_profile_range(profile_file, tpc->profile, start, end);
}
}
if (tpc->coarse_ibl[branch_type].initialized) {
start = tpc->coarse_ibl[branch_type].indirect_branch_lookup_routine;
end = start + tpc->coarse_ibl[branch_type].ibl_routine_length;
sum = sum_profile_range(tpc->profile, start, end);
if (sum > 0) {
print_file(profile_file,
"\nDumping coarse IBL code %s profile "
"(thread " TIDFMT ")\n%d hits\n",
get_branch_type_name(branch_type), tid, sum);
dump_profile_range(profile_file, tpc->profile, start, end);
}
}
}
sum = sum_profile_range(tpc->profile, tpc->ibl_routines_end, tpc->profile->end);
if (sum > 0) {
print_file(profile_file,
"\nDumping generated code profile "
"(thread " TIDFMT ")\n%d hits\n",
tid, sum);
dump_profile_range(profile_file, tpc->profile, tpc->ibl_routines_end,
tpc->profile->end);
}
d_r_mutex_unlock(&profile_dump_lock);
free_profile(tpc->profile);
tpc->profile = NULL;
}
}
void
arch_profile_exit()
{
if (USE_SHARED_GENCODE()) {
arch_extract_profile(GLOBAL_DCONTEXT _IF_X64(GENCODE_X64));
IF_X64(arch_extract_profile(GLOBAL_DCONTEXT _IF_X64(GENCODE_X86)));
}
}
#endif /* WINDOWS_PC_SAMPLE */
/* arch-specific atexit cleanup */
void
d_r_arch_exit(IF_WINDOWS_ELSE_NP(bool detach_stacked_callbacks, void))
{
/* we only need to unprotect shared_code for profile extraction
* so we do it there to also cover the fast exit path
*/
#ifdef WINDOWS_PC_SAMPLE
arch_profile_exit();
#endif
/* on x64 we have syscall routines in the shared code so can't free if detaching */
if (IF_WINDOWS(IF_X64(!detach_stacked_callbacks &&)) shared_code != NULL) {
heap_munmap(shared_code, GENCODE_RESERVE_SIZE, VMM_SPECIAL_MMAP | VMM_REACHABLE);
}
#if defined(X86) && defined(X64)
if (shared_code_x86 != NULL) {
heap_munmap(shared_code_x86, GENCODE_RESERVE_SIZE,
VMM_SPECIAL_MMAP | VMM_REACHABLE);
}
if (shared_code_x86_to_x64 != NULL) {
heap_munmap(shared_code_x86_to_x64, GENCODE_RESERVE_SIZE,
VMM_SPECIAL_MMAP | VMM_REACHABLE);
}
#endif
interp_exit();
mangle_exit();
if (doing_detach) {
/* Clear for possible re-attach. */
shared_code = NULL;
#if defined(X86) && defined(X64)
shared_code_x86 = NULL;
shared_code_x86_to_x64 = NULL;
#endif
app_sysenter_instr_addr = NULL;
#ifdef LINUX
/* If we don't clear this we get asserts on vsyscall hook on re-attach on
* some Linux variants. We don't want to clear on Windows 8+ as that causes
* asserts on re-attach (i#2145).
*/
syscall_method = SYSCALL_METHOD_UNINITIALIZED;
sysenter_hook_failed = false;
#endif
}
}
static byte *
emit_ibl_routine_and_template(dcontext_t *dcontext, generated_code_t *code, byte *pc,
byte *fcache_return_pc, bool target_trace_table,
bool inline_ibl_head, bool thread_shared,
ibl_branch_type_t branch_type,
ibl_source_fragment_type_t source_type,
ibl_code_t *ibl_code)
{
/* FIXME i#1551: pass in or store mode in generated_code_t */
dr_isa_mode_t isa_mode = dr_get_isa_mode(dcontext);
pc = check_size_and_cache_line(isa_mode, code, pc);
ibl_code->initialized = true;
ibl_code->indirect_branch_lookup_routine = pc;
ibl_code->ibl_head_is_inlined = inline_ibl_head;
ibl_code->thread_shared_routine = thread_shared;
ibl_code->branch_type = branch_type;
ibl_code->source_fragment_type = source_type;
pc = emit_indirect_branch_lookup(dcontext, code, pc, fcache_return_pc,
target_trace_table, inline_ibl_head, ibl_code);
if (inline_ibl_head) {
/* create the inlined ibl template */
pc = check_size_and_cache_line(isa_mode, code, pc);
pc = emit_inline_ibl_stub(dcontext, pc, ibl_code, target_trace_table);
}
ibl_code->far_ibl = pc;
pc = emit_far_ibl(
dcontext, pc, ibl_code,
ibl_code->indirect_branch_lookup_routine _IF_X86_64(&ibl_code->far_jmp_opnd));
ibl_code->far_ibl_unlinked = pc;
pc = emit_far_ibl(
dcontext, pc, ibl_code,
ibl_code->unlinked_ibl_entry _IF_X86_64(&ibl_code->far_jmp_unlinked_opnd));
return pc;
}
static byte *
emit_ibl_routines(dcontext_t *dcontext, generated_code_t *code, byte *pc,
byte *fcache_return_pc, ibl_source_fragment_type_t source_fragment_type,
bool thread_shared, bool target_trace_table,
ibl_code_t ibl_code_routines[])
{
ibl_branch_type_t branch_type;
/* emit separate routines for each branch type
The goal is to have routines that target different fragment tables
so that we can control for example return targets for RAC,
or we can control inlining if some branch types have better hit ratios.
Currently it only gives us better stats.
*/
/*
N.B.: shared fragments requires -atomic_inlined_linking in order to
inline ibl lookups, but not for private since they're unlinked by another thread
flushing but not linked by anyone but themselves.
*/
bool inline_ibl_head = (IS_IBL_TRACE(source_fragment_type))
? DYNAMO_OPTION(inline_trace_ibl)
: DYNAMO_OPTION(inline_bb_ibl);
for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END;
branch_type++) {
#ifdef HASHTABLE_STATISTICS
/* ugly asserts but we'll stick with uints to save space */
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(
GET_IBL_TARGET_TABLE(branch_type, target_trace_table) +
offsetof(ibl_table_t, unprot_stats))));
ibl_code_routines[branch_type].unprot_stats_offset =
(uint)GET_IBL_TARGET_TABLE(branch_type, target_trace_table) +
offsetof(ibl_table_t, unprot_stats);
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(
GET_IBL_TARGET_TABLE(branch_type, target_trace_table) +
offsetof(ibl_table_t, entry_stats_to_lookup_table))));
ibl_code_routines[branch_type].entry_stats_to_lookup_table_offset =
(uint)GET_IBL_TARGET_TABLE(branch_type, target_trace_table) +
offsetof(ibl_table_t, entry_stats_to_lookup_table);
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(
offsetof(unprot_ht_statistics_t, trace_ibl_stats[branch_type]))));
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(
offsetof(unprot_ht_statistics_t, bb_ibl_stats[branch_type]))));
ibl_code_routines[branch_type].hashtable_stats_offset =
(uint)((IS_IBL_TRACE(source_fragment_type))
? offsetof(unprot_ht_statistics_t, trace_ibl_stats[branch_type])
: offsetof(unprot_ht_statistics_t, bb_ibl_stats[branch_type]));
#endif
pc = emit_ibl_routine_and_template(
dcontext, code, pc, fcache_return_pc, target_trace_table, inline_ibl_head,
thread_shared, branch_type, source_fragment_type,
&ibl_code_routines[branch_type]);
}
return pc;
}
static byte *
emit_syscall_routines(dcontext_t *dcontext, generated_code_t *code, byte *pc,
bool thread_shared)
{
/* FIXME i#1551: pass in or store mode in generated_code_t */
dr_isa_mode_t isa_mode = dr_get_isa_mode(dcontext);
#ifdef HASHTABLE_STATISTICS
/* Stats for the syscall IBLs (note it is also using the trace
* hashtable, and it never hits!)
*/
# ifdef WINDOWS
/* ugly asserts but we'll stick with uints to save space */
IF_X64(
ASSERT(CHECK_TRUNCATE_TYPE_uint(GET_IBL_TARGET_TABLE(IBL_SHARED_SYSCALL, true) +
offsetof(ibl_table_t, unprot_stats))));
code->shared_syscall_code.unprot_stats_offset =
(uint)GET_IBL_TARGET_TABLE(IBL_SHARED_SYSCALL, true) +
offsetof(ibl_table_t, unprot_stats);
IF_X64(ASSERT(
CHECK_TRUNCATE_TYPE_uint(GET_IBL_TARGET_TABLE(IBL_SHARED_SYSCALL, true) +
offsetof(ibl_table_t, entry_stats_to_lookup_table))));
code->shared_syscall_code.entry_stats_to_lookup_table_offset =
(uint)GET_IBL_TARGET_TABLE(IBL_SHARED_SYSCALL, true) +
offsetof(ibl_table_t, entry_stats_to_lookup_table);
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(
offsetof(unprot_ht_statistics_t, shared_syscall_hit_stats))));
code->shared_syscall_code.hashtable_stats_offset =
(uint)offsetof(unprot_ht_statistics_t, shared_syscall_hit_stats);
# endif /* WINDOWS */
#endif /* HASHTABLE_STATISTICS */
#ifdef WINDOWS
pc = check_size_and_cache_line(isa_mode, code, pc);
code->do_callback_return = pc;
pc = emit_do_callback_return(dcontext, pc, code->fcache_return, thread_shared);
if (DYNAMO_OPTION(shared_syscalls)) {
ibl_code_t *ibl_code;
if (DYNAMO_OPTION(disable_traces)) {
ibl_code = DYNAMO_OPTION(shared_bbs)
? &SHARED_GENCODE(code->gencode_mode)->bb_ibl[IBL_SHARED_SYSCALL]
: &code->bb_ibl[IBL_SHARED_SYSCALL];
} else if (DYNAMO_OPTION(shared_traces)) {
ibl_code = &SHARED_GENCODE(code->gencode_mode)->trace_ibl[IBL_SHARED_SYSCALL];
} else {
ibl_code = &code->trace_ibl[IBL_SHARED_SYSCALL];
}
pc = check_size_and_cache_line(isa_mode, code, pc);
code->unlinked_shared_syscall = pc;
pc = emit_shared_syscall(
dcontext, code, pc, &code->shared_syscall_code,
&code->shared_syscall_code.ibl_patch,
ibl_code->indirect_branch_lookup_routine, ibl_code->unlinked_ibl_entry,
!DYNAMO_OPTION(disable_traces), /* target_trace_table */
/* Only a single copy of shared syscall is
* emitted and afterwards it performs an IBL.
* Since both traces and BBs execute shared
* syscall (when trace building isn't disabled),
* we can't target the trace IBT table; otherwise,
* we'd miss marking secondary trace heads after
* a post-trace IBL misses. More comments are
* co-located with emit_shared_syscall().
*/
DYNAMO_OPTION(disable_traces)
? DYNAMO_OPTION(inline_bb_ibl)
: DYNAMO_OPTION(inline_trace_ibl), /* inline_ibl_head */
ibl_code->thread_shared_routine, /* thread_shared */
&code->shared_syscall);
code->end_shared_syscall = pc;
/* Lookup at end of shared_syscall should be able to go to bb or trace,
* unrestricted (will never be an exit from a trace so no secondary trace
* restrictions) -- currently only traces supported so using the trace_ibl
* is OK.
*/
}
pc = check_size_and_cache_line(isa_mode, code, pc);
code->do_syscall = pc;
pc = emit_do_syscall(dcontext, code, pc, code->fcache_return, thread_shared, 0,
&code->do_syscall_offs);
#else /* UNIX */
pc = check_size_and_cache_line(isa_mode, code, pc);
code->do_syscall = pc;
pc = emit_do_syscall(dcontext, code, pc, code->fcache_return, thread_shared, 0,
&code->do_syscall_offs);
pc = check_size_and_cache_line(isa_mode, code, pc);
code->do_int_syscall = pc;
pc = emit_do_syscall(dcontext, code, pc, code->fcache_return, thread_shared,
0x80 /*force int*/, &code->do_int_syscall_offs);
pc = check_size_and_cache_line(isa_mode, code, pc);
code->do_int81_syscall = pc;
pc = emit_do_syscall(dcontext, code, pc, code->fcache_return, thread_shared,
0x81 /*force int*/, &code->do_int81_syscall_offs);
pc = check_size_and_cache_line(isa_mode, code, pc);
code->do_int82_syscall = pc;
pc = emit_do_syscall(dcontext, code, pc, code->fcache_return, thread_shared,
0x82 /*force int*/, &code->do_int82_syscall_offs);
pc = check_size_and_cache_line(isa_mode, code, pc);
code->do_clone_syscall = pc;
pc = emit_do_clone_syscall(dcontext, code, pc, code->fcache_return, thread_shared,
&code->do_clone_syscall_offs);
# ifdef VMX86_SERVER
pc = check_size_and_cache_line(isa_mode, code, pc);
code->do_vmkuw_syscall = pc;
pc = emit_do_vmkuw_syscall(dcontext, code, pc, code->fcache_return, thread_shared,
&code->do_vmkuw_syscall_offs);
# endif
#endif /* UNIX */
return pc;
}
void
arch_thread_init(dcontext_t *dcontext)
{
byte *pc;
generated_code_t *code;
ibl_branch_type_t branch_type;
dr_isa_mode_t isa_mode = dr_get_isa_mode(dcontext);
#ifdef X86
/* Simplest to have a real dcontext for emitting the selfmod code
* and finding the patch offsets so we do it on 1st thread init */
static bool selfmod_init = false;
if (!selfmod_init) {
ASSERT(!dynamo_initialized); /* .data +w */
selfmod_init = true;
set_selfmod_sandbox_offsets(dcontext);
}
#endif
ASSERT_CURIOSITY(proc_is_cache_aligned(get_local_state())
IF_WINDOWS(|| DYNAMO_OPTION(tls_align != 0)));
#if defined(X86) && defined(X64)
/* PR 244737: thread-private uses only shared gencode on x64 */
ASSERT(dcontext->private_code == NULL);
return;
#endif
#ifdef AARCHXX
/* Store addresses we access via TLS from exit stubs and gencode. */
get_local_state_extended()->spill_space.fcache_return =
PC_AS_JMP_TGT(isa_mode, fcache_return_shared_routine());
for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END;
branch_type++) {
get_local_state_extended()->spill_space.trace_ibl[branch_type].ibl =
PC_AS_JMP_TGT(
isa_mode,
get_ibl_routine(dcontext, IBL_LINKED, IBL_TRACE_SHARED, branch_type));
get_local_state_extended()->spill_space.trace_ibl[branch_type].unlinked =
PC_AS_JMP_TGT(
isa_mode,
get_ibl_routine(dcontext, IBL_UNLINKED, IBL_TRACE_SHARED, branch_type));
get_local_state_extended()->spill_space.bb_ibl[branch_type].ibl = PC_AS_JMP_TGT(
isa_mode, get_ibl_routine(dcontext, IBL_LINKED, IBL_BB_SHARED, branch_type));
get_local_state_extended()->spill_space.bb_ibl[branch_type].unlinked =
PC_AS_JMP_TGT(
isa_mode,
get_ibl_routine(dcontext, IBL_UNLINKED, IBL_BB_SHARED, branch_type));
}
/* Because absolute addresses are impractical on ARM, thread-private uses
* only shared gencode, just like for 64-bit.
*/
ASSERT(dcontext->private_code == NULL);
return;
#endif
/* For detach on windows need to use a separate mmap so we can leave this
* memory around in case of outstanding callbacks when we detach. Without
* detach or on linux could just use one of our heaps (which would save
* a little space, (would then need to coordinate with arch_thread_exit)
*/
ASSERT(GENCODE_COMMIT_SIZE < GENCODE_RESERVE_SIZE);
/* case 9474; share allocation unit w/ thread-private stack */
code = heap_mmap_reserve_post_stack(
dcontext, GENCODE_RESERVE_SIZE, GENCODE_COMMIT_SIZE,
MEMPROT_EXEC | MEMPROT_READ | MEMPROT_WRITE, VMM_SPECIAL_MMAP | VMM_REACHABLE);
ASSERT(code != NULL);
dcontext->private_code = (void *)code;
generated_code_t *code_writable =
(generated_code_t *)vmcode_get_writable_addr((byte *)code);
/* FIXME case 6493: if we split private from shared, remove this
* memset since we will no longer have a bunch of fields we don't use
*/
memset(code_writable, 0, sizeof(*code));
/* Generated code immediately follows struct */
code_writable->gen_start_pc = ((byte *)code) + sizeof(*code);
code_writable->commit_end_pc = ((byte *)code) + GENCODE_COMMIT_SIZE;
/* Now switch to the writable one. We assume no further code examines the address
* of the struct.
*/
code = code_writable;
code->thread_shared = false;
for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END;
branch_type++) {
code->trace_ibl[branch_type].initialized = false;
code->bb_ibl[branch_type].initialized = false;
code->coarse_ibl[branch_type].initialized = false;
}
pc = code->gen_start_pc;
pc = check_size_and_cache_line(isa_mode, code, pc);
code->fcache_enter = pc;
pc = emit_fcache_enter(dcontext, code, pc);
pc = check_size_and_cache_line(isa_mode, code, pc);
code->fcache_return = pc;
pc = emit_fcache_return(dcontext, code, pc);
code->fcache_return_end = pc;
#ifdef WINDOWS_PC_SAMPLE
code->fcache_enter_return_end = pc;
#endif
    /* Currently all ibl routines target the trace hashtable
       and we don't yet support basic blocks as targets of an IBL.
       However, having separate routines at least enables finer control
       over the indirect exit stubs.
       This way we have inlined IBL stubs for traces but not in basic blocks.
       TODO: Now that the IBL routines are separated, we can retarget them to separate
       hashtables (or alternatively chain several IBL routines together).
       From trace ib exits we can only go to {traces}, so no change here.
         (When we exit to a basic block we need to mark it as a trace head.)
       From basic block ib exits we should be able to go to {traces + bbs - traceheads}
         (for the tracehead bbs we actually have to increment counters).
       From shared_syscall we should be able to go to {traces + bbs}.
       TODO: we also want to have separate routines per indirect branch type to enable
       the restricted control transfer policies to be efficiently enforced.
    */
if (!DYNAMO_OPTION(disable_traces) && DYNAMO_OPTION(shared_trace_ibl_routine)) {
if (!DYNAMO_OPTION(shared_traces)) {
            /* copy all bookkeeping information needed by get_ibl_routine*()
               from shared_code into the thread-private gencode */
ibl_branch_type_t ibl_branch_type;
for (ibl_branch_type = IBL_BRANCH_TYPE_START;
ibl_branch_type < IBL_BRANCH_TYPE_END; ibl_branch_type++) {
code->trace_ibl[ibl_branch_type] =
SHARED_GENCODE(code->gencode_mode)->trace_ibl[ibl_branch_type];
}
} /* FIXME: no private traces supported right now w/ -shared_traces */
} else if (PRIVATE_TRACES_ENABLED()) {
/* shared_trace_ibl_routine should be false for private (performance test only) */
pc = emit_ibl_routines(dcontext, code, pc, code->fcache_return,
IBL_TRACE_PRIVATE, /* source_fragment_type */
DYNAMO_OPTION(shared_trace_ibl_routine), /* shared */
true, /* target_trace_table */
code->trace_ibl);
}
pc = emit_ibl_routines(dcontext, code, pc, code->fcache_return,
IBL_BB_PRIVATE, /* source_fragment_type */
/* need thread-private for selfmod regardless of sharing */
false, /* thread_shared */
!DYNAMO_OPTION(bb_ibl_targets), /* target_trace_table */
code->bb_ibl);
#ifdef WINDOWS_PC_SAMPLE
code->ibl_routines_end = pc;
#endif
#if defined(UNIX) && !defined(HAVE_TLS)
/* for HAVE_TLS we use the shared version; w/o TLS we don't
* make any shared routines (PR 361894)
*/
/* must create before emit_do_clone_syscall() in emit_syscall_routines() */
pc = check_size_and_cache_line(isa_mode, code, pc);
code->new_thread_dynamo_start = pc;
pc = emit_new_thread_dynamo_start(dcontext, pc);
#endif
#ifdef WINDOWS
pc = check_size_and_cache_line(isa_mode, code, pc);
code->fcache_enter_indirect = pc;
pc = emit_fcache_enter_indirect(dcontext, code, pc, code->fcache_return);
#endif
pc = emit_syscall_routines(dcontext, code, pc, false /*thread-private*/);
#ifdef TRACE_HEAD_CACHE_INCR
pc = check_size_and_cache_line(isa_mode, code, pc);
code->trace_head_incr = pc;
pc = emit_trace_head_incr(dcontext, pc, code->fcache_return);
#endif
#ifdef CHECK_RETURNS_SSE2_EMIT
/* PR 248210: unsupported feature on x64: need to move to thread-shared gencode
* if want to support it.
*/
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
pc = check_size_and_cache_line(isa_mode, code, pc);
code->pextrw = pc;
pc = emit_pextrw(dcontext, pc);
pc = check_size_and_cache_line(isa_mode, code, pc);
code->pinsrw = pc;
pc = emit_pinsrw(dcontext, pc);
#endif
code->reset_exit_stub = pc;
/* reset exit stub should look just like a direct exit stub */
pc += insert_exit_stub_other_flags(
dcontext, linkstub_fragment(dcontext, (linkstub_t *)get_reset_linkstub()),
(linkstub_t *)get_reset_linkstub(), pc, LINK_DIRECT);
if (special_ibl_xfer_is_thread_private()) {
#ifdef CLIENT_INTERFACE
code->special_ibl_xfer[CLIENT_IBL_IDX] = pc;
pc = emit_client_ibl_xfer(dcontext, pc, code);
#endif
#ifdef UNIX
/* i#1238: native exec optimization */
if (DYNAMO_OPTION(native_exec_opt)) {
pc = check_size_and_cache_line(isa_mode, code, pc);
code->special_ibl_xfer[NATIVE_PLT_IBL_IDX] = pc;
pc = emit_native_plt_ibl_xfer(dcontext, pc, code);
/* native ret */
pc = check_size_and_cache_line(isa_mode, code, pc);
code->special_ibl_xfer[NATIVE_RET_IBL_IDX] = pc;
pc = emit_native_ret_ibl_xfer(dcontext, pc, code);
}
#endif
}
/* XXX: i#1149: we should always use thread shared gencode */
if (client_clean_call_is_thread_private()) {
pc = check_size_and_cache_line(isa_mode, code, pc);
code->clean_call_save = pc;
pc = emit_clean_call_save(dcontext, pc, code);
pc = check_size_and_cache_line(isa_mode, code, pc);
code->clean_call_restore = pc;
pc = emit_clean_call_restore(dcontext, pc, code);
code->clean_call_restore_end = pc;
}
ASSERT(pc < code->commit_end_pc);
code->gen_end_pc = pc;
release_final_page(code);
DOLOG(3, LOG_EMIT,
{ dump_emitted_routines(dcontext, THREAD, "thread-private", code, pc); });
#ifdef INTERNAL
if (INTERNAL_OPTION(gendump)) {
dump_emitted_routines_to_file(dcontext, "gencode-private", "thread-private", code,
pc);
}
#endif
#ifdef WINDOWS_PC_SAMPLE
if (dynamo_options.profile_pcs && dynamo_options.prof_pcs_gencode >= 2 &&
dynamo_options.prof_pcs_gencode <= 32) {
code->profile =
create_profile(code->gen_start_pc, pc, dynamo_options.prof_pcs_gencode, NULL);
start_profile(code->profile);
} else
code->profile = NULL;
#endif
code->writable = true;
/* For SELFPROT_GENCODE we don't make unwritable until after we patch,
* though for hotp_only we don't patch.
*/
#ifdef HOT_PATCHING_INTERFACE
if (DYNAMO_OPTION(hotp_only))
#endif
protect_generated_code(code, READONLY);
}
#ifdef WINDOWS_PC_SAMPLE
void
arch_thread_profile_exit(dcontext_t *dcontext)
{
arch_extract_profile(dcontext _IF_X64(GENCODE_FROM_DCONTEXT));
}
#endif
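/* Tears down the thread-private generated code allocated by arch_thread_init
 * (profile extraction when PC sampling is enabled, then unmapping).  On
 * x64/ARM there is nothing to free since only shared gencode is used.
 */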
void
arch_thread_exit(dcontext_t *dcontext _IF_WINDOWS(bool detach_stacked_callbacks))
{
#if defined(X64) || defined(ARM)
/* PR 244737: thread-private uses only shared gencode on x64 */
ASSERT(dcontext->private_code == NULL);
return;
#endif
/* We only need to unprotect private_code for profile extraction
* so we do it there to also cover the fast exit path.
* Also note that for detach w/ stacked callbacks arch_patch_syscall()
* will have already unprotected.
*/
#ifdef WINDOWS
if (!detach_stacked_callbacks && !DYNAMO_OPTION(thin_client)) {
#endif
/* ensure we didn't miss the init patch and leave it writable! */
ASSERT(!TEST(SELFPROT_GENCODE, DYNAMO_OPTION(protect_mask)) ||
!((generated_code_t *)dcontext->private_code)->writable);
#ifdef WINDOWS
}
#endif
#ifdef WINDOWS_PC_SAMPLE
arch_thread_profile_exit(dcontext);
#endif
#ifdef WINDOWS
if (!detach_stacked_callbacks)
#endif
heap_munmap_post_stack(dcontext, dcontext->private_code, GENCODE_RESERVE_SIZE,
VMM_SPECIAL_MMAP | VMM_REACHABLE);
}
#ifdef WINDOWS
/* Patch syscall routines for detach */
static void
arch_patch_syscall_common(dcontext_t *dcontext, byte *target _IF_X64(gencode_mode_t mode))
{
generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(mode));
if (code != NULL && (!is_shared_gencode(code) || dcontext == GLOBAL_DCONTEXT)) {
/* ensure we didn't miss the init patch and leave it writable! */
ASSERT(!TEST(SELFPROT_GENCODE, DYNAMO_OPTION(protect_mask)) || !code->writable);
/* this is only done for detach, so no need to re-protect */
protect_generated_code(code, WRITABLE);
emit_patch_syscall(dcontext, target _IF_X64(mode));
}
}
void
arch_patch_syscall(dcontext_t *dcontext, byte *target)
{
if (dcontext == GLOBAL_DCONTEXT) {
arch_patch_syscall_common(GLOBAL_DCONTEXT, target _IF_X64(GENCODE_X64));
IF_X64(arch_patch_syscall_common(GLOBAL_DCONTEXT, target _IF_X64(GENCODE_X86)));
} else
arch_patch_syscall_common(GLOBAL_DCONTEXT, target _IF_X64(GENCODE_FROM_DCONTEXT));
}
#endif
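/* Propagates IBL target hashtable changes into the emitted indirect branch
 * lookup routines for this context.
 */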
void
update_generated_hashtable_access(dcontext_t *dcontext)
{
update_indirect_branch_lookup(dcontext);
}
void
protect_generated_code(generated_code_t *code_in, bool writable)
{
/* i#936: prevent cl v16 (VS2010) from combining the two code->writable
* stores into one prior to the change_protection() call and from
* changing the conditionally-executed stores into always-executed
* stores of conditionally-determined values.
*/
volatile generated_code_t *code =
(generated_code_t *)vmcode_get_writable_addr((byte *)code_in);
if (TEST(SELFPROT_GENCODE, DYNAMO_OPTION(protect_mask)) &&
code->writable != writable) {
byte *genstart = (byte *)PAGE_START(code->gen_start_pc);
if (!writable) {
ASSERT(code->writable);
code->writable = writable;
}
STATS_INC(gencode_prot_changes);
change_protection(vmcode_get_writable_addr(genstart),
code->commit_end_pc - genstart, writable);
if (writable) {
ASSERT(!code->writable);
code->writable = writable;
}
}
}
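/* Maps fragment flags to the corresponding IBL source fragment type:
 * trace vs bb, shared vs private, or coarse-grain.
 */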
ibl_source_fragment_type_t
get_source_fragment_type(dcontext_t *dcontext, uint fragment_flags)
{
if (TEST(FRAG_IS_TRACE, fragment_flags)) {
return (TEST(FRAG_SHARED, fragment_flags)) ? IBL_TRACE_SHARED : IBL_TRACE_PRIVATE;
} else if (TEST(FRAG_COARSE_GRAIN, fragment_flags)) {
ASSERT(TEST(FRAG_SHARED, fragment_flags));
return IBL_COARSE_SHARED;
} else {
return (TEST(FRAG_SHARED, fragment_flags)) ? IBL_BB_SHARED : IBL_BB_PRIVATE;
}
}
#ifdef WINDOWS
bool
is_shared_syscall_routine(dcontext_t *dcontext, cache_pc pc)
{
if (DYNAMO_OPTION(shared_fragment_shared_syscalls)) {
return (pc == (cache_pc)shared_code->shared_syscall ||
pc == (cache_pc)shared_code->unlinked_shared_syscall)
IF_X64(||
(shared_code_x86 != NULL &&
(pc == (cache_pc)shared_code_x86->shared_syscall ||
pc == (cache_pc)shared_code_x86->unlinked_shared_syscall)) ||
(shared_code_x86_to_x64 != NULL &&
(pc == (cache_pc)shared_code_x86_to_x64->shared_syscall ||
pc == (cache_pc)shared_code_x86_to_x64->unlinked_shared_syscall)));
} else {
generated_code_t *code = THREAD_GENCODE(dcontext);
return (code != NULL &&
(pc == (cache_pc)code->shared_syscall ||
pc == (cache_pc)code->unlinked_shared_syscall));
}
}
#endif
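/* Returns whether pc is any IBL routine entry point (on Windows this also
 * includes the shared_syscall routines).
 */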
bool
is_indirect_branch_lookup_routine(dcontext_t *dcontext, cache_pc pc)
{
#ifdef WINDOWS
if (is_shared_syscall_routine(dcontext, pc))
return true;
#endif
/* we only care if it is found */
return get_ibl_routine_type_ex(dcontext, pc, NULL _IF_X86_64(NULL));
}
/* Promotes the current ibl routine from IBL_BB* to IBL_TRACE*
* preserving other properties. There seems to be no need for the
* opposite transformation.
*/
cache_pc
get_trace_ibl_routine(dcontext_t *dcontext, cache_pc current_entry)
{
ibl_type_t ibl_type = { 0 };
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type(dcontext, current_entry, &ibl_type);
ASSERT(is_ibl);
ASSERT(IS_IBL_BB(ibl_type.source_fragment_type));
return
#ifdef WINDOWS
DYNAMO_OPTION(shared_syscalls) &&
is_shared_syscall_routine(dcontext, current_entry)
? current_entry
:
#endif
get_ibl_routine(dcontext, ibl_type.link_state,
(ibl_type.source_fragment_type == IBL_BB_PRIVATE)
? IBL_TRACE_PRIVATE
: IBL_TRACE_SHARED,
ibl_type.branch_type);
}
/* Shifts the current ibl routine from IBL_BB_SHARED to IBL_BB_PRIVATE,
* preserving other properties.
* There seems to be no need for the opposite transformation
*/
cache_pc
get_private_ibl_routine(dcontext_t *dcontext, cache_pc current_entry)
{
ibl_type_t ibl_type = { 0 };
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type(dcontext, current_entry, &ibl_type);
ASSERT(is_ibl);
ASSERT(IS_IBL_BB(ibl_type.source_fragment_type));
return get_ibl_routine(dcontext, ibl_type.link_state, IBL_BB_PRIVATE,
ibl_type.branch_type);
}
/* Shifts the current ibl routine from IBL_BB_PRIVATE to IBL_BB_SHARED,
* preserving other properties.
* There seems to be no need for the opposite transformation
*/
cache_pc
get_shared_ibl_routine(dcontext_t *dcontext, cache_pc current_entry)
{
ibl_type_t ibl_type = { 0 };
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type(dcontext, current_entry, &ibl_type);
ASSERT(is_ibl);
ASSERT(IS_IBL_BB(ibl_type.source_fragment_type));
return get_ibl_routine(dcontext, ibl_type.link_state, IBL_BB_SHARED,
ibl_type.branch_type);
}
/* gets the corresponding routine to current_entry but matching whether
* FRAG_IS_TRACE and FRAG_SHARED are set in flags
*/
cache_pc
get_alternate_ibl_routine(dcontext_t *dcontext, cache_pc current_entry, uint flags)
{
ibl_type_t ibl_type = { 0 };
IF_X86_64(gencode_mode_t mode = GENCODE_FROM_DCONTEXT;)
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type_ex(dcontext, current_entry, &ibl_type _IF_X86_64(&mode));
ASSERT(is_ibl);
#ifdef WINDOWS
/* shared_syscalls does not change currently
* FIXME: once we support targeting both private and shared syscall
* we will need to change sharing here
*/
if (DYNAMO_OPTION(shared_syscalls) &&
is_shared_syscall_routine(dcontext, current_entry))
return current_entry;
#endif
return get_ibl_routine_ex(dcontext, ibl_type.link_state,
get_source_fragment_type(dcontext, flags),
ibl_type.branch_type _IF_X86_64(mode));
}
static ibl_entry_point_type_t
get_unlinked_type(ibl_entry_point_type_t link_state)
{
#if defined(X86) && defined(X64)
if (link_state == IBL_TRACE_CMP)
return IBL_TRACE_CMP_UNLINKED;
#endif
if (link_state == IBL_FAR)
return IBL_FAR_UNLINKED;
else
return IBL_UNLINKED;
}
static ibl_entry_point_type_t
get_linked_type(ibl_entry_point_type_t unlink_state)
{
#if defined(X86) && defined(X64)
if (unlink_state == IBL_TRACE_CMP_UNLINKED)
return IBL_TRACE_CMP;
#endif
if (unlink_state == IBL_FAR_UNLINKED)
return IBL_FAR;
else
return IBL_LINKED;
}
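/* Given an unlinked IBL entry point, returns the corresponding linked entry
 * point (or the linked shared_syscall routine on Windows).
 */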
cache_pc
get_linked_entry(dcontext_t *dcontext, cache_pc unlinked_entry)
{
ibl_type_t ibl_type = { 0 };
IF_X86_64(gencode_mode_t mode = GENCODE_FROM_DCONTEXT;)
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type_ex(dcontext, unlinked_entry, &ibl_type _IF_X86_64(&mode));
ASSERT(is_ibl && IS_IBL_UNLINKED(ibl_type.link_state));
#ifdef WINDOWS
if (unlinked_entry == unlinked_shared_syscall_routine_ex(dcontext _IF_X86_64(mode))) {
return shared_syscall_routine_ex(dcontext _IF_X86_64(mode));
}
#endif
return get_ibl_routine_ex(dcontext,
                              /* for -unsafe_ignore_eflags_{ibl,trace} the trace cmp
                               * entry and unlinked entry are identical, so we may mix
                               * them up but will have no problems */
get_linked_type(ibl_type.link_state),
ibl_type.source_fragment_type,
ibl_type.branch_type _IF_X86_64(mode));
}
#if defined(X86) && defined(X64)
cache_pc
get_trace_cmp_entry(dcontext_t *dcontext, cache_pc linked_entry)
{
ibl_type_t ibl_type = { 0 };
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type(dcontext, linked_entry, &ibl_type);
IF_WINDOWS(ASSERT(linked_entry != shared_syscall_routine(dcontext)));
ASSERT(is_ibl && ibl_type.link_state == IBL_LINKED);
return get_ibl_routine(dcontext, IBL_TRACE_CMP, ibl_type.source_fragment_type,
ibl_type.branch_type);
}
#endif
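/* Given a linked IBL entry point, returns the corresponding unlinked entry
 * point (or the unlinked shared_syscall routine on Windows).
 */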
cache_pc
get_unlinked_entry(dcontext_t *dcontext, cache_pc linked_entry)
{
ibl_type_t ibl_type = { 0 };
IF_X86_64(gencode_mode_t mode = GENCODE_FROM_DCONTEXT;)
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type_ex(dcontext, linked_entry, &ibl_type _IF_X86_64(&mode));
ASSERT(is_ibl && IS_IBL_LINKED(ibl_type.link_state));
#ifdef WINDOWS
if (linked_entry == shared_syscall_routine_ex(dcontext _IF_X86_64(mode)))
return unlinked_shared_syscall_routine_ex(dcontext _IF_X86_64(mode));
#endif
return get_ibl_routine_ex(dcontext, get_unlinked_type(ibl_type.link_state),
ibl_type.source_fragment_type,
ibl_type.branch_type _IF_X86_64(mode));
}
static bool
in_generated_shared_routine(dcontext_t *dcontext, cache_pc pc)
{
if (USE_SHARED_GENCODE()) {
return (pc >= (cache_pc)(shared_code->gen_start_pc) &&
pc < (cache_pc)(shared_code->commit_end_pc))
IF_X86_64(||
(shared_code_x86 != NULL &&
pc >= (cache_pc)(shared_code_x86->gen_start_pc) &&
pc < (cache_pc)(shared_code_x86->commit_end_pc)) ||
(shared_code_x86_to_x64 != NULL &&
pc >= (cache_pc)(shared_code_x86_to_x64->gen_start_pc) &&
pc < (cache_pc)(shared_code_x86_to_x64->commit_end_pc)));
}
return false;
}
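/* Returns whether pc lies within this thread's gencode region or any shared
 * gencode region.
 */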
bool
in_generated_routine(dcontext_t *dcontext, cache_pc pc)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (
(pc >= (cache_pc)(code->gen_start_pc) && pc < (cache_pc)(code->commit_end_pc)) ||
in_generated_shared_routine(dcontext, pc));
/* FIXME: what about inlined IBL stubs */
}
static bool
in_fcache_return_for_gencode(generated_code_t *code, cache_pc pc)
{
return pc != NULL &&
((pc >= code->fcache_return && pc < code->fcache_return_end) ||
(pc >= code->fcache_return_coarse && pc < code->fcache_return_coarse_end));
}
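/* Returns whether pc is inside an fcache_return (or coarse fcache_return)
 * routine, checking thread-private gencode and all shared gencode copies.
 */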
bool
in_fcache_return(dcontext_t *dcontext, cache_pc pc)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
if (in_fcache_return_for_gencode(code, pc))
return true;
if (USE_SHARED_GENCODE()) {
if (in_fcache_return_for_gencode(shared_code, pc))
return true;
#if defined(X86) && defined(X64)
if (shared_code_x86 != NULL && in_fcache_return_for_gencode(shared_code_x86, pc))
return true;
if (shared_code_x86_to_x64 != NULL &&
in_fcache_return_for_gencode(shared_code_x86_to_x64, pc))
return true;
#endif
}
return false;
}
static bool
in_clean_call_save_for_gencode(generated_code_t *code, cache_pc pc)
{
return pc != NULL && pc >= code->clean_call_save && pc < code->clean_call_restore;
}
static bool
in_clean_call_restore_for_gencode(generated_code_t *code, cache_pc pc)
{
return pc != NULL && pc >= code->clean_call_restore &&
pc < code->clean_call_restore_end;
}
bool
in_clean_call_save(dcontext_t *dcontext, cache_pc pc)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
if (in_clean_call_save_for_gencode(code, pc))
return true;
if (USE_SHARED_GENCODE()) {
if (in_clean_call_save_for_gencode(shared_code, pc))
return true;
#if defined(X86) && defined(X64)
if (shared_code_x86 != NULL &&
in_clean_call_save_for_gencode(shared_code_x86, pc))
return true;
if (shared_code_x86_to_x64 != NULL &&
in_clean_call_save_for_gencode(shared_code_x86_to_x64, pc))
return true;
#endif
}
return false;
}
bool
in_clean_call_restore(dcontext_t *dcontext, cache_pc pc)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
if (in_clean_call_restore_for_gencode(code, pc))
return true;
if (USE_SHARED_GENCODE()) {
if (in_clean_call_restore_for_gencode(shared_code, pc))
return true;
#if defined(X86) && defined(X64)
if (shared_code_x86 != NULL &&
in_clean_call_restore_for_gencode(shared_code_x86, pc))
return true;
if (shared_code_x86_to_x64 != NULL &&
in_clean_call_restore_for_gencode(shared_code_x86_to_x64, pc))
return true;
#endif
}
return false;
}
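/* Returns whether pc falls within the body of any linked IBL routine for this
 * context (does not cover inlined IBL stubs: see FIXME below).
 */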
bool
in_indirect_branch_lookup_code(dcontext_t *dcontext, cache_pc pc)
{
ibl_source_fragment_type_t source_fragment_type;
ibl_branch_type_t branch_type;
for (source_fragment_type = IBL_SOURCE_TYPE_START;
source_fragment_type < IBL_SOURCE_TYPE_END; source_fragment_type++) {
for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END;
branch_type++) {
if (pc >= get_ibl_routine(dcontext, IBL_LINKED, source_fragment_type,
branch_type) &&
pc < get_ibl_routine(dcontext, IBL_UNLINKED, source_fragment_type,
branch_type))
return true;
}
}
return false; /* not an IBL */
/* FIXME: what about inlined IBL stubs */
}
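/* Returns the thread-appropriate fcache_enter routine as a callable function
 * pointer.
 */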
fcache_enter_func_t
fcache_enter_routine(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (fcache_enter_func_t)convert_data_to_function(code->fcache_enter);
}
/* exported to dispatch.c */
fcache_enter_func_t
get_fcache_enter_private_routine(dcontext_t *dcontext)
{
return fcache_enter_routine(dcontext);
}
fcache_enter_func_t
get_fcache_enter_gonative_routine(dcontext_t *dcontext)
{
#ifdef ARM
generated_code_t *code = THREAD_GENCODE(dcontext);
return (fcache_enter_func_t)convert_data_to_function(code->fcache_enter_gonative);
#else
return fcache_enter_routine(dcontext);
#endif
}
cache_pc
get_reset_exit_stub(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->reset_exit_stub;
}
cache_pc
get_do_syscall_entry(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->do_syscall;
}
#ifdef WINDOWS
fcache_enter_func_t
get_fcache_enter_indirect_routine(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (fcache_enter_func_t)convert_data_to_function(code->fcache_enter_indirect);
}
cache_pc
get_do_callback_return_entry(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->do_callback_return;
}
#else
/* PR 286922: we need an int syscall even when vsyscall is sys{call,enter} */
cache_pc
get_do_int_syscall_entry(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->do_int_syscall;
}
cache_pc
get_do_int81_syscall_entry(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->do_int81_syscall;
}
cache_pc
get_do_int82_syscall_entry(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->do_int82_syscall;
}
cache_pc
get_do_clone_syscall_entry(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->do_clone_syscall;
}
# ifdef VMX86_SERVER
cache_pc
get_do_vmkuw_syscall_entry(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->do_vmkuw_syscall;
}
# endif
#endif
cache_pc
fcache_return_routine(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->fcache_return;
}
cache_pc
fcache_return_routine_ex(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode))
{
generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(mode));
return (cache_pc)code->fcache_return;
}
cache_pc fcache_return_coarse_routine(IF_X86_64_ELSE(gencode_mode_t mode, void))
{
generated_code_t *code = get_shared_gencode(GLOBAL_DCONTEXT _IF_X86_64(mode));
ASSERT(DYNAMO_OPTION(coarse_units));
if (code == NULL)
return NULL;
else
return (cache_pc)code->fcache_return_coarse;
}
cache_pc trace_head_return_coarse_routine(IF_X86_64_ELSE(gencode_mode_t mode, void))
{
generated_code_t *code = get_shared_gencode(GLOBAL_DCONTEXT _IF_X86_64(mode));
ASSERT(DYNAMO_OPTION(coarse_units));
if (code == NULL)
return NULL;
else
return (cache_pc)code->trace_head_return_coarse;
}
cache_pc
get_clean_call_save(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode))
{
generated_code_t *code;
if (client_clean_call_is_thread_private())
code = get_emitted_routines_code(dcontext _IF_X86_64(mode));
else
code = get_emitted_routines_code(GLOBAL_DCONTEXT _IF_X86_64(mode));
ASSERT(code != NULL);
/* FIXME i#1551: NYI on ARM (we need emit_clean_call_save()) */
IF_ARM(ASSERT_NOT_IMPLEMENTED(false));
return (cache_pc)code->clean_call_save;
}
cache_pc
get_clean_call_restore(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode))
{
generated_code_t *code;
if (client_clean_call_is_thread_private())
code = get_emitted_routines_code(dcontext _IF_X86_64(mode));
else
code = get_emitted_routines_code(GLOBAL_DCONTEXT _IF_X86_64(mode));
ASSERT(code != NULL);
/* FIXME i#1551: NYI on ARM (we need emit_clean_call_restore()) */
IF_ARM(ASSERT_NOT_IMPLEMENTED(false));
return (cache_pc)code->clean_call_restore;
}
static inline cache_pc
get_special_ibl_xfer_entry(dcontext_t *dcontext, int index)
{
generated_code_t *code;
if (special_ibl_xfer_is_thread_private()) {
ASSERT(dcontext != GLOBAL_DCONTEXT);
code = THREAD_GENCODE(dcontext);
} else
code = SHARED_GENCODE_MATCH_THREAD(dcontext);
ASSERT(index >= 0 && index < NUM_SPECIAL_IBL_XFERS);
return code->special_ibl_xfer[index];
}
#ifdef CLIENT_INTERFACE
cache_pc
get_client_ibl_xfer_entry(dcontext_t *dcontext)
{
return get_special_ibl_xfer_entry(dcontext, CLIENT_IBL_IDX);
}
#endif
#ifdef UNIX
cache_pc
get_native_plt_ibl_xfer_entry(dcontext_t *dcontext)
{
return get_special_ibl_xfer_entry(dcontext, NATIVE_PLT_IBL_IDX);
}
cache_pc
get_native_ret_ibl_xfer_entry(dcontext_t *dcontext)
{
return get_special_ibl_xfer_entry(dcontext, NATIVE_RET_IBL_IDX);
}
#endif
/* returns false if target is not an IBL routine.
* if type is not NULL it is set to the type of the found routine.
* if mode_out is NULL, dcontext cannot be GLOBAL_DCONTEXT.
* if mode_out is not NULL, it is set to which mode the found routine is in.
*/
bool
get_ibl_routine_type_ex(dcontext_t *dcontext, cache_pc target,
ibl_type_t *type _IF_X86_64(gencode_mode_t *mode_out))
{
/* This variable is int instead of ibl_entry_point_type_t. This is because
* below we use it as loop index variable which can take negative values.
* It is possible that ibl_entry_point_type_t, which is an enum, has an
* underlying unsigned type which can cause problems due to wrap around.
*/
int link_state;
ibl_source_fragment_type_t source_fragment_type;
ibl_branch_type_t branch_type;
#if defined(X86) && defined(X64)
gencode_mode_t mode;
#endif
/* An up-front range check. Many calls into this routine are with addresses
* outside of the IBL code or the generated_code_t in which IBL resides.
* For all of those cases, this quick up-front check saves the expense of
* examining all of the different IBL entry points.
*/
if ((shared_code == NULL || target < shared_code->gen_start_pc ||
target >= shared_code->gen_end_pc)
IF_X86_64(&&(shared_code_x86 == NULL ||
target < shared_code_x86->gen_start_pc ||
target >= shared_code_x86->gen_end_pc) &&
(shared_code_x86_to_x64 == NULL ||
target < shared_code_x86_to_x64->gen_start_pc ||
target >= shared_code_x86_to_x64->gen_end_pc))) {
if (dcontext == GLOBAL_DCONTEXT || USE_SHARED_GENCODE_ALWAYS() ||
target < ((generated_code_t *)dcontext->private_code)->gen_start_pc ||
target >= ((generated_code_t *)dcontext->private_code)->gen_end_pc)
return false;
}
/* a decent compiler should inline these nested loops */
/* iterate in order <linked, unlinked> */
for (link_state = IBL_LINKED;
/* keep in mind we need a signed comparison when going downwards */
link_state >= (int)IBL_UNLINKED; link_state--) {
/* it is OK to compare to IBL_BB_PRIVATE even when !SHARED_FRAGMENTS_ENABLED() */
for (source_fragment_type = IBL_SOURCE_TYPE_START;
source_fragment_type < IBL_SOURCE_TYPE_END; source_fragment_type++) {
for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END;
branch_type++) {
#if defined(X86) && defined(X64)
for (mode = GENCODE_X64; mode <= GENCODE_X86_TO_X64; mode++) {
#endif
if (target ==
get_ibl_routine_ex(dcontext, link_state, source_fragment_type,
branch_type _IF_X86_64(mode))) {
if (type) {
type->link_state = link_state;
type->source_fragment_type = source_fragment_type;
type->branch_type = branch_type;
}
#if defined(X86) && defined(X64)
if (mode_out != NULL)
*mode_out = mode;
#endif
return true;
}
#if defined(X86) && defined(X64)
}
#endif
}
}
}
#ifdef WINDOWS
if (is_shared_syscall_routine(dcontext, target)) {
if (type != NULL) {
type->branch_type = IBL_SHARED_SYSCALL;
type->source_fragment_type = DEFAULT_IBL_BB();
# if defined(X86) && defined(X64)
for (mode = GENCODE_X64; mode <= GENCODE_X86_TO_X64; mode++) {
# endif
if (target ==
unlinked_shared_syscall_routine_ex(dcontext _IF_X86_64(mode)))
type->link_state = IBL_UNLINKED;
else
IF_X64(if (target ==
shared_syscall_routine_ex(dcontext _IF_X86_64(mode))))
type->link_state = IBL_LINKED;
# if defined(X86) && defined(X64)
else continue;
if (mode_out != NULL)
*mode_out = mode;
break;
}
# endif
}
return true;
}
#endif
return false; /* not an IBL */
}
bool
get_ibl_routine_type(dcontext_t *dcontext, cache_pc target, ibl_type_t *type)
{
IF_X64(ASSERT(dcontext != GLOBAL_DCONTEXT)); /* should call get_ibl_routine_type_ex */
return get_ibl_routine_type_ex(dcontext, target, type _IF_X86_64(NULL));
}
/* returns false if target is not an IBL template
if type is not NULL it is set to the type of the found routine
*/
static bool
get_ibl_routine_template_type(dcontext_t *dcontext, cache_pc target,
ibl_type_t *type _IF_X86_64(gencode_mode_t *mode_out))
{
ibl_source_fragment_type_t source_fragment_type;
ibl_branch_type_t branch_type;
#if defined(X86) && defined(X64)
gencode_mode_t mode;
#endif
for (source_fragment_type = IBL_SOURCE_TYPE_START;
source_fragment_type < IBL_SOURCE_TYPE_END; source_fragment_type++) {
for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END;
branch_type++) {
#if defined(X86) && defined(X64)
for (mode = GENCODE_X64; mode <= GENCODE_X86_TO_X64; mode++) {
#endif
if (target ==
get_ibl_routine_template(dcontext, source_fragment_type,
branch_type _IF_X86_64(mode))) {
if (type) {
type->link_state = IBL_TEMPLATE;
type->source_fragment_type = source_fragment_type;
type->branch_type = branch_type;
#if defined(X86) && defined(X64)
if (mode_out != NULL)
*mode_out = mode;
#endif
}
return true;
#if defined(X86) && defined(X64)
}
#endif
}
}
}
return false; /* not an IBL template */
}
const char *
get_branch_type_name(ibl_branch_type_t branch_type)
{
static const char *const ibl_brtype_names[IBL_BRANCH_TYPE_END] = { "ret", "indcall",
"indjmp" };
return ibl_brtype_names[branch_type];
}
ibl_branch_type_t
get_ibl_branch_type(instr_t *instr)
{
ASSERT(instr_is_mbr(instr) IF_X86(|| instr_get_opcode(instr) == OP_jmp_far ||
instr_get_opcode(instr) == OP_call_far));
if (instr_is_return(instr))
return IBL_RETURN;
else if (instr_is_call_indirect(instr))
return IBL_INDCALL;
else
return IBL_INDJMP;
}
/* returns a symbolic name if target is an IBL routine or an IBL template,
* otherwise returns NULL
*/
const char *
get_ibl_routine_name(dcontext_t *dcontext, cache_pc target, const char **ibl_brtype_name)
{
#if defined(X86) && defined(X64)
static const char
*const ibl_routine_names[3][IBL_SOURCE_TYPE_END][IBL_LINK_STATE_END] = {
{
{ "shared_unlinked_bb_ibl", "shared_delete_bb_ibl", "shared_bb_far",
"shared_bb_far_unlinked", "shared_bb_cmp", "shared_bb_cmp_unlinked",
"shared_bb_ibl", "shared_bb_ibl_template" },
{ "shared_unlinked_trace_ibl", "shared_delete_trace_ibl",
"shared_trace_far", "shared_trace_far_unlinked", "shared_trace_cmp",
"shared_trace_cmp_unlinked", "shared_trace_ibl",
"shared_trace_ibl_template" },
{ "private_unlinked_bb_ibl", "private_delete_bb_ibl", "private_bb_far",
"private_bb_far_unlinked", "private_bb_cmp", "private_bb_cmp_unlinked",
"private_bb_ibl", "private_bb_ibl_template" },
{ "private_unlinked_trace_ibl", "private_delete_trace_ibl",
"private_trace_far", "private_trace_far_unlinked", "private_trace_cmp",
"private_trace_cmp_unlinked", "private_trace_ibl",
"private_trace_ibl_template" },
{ "shared_unlinked_coarse_ibl", "shared_delete_coarse_ibl",
"shared_coarse_trace_far", "shared_coarse_trace_far_unlinked",
"shared_coarse_trace_cmp", "shared_coarse_trace_cmp_unlinked",
"shared_coarse_ibl", "shared_coarse_ibl_template" },
/* PR 282576: for WOW64 processes we have separate x86 routines */
},
{
{ "x86_shared_unlinked_bb_ibl", "x86_shared_delete_bb_ibl",
"x86_shared_bb_far", "x86_shared_bb_far_unlinked",
IF_X64_("x86_shared_bb_cmp")
IF_X64_("x86_shared_bb_cmp_unlinked") "x86_shared_bb_ibl",
"x86_shared_bb_ibl_template" },
{ "x86_shared_unlinked_trace_ibl", "x86_shared_delete_trace_ibl",
"x86_shared_trace_far", "x86_shared_trace_far_unlinked",
IF_X64_("x86_shared_trace_cmp")
IF_X64_("x86_shared_trace_cmp_unlinked") "x86_shared_trace_ibl",
"x86_shared_trace_ibl_template" },
{ "x86_private_unlinked_bb_ibl", "x86_private_delete_bb_ibl",
"x86_private_bb_far", "x86_private_bb_far_unlinked",
IF_X64_("x86_private_bb_cmp")
IF_X64_("x86_private_bb_cmp_unlinked") "x86_private_bb_ibl",
"x86_private_bb_ibl_template" },
{ "x86_private_unlinked_trace_ibl", "x86_private_delete_trace_ibl",
"x86_private_trace_far", "x86_private_trace_far_unlinked",
IF_X64_("x86_private_trace_cmp")
IF_X64_("x86_private_trace_cmp_unlinked") "x86_private_trace_ibl",
"x86_private_trace_ibl_template" },
{ "x86_shared_unlinked_coarse_ibl", "x86_shared_delete_coarse_ibl",
"x86_shared_coarse_trace_far", "x86_shared_coarse_trace_far_unlinked",
IF_X64_("x86_shared_coarse_trace_cmp") IF_X64_(
"x86_shared_coarse_trace_cmp_unlinked") "x86_shared_coarse_ibl",
"x86_shared_coarse_ibl_template" },
},
{
{ "x86_to_x64_shared_unlinked_bb_ibl", "x86_to_x64_shared_delete_bb_ibl",
"x86_to_x64_shared_bb_far", "x86_to_x64_shared_bb_far_unlinked",
"x86_to_x64_shared_bb_cmp", "x86_to_x64_shared_bb_cmp_unlinked",
"x86_to_x64_shared_bb_ibl", "x86_to_x64_shared_bb_ibl_template" },
{ "x86_to_x64_shared_unlinked_trace_ibl",
"x86_to_x64_shared_delete_trace_ibl", "x86_to_x64_shared_trace_far",
"x86_to_x64_shared_trace_far_unlinked", "x86_to_x64_shared_trace_cmp",
"x86_to_x64_shared_trace_cmp_unlinked", "x86_to_x64_shared_trace_ibl",
"x86_to_x64_shared_trace_ibl_template" },
{ "x86_to_x64_private_unlinked_bb_ibl",
"x86_to_x64_private_delete_bb_ibl", "x86_to_x64_private_bb_far",
"x86_to_x64_private_bb_far_unlinked", "x86_to_x64_private_bb_cmp",
"x86_to_x64_private_bb_cmp_unlinked",
/* clang-format off */
"x86_to_x64_private_bb_ibl",
"x86_to_x64_private_bb_ibl_template" },
/* clang-format on */
{ "x86_to_x64_private_unlinked_trace_ibl",
"x86_to_x64_private_delete_trace_ibl", "x86_to_x64_private_trace_far",
"x86_to_x64_private_trace_far_unlinked", "x86_to_x64_private_trace_cmp",
"x86_to_x64_private_trace_cmp_unlinked", "x86_to_x64_private_trace_ibl",
"x86_to_x64_private_trace_ibl_template" },
{ "x86_to_x64_shared_unlinked_coarse_ibl",
"x86_to_x64_shared_delete_coarse_ibl",
"x86_to_x64_shared_coarse_trace_far",
"x86_to_x64_shared_coarse_trace_far_unlinked",
"x86_to_x64_shared_coarse_trace_cmp",
"x86_to_x64_shared_coarse_trace_cmp_unlinked",
"x86_to_x64_shared_coarse_ibl",
"x86_to_x64_shared_coarse_ibl_template" },
}
};
#else
static const char *const ibl_routine_names[IBL_SOURCE_TYPE_END][IBL_LINK_STATE_END] =
{
{ "shared_unlinked_bb_ibl", "shared_delete_bb_ibl", "shared_bb_far",
"shared_bb_far_unlinked", "shared_bb_ibl", "shared_bb_ibl_template" },
{ "shared_unlinked_trace_ibl", "shared_delete_trace_ibl", "shared_trace_far",
"shared_trace_far_unlinked", "shared_trace_ibl",
"shared_trace_ibl_template" },
{ "private_unlinked_bb_ibl", "private_delete_bb_ibl", "private_bb_far",
"private_bb_far_unlinked", "private_bb_ibl", "private_bb_ibl_template" },
{ "private_unlinked_trace_ibl", "private_delete_trace_ibl",
"private_trace_far", "private_trace_far_unlinked", "private_trace_ibl",
"private_trace_ibl_template" },
{ "shared_unlinked_coarse_ibl", "shared_delete_coarse_ibl",
"shared_coarse_trace_far", "shared_coarse_trace_far_unlinked",
"shared_coarse_ibl", "shared_coarse_ibl_template" },
};
#endif
ibl_type_t ibl_type;
#if defined(X86) && defined(X64)
gencode_mode_t mode;
#endif
if (!get_ibl_routine_type_ex(dcontext, target, &ibl_type _IF_X86_64(&mode))) {
/* not an IBL routine */
if (!get_ibl_routine_template_type(dcontext, target,
&ibl_type _IF_X86_64(&mode))) {
return NULL; /* not an IBL template either */
}
}
/* ibl_type is valid and will give routine or template name, and qualifier */
*ibl_brtype_name = get_branch_type_name(ibl_type.branch_type);
return ibl_routine_names IF_X86_64(
[mode])[ibl_type.source_fragment_type][ibl_type.link_state];
}
static inline ibl_code_t *
get_ibl_routine_code_internal(
dcontext_t *dcontext, ibl_source_fragment_type_t source_fragment_type,
ibl_branch_type_t branch_type _IF_X86_64(gencode_mode_t mode))
{
#if defined(X86) && defined(X64)
if (((mode == GENCODE_X86 ||
(mode == GENCODE_FROM_DCONTEXT && dcontext != GLOBAL_DCONTEXT &&
dcontext->isa_mode == DR_ISA_IA32 && !X64_CACHE_MODE_DC(dcontext))) &&
shared_code_x86 == NULL) ||
((mode == GENCODE_X86_TO_X64 ||
(mode == GENCODE_FROM_DCONTEXT && dcontext != GLOBAL_DCONTEXT &&
dcontext->isa_mode == DR_ISA_IA32 && X64_CACHE_MODE_DC(dcontext))) &&
shared_code_x86_to_x64 == NULL))
return NULL;
#endif
switch (source_fragment_type) {
case IBL_BB_SHARED:
if (!USE_SHARED_BB_IBL())
return NULL;
return &(get_shared_gencode(dcontext _IF_X86_64(mode))->bb_ibl[branch_type]);
case IBL_BB_PRIVATE:
return &(
get_emitted_routines_code(dcontext _IF_X86_64(mode))->bb_ibl[branch_type]);
case IBL_TRACE_SHARED:
if (!USE_SHARED_TRACE_IBL())
return NULL;
return &(get_shared_gencode(dcontext _IF_X86_64(mode))->trace_ibl[branch_type]);
case IBL_TRACE_PRIVATE:
return &(
get_emitted_routines_code(dcontext _IF_X86_64(mode))->trace_ibl[branch_type]);
case IBL_COARSE_SHARED:
if (!DYNAMO_OPTION(coarse_units))
return NULL;
return &(get_shared_gencode(dcontext _IF_X86_64(mode))->coarse_ibl[branch_type]);
default: ASSERT_NOT_REACHED();
}
ASSERT_NOT_REACHED();
return NULL;
}
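/* Returns the requested entry point of the IBL routine identified by the
 * source fragment type and branch type, or NULL if that routine was not
 * emitted for this context.
 */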
cache_pc
get_ibl_routine_ex(dcontext_t *dcontext, ibl_entry_point_type_t entry_type,
ibl_source_fragment_type_t source_fragment_type,
ibl_branch_type_t branch_type _IF_X86_64(gencode_mode_t mode))
{
ibl_code_t *ibl_code = get_ibl_routine_code_internal(dcontext, source_fragment_type,
branch_type _IF_X86_64(mode));
if (ibl_code == NULL || !ibl_code->initialized)
return NULL;
switch (entry_type) {
case IBL_LINKED: return (cache_pc)ibl_code->indirect_branch_lookup_routine;
case IBL_UNLINKED: return (cache_pc)ibl_code->unlinked_ibl_entry;
case IBL_DELETE: return (cache_pc)ibl_code->target_delete_entry;
case IBL_FAR: return (cache_pc)ibl_code->far_ibl;
case IBL_FAR_UNLINKED: return (cache_pc)ibl_code->far_ibl_unlinked;
#if defined(X86) && defined(X64)
case IBL_TRACE_CMP: return (cache_pc)ibl_code->trace_cmp_entry;
case IBL_TRACE_CMP_UNLINKED: return (cache_pc)ibl_code->trace_cmp_unlinked;
#endif
default: ASSERT_NOT_REACHED();
}
return NULL;
}
cache_pc
get_ibl_routine(dcontext_t *dcontext, ibl_entry_point_type_t entry_type,
ibl_source_fragment_type_t source_fragment_type,
ibl_branch_type_t branch_type)
{
return get_ibl_routine_ex(dcontext, entry_type, source_fragment_type,
branch_type _IF_X86_64(GENCODE_FROM_DCONTEXT));
}
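/* Returns the inlined IBL stub template for the given source fragment type and
 * branch type, or NULL if that IBL code was not emitted.
 */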
cache_pc
get_ibl_routine_template(dcontext_t *dcontext,
ibl_source_fragment_type_t source_fragment_type,
ibl_branch_type_t branch_type _IF_X86_64(gencode_mode_t mode))
{
ibl_code_t *ibl_code = get_ibl_routine_code_internal(dcontext, source_fragment_type,
branch_type _IF_X86_64(mode));
if (ibl_code == NULL || !ibl_code->initialized)
return NULL;
return ibl_code->inline_ibl_stub_template;
}
/* Convert FRAG_TABLE_* flags to FRAG_* flags */
/* FIXME This seems more appropriate in fragment.c but since there's no
* need for the functionality there, we place it here and inline it. We
* can move it if other pieces need the functionality later.
*/
static inline uint
table_flags_to_frag_flags(dcontext_t *dcontext, ibl_table_t *table)
{
uint flags = 0;
if (TEST(FRAG_TABLE_TARGET_SHARED, table->table_flags))
flags |= FRAG_SHARED;
if (TEST(FRAG_TABLE_TRACE, table->table_flags))
flags |= FRAG_IS_TRACE;
/* We want to make sure that any updates to FRAG_TABLE_* flags
* are reflected in this routine. */
ASSERT_NOT_IMPLEMENTED(!TESTANY(
~(FRAG_TABLE_INCLUSIVE_HIERARCHY | FRAG_TABLE_IBL_TARGETED |
FRAG_TABLE_TARGET_SHARED | FRAG_TABLE_SHARED | FRAG_TABLE_TRACE |
FRAG_TABLE_PERSISTENT | HASHTABLE_USE_ENTRY_STATS | HASHTABLE_ALIGN_TABLE),
table->table_flags));
return flags;
}
/* Derive the PC of an entry point that aids in atomic hashtable deletion.
* FIXME: Once we can correlate from what table the fragment is being
* deleted and therefore type of the corresponding IBL routine, we can
* widen the interface and be more precise about which entry point
* is returned, i.e., specify something other than IBL_GENERIC.
*/
cache_pc
get_target_delete_entry_pc(dcontext_t *dcontext, ibl_table_t *table)
{
/*
* A shared IBL routine makes sure any registers restored on the
     * miss path are all saved in the current dcontext, as well as
     * copying ECX into both TLS scratch and the dcontext, so it is OK
* to simply return the thread private routine. We have
* proven that they are functionally equivalent (all data in the
* shared lookup is fs indirected to the private dcontext)
*
* FIXME: we can in fact use a global delete_pc entry point that
* is the unlinked path of a shared_ibl_not_found, just like we
* could share all routines. Since it doesn't matter much for now
* we can also return the slightly more efficient private
* ibl_not_found path.
*/
uint frag_flags = table_flags_to_frag_flags(dcontext, table);
ASSERT(dcontext != GLOBAL_DCONTEXT);
return (cache_pc)get_ibl_routine(dcontext, IBL_DELETE,
get_source_fragment_type(dcontext, frag_flags),
table->branch_type);
}
ibl_code_t *
get_ibl_routine_code_ex(dcontext_t *dcontext, ibl_branch_type_t branch_type,
uint fragment_flags _IF_X86_64(gencode_mode_t mode))
{
ibl_source_fragment_type_t source_fragment_type =
get_source_fragment_type(dcontext, fragment_flags);
ibl_code_t *ibl_code = get_ibl_routine_code_internal(dcontext, source_fragment_type,
branch_type _IF_X86_64(mode));
ASSERT(ibl_code != NULL);
return ibl_code;
}
ibl_code_t *
get_ibl_routine_code(dcontext_t *dcontext, ibl_branch_type_t branch_type,
uint fragment_flags)
{
return get_ibl_routine_code_ex(
dcontext, branch_type,
fragment_flags _IF_X86_64(dcontext == GLOBAL_DCONTEXT
? FRAGMENT_GENCODE_MODE(fragment_flags)
: GENCODE_FROM_DCONTEXT));
}
#ifdef WINDOWS
/* FIXME We support private and shared fragments simultaneously targeting
 * shared syscall -- -shared_fragment_shared_syscalls must be on and both
 * fragment types target the entry point in shared_code.  We could optimize
 * the private fragment->shared syscall path (case 8025).
 */
/* PR 282576: These separate routines are ugly, but less ugly than adding param to
* the main routines, which are called in many places and usually passed a
* non-global dcontext; also less ugly than adding GLOBAL_DCONTEXT_X86.
*/
cache_pc
shared_syscall_routine_ex(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode))
{
generated_code_t *code = DYNAMO_OPTION(shared_fragment_shared_syscalls)
? get_shared_gencode(dcontext _IF_X86_64(mode))
: get_emitted_routines_code(dcontext _IF_X86_64(mode));
if (code == NULL)
return NULL;
else
return (cache_pc)code->shared_syscall;
}
cache_pc
shared_syscall_routine(dcontext_t *dcontext)
{
return shared_syscall_routine_ex(dcontext _IF_X64(GENCODE_FROM_DCONTEXT));
}
cache_pc
unlinked_shared_syscall_routine_ex(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode))
{
generated_code_t *code = DYNAMO_OPTION(shared_fragment_shared_syscalls)
? get_shared_gencode(dcontext _IF_X86_64(mode))
: get_emitted_routines_code(dcontext _IF_X86_64(mode));
if (code == NULL)
return NULL;
else
return (cache_pc)code->unlinked_shared_syscall;
}
cache_pc
unlinked_shared_syscall_routine(dcontext_t *dcontext)
{
return unlinked_shared_syscall_routine_ex(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT));
}
cache_pc
after_shared_syscall_code(dcontext_t *dcontext)
{
return after_shared_syscall_code_ex(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT));
}
cache_pc
after_shared_syscall_code_ex(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode))
{
generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(mode));
ASSERT(code != NULL);
return (cache_pc)(code->unlinked_shared_syscall + code->sys_syscall_offs);
}
cache_pc
after_shared_syscall_addr(dcontext_t *dcontext)
{
ASSERT(get_syscall_method() != SYSCALL_METHOD_UNINITIALIZED);
if (DYNAMO_OPTION(sygate_int) && get_syscall_method() == SYSCALL_METHOD_INT)
return (int_syscall_address + INT_LENGTH /* sizeof int 2e */);
else
return after_shared_syscall_code(dcontext);
}
/* These are Windows-only since Linux needs to disambiguate its two
* versions of do_syscall
*/
cache_pc
after_do_syscall_code(dcontext_t *dcontext)
{
return after_do_syscall_code_ex(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT));
}
cache_pc
after_do_syscall_code_ex(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode))
{
generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(mode));
ASSERT(code != NULL);
return (cache_pc)(code->do_syscall + code->do_syscall_offs);
}
cache_pc
after_do_syscall_addr(dcontext_t *dcontext)
{
ASSERT(get_syscall_method() != SYSCALL_METHOD_UNINITIALIZED);
if (DYNAMO_OPTION(sygate_int) && get_syscall_method() == SYSCALL_METHOD_INT)
return (int_syscall_address + INT_LENGTH /* sizeof int 2e */);
else
return after_do_syscall_code(dcontext);
}
#else
cache_pc
after_do_shared_syscall_addr(dcontext_t *dcontext)
{
/* PR 212570: return the thread-shared do_syscall used for vsyscall hook */
generated_code_t *code =
get_emitted_routines_code(GLOBAL_DCONTEXT _IF_X86_64(GENCODE_X64));
IF_X86_64(ASSERT_NOT_REACHED()); /* else have to worry about GENCODE_X86 */
ASSERT(code != NULL);
ASSERT(code->do_syscall != NULL);
return (cache_pc)(code->do_syscall + code->do_syscall_offs);
}
cache_pc
after_do_syscall_addr(dcontext_t *dcontext)
{
/* PR 212570: return the thread-shared do_syscall used for vsyscall hook */
generated_code_t *code =
get_emitted_routines_code(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT));
ASSERT(code != NULL);
ASSERT(code->do_syscall != NULL);
return (cache_pc)(code->do_syscall + code->do_syscall_offs);
}
bool
is_after_main_do_syscall_addr(dcontext_t *dcontext, cache_pc pc)
{
generated_code_t *code =
get_emitted_routines_code(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT));
ASSERT(code != NULL);
return (pc == (cache_pc)(code->do_syscall + code->do_syscall_offs));
}
bool
is_after_do_syscall_addr(dcontext_t *dcontext, cache_pc pc)
{
generated_code_t *code =
get_emitted_routines_code(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT));
ASSERT(code != NULL);
return (
pc == (cache_pc)(code->do_syscall + code->do_syscall_offs) ||
pc ==
(cache_pc)(code->do_int_syscall + code->do_int_syscall_offs) IF_VMX86(
||
pc == (cache_pc)(code->do_vmkuw_syscall + code->do_vmkuw_syscall_offs)));
}
#endif
bool
is_after_syscall_address(dcontext_t *dcontext, cache_pc pc)
{
#ifdef WINDOWS
if (pc == after_shared_syscall_addr(dcontext))
return true;
if (pc == after_do_syscall_addr(dcontext))
return true;
return false;
#else
return is_after_do_syscall_addr(dcontext, pc);
#endif
/* NOTE - we ignore global_do_syscall since that's only used in special
* circumstances and is not something the callers (recreate_app_state)
* really know how to handle. */
}
/* needed b/c linux can have sysenter as main syscall method but also
* has generated int syscall routines
*/
bool
is_after_syscall_that_rets(dcontext_t *dcontext, cache_pc pc)
{
#ifdef WINDOWS
return (is_after_syscall_address(dcontext, pc) && does_syscall_ret_to_callsite());
#else
generated_code_t *code =
get_emitted_routines_code(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT));
ASSERT(code != NULL);
return (
(pc == (cache_pc)(code->do_syscall + code->do_syscall_offs) &&
does_syscall_ret_to_callsite()) ||
pc ==
(cache_pc)(code->do_int_syscall + code->do_int_syscall_offs) IF_VMX86(
||
pc == (cache_pc)(code->do_vmkuw_syscall + code->do_vmkuw_syscall_offs)));
#endif
}
#ifdef UNIX
/* PR 212290: can't be static code in x86.asm since it can't be PIC */
cache_pc
get_new_thread_start(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode))
{
# ifdef HAVE_TLS
/* for HAVE_TLS we use the shared version; w/o TLS we don't
* make any shared routines (PR 361894)
*/
dcontext = GLOBAL_DCONTEXT;
# endif
generated_code_t *gen = get_emitted_routines_code(dcontext _IF_X86_64(mode));
return gen->new_thread_dynamo_start;
}
#endif
#ifdef TRACE_HEAD_CACHE_INCR
cache_pc
trace_head_incr_routine(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->trace_head_incr;
}
#endif
#ifdef CHECK_RETURNS_SSE2_EMIT
cache_pc
get_pextrw_entry(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->pextrw;
}
cache_pc
get_pinsrw_entry(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
return (cache_pc)code->pinsrw;
}
#endif
/* exported beyond arch/ */
fcache_enter_func_t
get_fcache_enter_shared_routine(dcontext_t *dcontext)
{
return fcache_enter_shared_routine(dcontext);
}
fcache_enter_func_t
fcache_enter_shared_routine(dcontext_t *dcontext)
{
ASSERT(USE_SHARED_GENCODE());
return (fcache_enter_func_t)convert_data_to_function(
SHARED_GENCODE_MATCH_THREAD(dcontext)->fcache_enter);
}
cache_pc fcache_return_shared_routine(IF_X86_64_ELSE(gencode_mode_t mode, void))
{
generated_code_t *code = get_shared_gencode(GLOBAL_DCONTEXT _IF_X86_64(mode));
ASSERT(USE_SHARED_GENCODE());
if (code == NULL)
return NULL;
else
return code->fcache_return;
}
#ifdef TRACE_HEAD_CACHE_INCR
cache_pc trace_head_incr_shared_routine(IF_X86_64_ELSE(gencode_mode_t mode, void))
{
generated_code_t *code = get_shared_gencode(GLOBAL_DCONTEXT _IF_X86_64(mode));
ASSERT(USE_SHARED_GENCODE());
if (code == NULL)
return NULL;
else
return code->trace_head_incr;
}
#endif
/* get the fcache target for the next code cache entry */
cache_pc
get_fcache_target(dcontext_t *dcontext)
{
/* we used to use mcontext.pc, but that's in the writable
* portion of the dcontext, and so for self-protection we use the
* next_tag slot, which is protected
*/
return dcontext->next_tag;
}
/* set the fcache target for the next code cache entry */
void
set_fcache_target(dcontext_t *dcontext, cache_pc value)
{
/* we used to use mcontext.pc, but that's in the writable
* portion of the dcontext, and so for self-protection we use the
* next_tag slot, which is protected
*/
dcontext->next_tag = value;
/* set eip as well to complete mcontext state */
get_mcontext(dcontext)->pc = value;
}
/* For 32-bit linux apps on 64-bit kernels we assume that all syscalls that
* we use this for are ok w/ int (i.e., we don't need a sys{call,enter} version).
*/
byte *
get_global_do_syscall_entry()
{
int method = get_syscall_method();
if (method == SYSCALL_METHOD_INT) {
#ifdef WINDOWS
if (DYNAMO_OPTION(sygate_int))
return (byte *)global_do_syscall_sygate_int;
else
#endif
return (byte *)global_do_syscall_int;
} else if (method == SYSCALL_METHOD_SYSENTER) {
#ifdef WINDOWS
if (DYNAMO_OPTION(sygate_sysenter))
return (byte *)global_do_syscall_sygate_sysenter;
else
return (byte *)global_do_syscall_sysenter;
#else
return (byte *)global_do_syscall_int;
#endif
}
#ifdef WINDOWS
else if (method == SYSCALL_METHOD_WOW64)
return (byte *)global_do_syscall_wow64;
#endif
else if (method == SYSCALL_METHOD_SYSCALL) {
#if defined(X86) && defined(X64)
return (byte *)global_do_syscall_syscall;
#else
# ifdef WINDOWS
ASSERT_NOT_IMPLEMENTED(false && "PR 205898: 32-bit syscall on Windows NYI");
# else
return (byte *)global_do_syscall_int;
# endif
#endif
} else {
#ifdef UNIX
/* PR 205310: we sometimes have to execute syscalls before we
* see an app syscall: for a signal default action, e.g.
*/
return (byte *)IF_X86_64_ELSE(global_do_syscall_syscall, global_do_syscall_int);
#else
ASSERT_NOT_REACHED();
#endif
}
return NULL;
}
/* used only by cleanup_and_terminate to avoid the sysenter
* sygate hack version */
byte *
get_cleanup_and_terminate_global_do_syscall_entry()
{
/* see note above: for 32-bit linux apps we use int.
* xref PR 332427 as well where sysenter causes a crash
* if called from cleanup_and_terminate() where ebp is
* left pointing to the old freed stack.
*/
#if defined(WINDOWS) || (defined(X86) && defined(X64))
if (get_syscall_method() == SYSCALL_METHOD_SYSENTER)
return (byte *)global_do_syscall_sysenter;
else
#endif
#ifdef WINDOWS
if (get_syscall_method() == SYSCALL_METHOD_WOW64 && syscall_uses_wow64_index())
return (byte *)global_do_syscall_wow64_index0;
else
#endif
return get_global_do_syscall_entry();
}
#ifdef MACOS
/* There is no single resumption point from sysenter: each sysenter stores
* the caller's retaddr in edx. Thus, there is nothing to hook.
*/
bool
hook_vsyscall(dcontext_t *dcontext, bool method_changing)
{
return false;
}
bool
unhook_vsyscall(void)
{
return false;
}
#elif defined(LINUX)
/* PR 212570: for sysenter support we need to regain control after the
* kernel sets eip to a hardcoded user-mode address on the vsyscall page.
* The vsyscall code layout is as follows:
* 0xffffe400 <__kernel_vsyscall+0>: push %ecx
* 0xffffe401 <__kernel_vsyscall+1>: push %edx
* 0xffffe402 <__kernel_vsyscall+2>: push %ebp
* 0xffffe403 <__kernel_vsyscall+3>: mov %esp,%ebp
* 0xffffe405 <__kernel_vsyscall+5>: sysenter
* nops for alignment of return point:
* 0xffffe407 <__kernel_vsyscall+7>: nop
* 0xffffe408 <__kernel_vsyscall+8>: nop
* 0xffffe409 <__kernel_vsyscall+9>: nop
* 0xffffe40a <__kernel_vsyscall+10>: nop
* 0xffffe40b <__kernel_vsyscall+11>: nop
* 0xffffe40c <__kernel_vsyscall+12>: nop
* 0xffffe40d <__kernel_vsyscall+13>: nop
* system call restart point:
* 0xffffe40e <__kernel_vsyscall+14>: jmp 0xffffe403 <__kernel_vsyscall+3>
* system call normal return point:
* 0xffffe410 <__kernel_vsyscall+16>: pop %ebp
* 0xffffe411 <__kernel_vsyscall+17>: pop %edx
* 0xffffe412 <__kernel_vsyscall+18>: pop %ecx
* 0xffffe413 <__kernel_vsyscall+19>: ret
*
* For randomized vsyscall page locations we can mark the page +w and
* write to it. For now, for simplicity, we focus only on that case;
* for vsyscall page at un-reachable 0xffffe000 we bail out and use
* ints for now (perf hit but works). PR 288330 covers leaving
* as sysenters.
*
* There are either nops or garbage after the ret, so we clobber one
* byte past the ret to put in a rel32 jmp (an alternative is to do
* rel8 jmp into the nop area and have a rel32 jmp there). We
* cleverly copy the 4 bytes of displaced code into the nop area, so
* that 1) we don't have to allocate any memory and 2) we don't have
* to do any extra work in d_r_dispatch, which will naturally go to the
* post-system-call-instr pc.
* Unfortunately the 4.4.8 kernel removed the nops (i#1939) so for
* recent kernels we instead copy into the padding area:
* 0xf77c6be0: push %ecx
* 0xf77c6be1: push %edx
* 0xf77c6be2: push %ebp
* 0xf77c6be3: mov %esp,%ebp
* 0xf77c6be5: sysenter
* 0xf77c6be7: int $0x80
* normal return point:
* 0xf77c6be9: pop %ebp
* 0xf77c6bea: pop %edx
* 0xf77c6beb: pop %ecx
* 0xf77c6bec: ret
* 0xf77c6bed+: <padding>
*
* Using a hook is much simpler than clobbering the retaddr, which is what
* Windows does and then has to spend a lot of effort juggling transparency
* and control on asynch in/out events.
*
* XXX i#2694: We can't handle threads that had never been taken over. Such
* native threads w/o TLS will follow the hook and will crash when spilling
* to TLS post-syscall before the jump to the linkstub. More synchronization
* or no-TLS handling is needed.
*/
# define VSYS_DISPLACED_LEN 4
bool
hook_vsyscall(dcontext_t *dcontext, bool method_changing)
{
# ifdef X86
bool res = true;
instr_t instr;
byte *pc;
uint num_nops = 0;
uint prot;
    /* When called due to a syscall method change, the method is not yet finalized,
     * so we always try.
     */
if (get_syscall_method() != SYSCALL_METHOD_SYSENTER && !method_changing)
return false;
ASSERT(DATASEC_WRITABLE(DATASEC_RARELY_PROT));
ASSERT(vsyscall_page_start != NULL && vsyscall_syscall_end_pc != NULL &&
vsyscall_page_start == (app_pc)PAGE_START(vsyscall_syscall_end_pc));
instr_init(dcontext, &instr);
pc = vsyscall_syscall_end_pc;
do {
instr_reset(dcontext, &instr);
pc = decode(dcontext, pc, &instr);
if (instr_is_nop(&instr))
num_nops++;
} while (instr_is_nop(&instr));
vsyscall_sysenter_return_pc = pc;
ASSERT(instr_get_opcode(&instr) == OP_jmp_short ||
instr_get_opcode(&instr) == OP_int /*ubuntu 11.10: i#647*/);
/* We fail if the pattern looks different */
# define CHECK(x) \
do { \
if (!(x)) { \
ASSERT(false && "vsyscall pattern mismatch"); \
res = false; \
goto hook_vsyscall_return; \
} \
} while (0);
/* Only now that we've set vsyscall_sysenter_return_pc do we check writability */
if (!DYNAMO_OPTION(hook_vsyscall)) {
res = false;
goto hook_vsyscall_return;
}
get_memory_info(vsyscall_page_start, NULL, NULL, &prot);
if (!TEST(MEMPROT_WRITE, prot)) {
res = set_protection(vsyscall_page_start, PAGE_SIZE, prot | MEMPROT_WRITE);
if (!res)
goto hook_vsyscall_return;
}
LOG(GLOBAL, LOG_SYSCALLS | LOG_VMAREAS, 1, "Hooking vsyscall page @ " PFX "\n",
vsyscall_sysenter_return_pc);
/* The 5 bytes we'll clobber: */
instr_reset(dcontext, &instr);
pc = decode(dcontext, pc, &instr);
CHECK(instr_get_opcode(&instr) == OP_pop);
instr_reset(dcontext, &instr);
pc = decode(dcontext, pc, &instr);
CHECK(instr_get_opcode(&instr) == OP_pop);
instr_reset(dcontext, &instr);
pc = decode(dcontext, pc, &instr);
CHECK(instr_get_opcode(&instr) == OP_pop);
instr_reset(dcontext, &instr);
pc = decode(dcontext, pc, &instr);
CHECK(instr_get_opcode(&instr) == OP_ret);
/* We don't know what the 5th byte is but we assume that it is junk */
/* FIXME: at some point we should pull out all the hook code from
* callback.c into an os-neutral location. For now, this hook
* is very special-case and simple.
*/
/* For thread synch, the datasec prot lock will serialize us (FIXME: do this at
* init time instead, when see [vdso] page in maps file?)
*/
CHECK(pc - vsyscall_sysenter_return_pc == VSYS_DISPLACED_LEN);
ASSERT(pc + 1 /*nop*/ - vsyscall_sysenter_return_pc == JMP_LONG_LENGTH);
if (num_nops >= VSYS_DISPLACED_LEN) {
CHECK(num_nops >= pc - vsyscall_sysenter_return_pc);
memcpy(vmcode_get_writable_addr(vsyscall_syscall_end_pc),
vsyscall_sysenter_return_pc,
/* we don't copy the 5th byte to preserve nop for nice disassembly */
pc - vsyscall_sysenter_return_pc);
vsyscall_sysenter_displaced_pc = vsyscall_syscall_end_pc;
} else {
/* i#1939: the 4.4.8 kernel removed the nops. It might be safer
* to place the bytes in our own memory somewhere but that requires
* extra logic to mark it as executable and to map the PC for
* dr_fragment_app_pc() and dr_app_pc_for_decoding(), so we go for the
* easier-to-implement route and clobber the padding garbage after the ret.
* We assume it is large enough for the 1 byte from the jmp32 and the
* 4 bytes of displacement. Technically we should map the PC back
* here as well but it's close enough.
*/
pc += 1; /* skip 5th byte of to-be-inserted jmp */
CHECK(PAGE_START(pc) == PAGE_START(pc + VSYS_DISPLACED_LEN));
memcpy(vmcode_get_writable_addr(pc), vsyscall_sysenter_return_pc,
VSYS_DISPLACED_LEN);
vsyscall_sysenter_displaced_pc = pc;
}
insert_relative_jump(vsyscall_sysenter_return_pc,
/* we require a thread-shared fcache_return */
after_do_shared_syscall_addr(dcontext), NOT_HOT_PATCHABLE);
if (!TEST(MEMPROT_WRITE, prot)) {
/* we don't override res here since not much point in not using the
     * hook once it's in if we failed to re-protect: we're going to have to
* trust the app code here anyway */
DEBUG_DECLARE(bool ok =)
set_protection(vsyscall_page_start, PAGE_SIZE, prot);
ASSERT(ok);
}
hook_vsyscall_return:
instr_free(dcontext, &instr);
return res;
# undef CHECK
# elif defined(AARCHXX)
/* No vsyscall support needed for our ARM targets -- still called on
* os_process_under_dynamorio().
*/
ASSERT(!method_changing);
return false;
# endif /* X86/ARM */
}
bool
unhook_vsyscall(void)
{
# ifdef X86
uint prot;
bool res;
uint len = VSYS_DISPLACED_LEN;
if (get_syscall_method() != SYSCALL_METHOD_SYSENTER)
return false;
ASSERT(!sysenter_hook_failed);
ASSERT(vsyscall_sysenter_return_pc != NULL);
ASSERT(vsyscall_syscall_end_pc != NULL);
get_memory_info(vsyscall_page_start, NULL, NULL, &prot);
if (!TEST(MEMPROT_WRITE, prot)) {
res = set_protection(vsyscall_page_start, PAGE_SIZE, prot | MEMPROT_WRITE);
if (!res)
return false;
}
memcpy(vsyscall_sysenter_return_pc, vsyscall_sysenter_displaced_pc, len);
/* we do not restore the 5th (junk/nop) byte (we never copied it) */
if (vsyscall_sysenter_displaced_pc == vsyscall_syscall_end_pc) /* <4.4.8 */
memset(vmcode_get_writable_addr(vsyscall_syscall_end_pc), RAW_OPCODE_nop, len);
if (!TEST(MEMPROT_WRITE, prot)) {
res = set_protection(vsyscall_page_start, PAGE_SIZE, prot);
ASSERT(res);
}
return true;
# elif defined(AARCHXX)
ASSERT_NOT_IMPLEMENTED(get_syscall_method() != SYSCALL_METHOD_SYSENTER);
return false;
# endif /* X86/ARM */
}
#endif /* LINUX */
void
check_syscall_method(dcontext_t *dcontext, instr_t *instr)
{
int new_method = SYSCALL_METHOD_UNINITIALIZED;
#ifdef X86
if (instr_get_opcode(instr) == OP_int)
new_method = SYSCALL_METHOD_INT;
else if (instr_get_opcode(instr) == OP_sysenter)
new_method = SYSCALL_METHOD_SYSENTER;
else if (instr_get_opcode(instr) == OP_syscall)
new_method = SYSCALL_METHOD_SYSCALL;
# ifdef WINDOWS
else if (instr_get_opcode(instr) == OP_call_ind)
new_method = SYSCALL_METHOD_WOW64;
# endif
#elif defined(AARCHXX)
if (instr_get_opcode(instr) == OP_svc)
new_method = SYSCALL_METHOD_SVC;
#endif /* X86/ARM */
else
ASSERT_NOT_REACHED();
if (new_method == SYSCALL_METHOD_SYSENTER ||
IF_X64_ELSE(false, new_method == SYSCALL_METHOD_SYSCALL)) {
DO_ONCE({
/* FIXME: DO_ONCE will unprot and reprot, and here we unprot again */
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
/* FIXME : using the raw-bits as the app pc for the instr is
* not really supported, but places in monitor assume it as well */
ASSERT(instr_raw_bits_valid(instr) && !instr_has_allocated_bits(instr));
/* Some places (such as clean_syscall_wrapper) assume that only int system
* calls are used in older versions of windows. */
IF_WINDOWS(ASSERT(get_os_version() > WINDOWS_VERSION_2000 &&
"Expected int syscall method on NT and 2000"));
/* Used by SYSCALL_PC in win32/os.c for non int system calls */
IF_WINDOWS(app_sysenter_instr_addr = instr_get_raw_bits(instr));
/* we expect, only on XP and later or on recent linux kernels,
* indirected syscalls through a certain page, which we record here
* FIXME: don't allow anyone to make this region writable?
*/
/* FIXME : we need to verify that windows lays out all of the
* syscall stuff as expected on AMD chips: xref PR 205898.
*/
/* FIXME: bootstrapping problem...would be nicer to read ahead and find
* syscall before needing to know about page it's on, but for now we just
* check if our initial assignments were correct
*/
vsyscall_syscall_end_pc =
instr_get_raw_bits(instr) + instr_length(dcontext, instr);
IF_WINDOWS({
            /* for XP sp0,1 (but not sp2) and 03 fixup bootstrap values */
if (vsyscall_page_start == VSYSCALL_PAGE_START_BOOTSTRAP_VALUE) {
vsyscall_page_start = (app_pc)PAGE_START(instr_get_raw_bits(instr));
ASSERT(vsyscall_page_start == VSYSCALL_PAGE_START_BOOTSTRAP_VALUE);
}
if (vsyscall_after_syscall == VSYSCALL_AFTER_SYSCALL_BOOTSTRAP_VALUE) {
/* for XP sp0,1 and 03 the ret is immediately after the
* sysenter instruction */
vsyscall_after_syscall =
instr_get_raw_bits(instr) + instr_length(dcontext, instr);
ASSERT(vsyscall_after_syscall ==
VSYSCALL_AFTER_SYSCALL_BOOTSTRAP_VALUE);
}
});
/* For linux, we should have found "[vdso]" in the maps file, but vsyscall
* is not always on the first vdso page (i#2945).
*/
IF_LINUX({
if (vsyscall_page_start !=
(app_pc)PAGE_START(instr_get_raw_bits(instr))) {
LOG(GLOBAL, LOG_SYSCALLS | LOG_VMAREAS, 2,
"Found vsyscall " PFX " not on 1st vdso page " PFX
", shifting it\n",
instr_get_raw_bits(instr), vsyscall_page_start);
vsyscall_page_start = (app_pc)PAGE_START(instr_get_raw_bits(instr));
}
});
LOG(GLOBAL, LOG_SYSCALLS | LOG_VMAREAS, 2,
"Found vsyscall @ " PFX " => page " PFX ", post " PFX "\n",
instr_get_raw_bits(instr), vsyscall_page_start,
IF_WINDOWS_ELSE(vsyscall_after_syscall, vsyscall_syscall_end_pc));
/* make sure system call numbers match */
IF_WINDOWS(DOCHECK(1, { check_syscall_numbers(dcontext); }));
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
});
} else {
#ifdef WINDOWS
DO_ONCE({
/* FIXME: DO_ONCE will unprot and reprot, and here we unprot again */
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
/* Close vsyscall page hole.
* FIXME: the vsyscall page can still be in use and contain int:
* though I have yet to see that case where the page is not marked rx.
* On linux the vsyscall page is reached via "call *%gs:0x10", but
* sometimes that call ends up at /lib/ld-2.3.4.so:_dl_sysinfo_int80
* instead (which is the case when the vsyscall page is marked with no
* permissions).
*/
LOG(GLOBAL, LOG_SYSCALLS | LOG_VMAREAS, 2,
"Closing vsyscall page hole (int @ " PFX ") => page " PFX ", post " PFX
"\n",
instr_get_translation(instr), vsyscall_page_start,
IF_WINDOWS_ELSE(vsyscall_after_syscall, vsyscall_syscall_end_pc));
vsyscall_page_start = NULL;
vsyscall_after_syscall = NULL;
ASSERT_CURIOSITY(new_method != SYSCALL_METHOD_WOW64 ||
(get_os_version() > WINDOWS_VERSION_XP &&
is_wow64_process(NT_CURRENT_PROCESS) &&
"Unexpected WOW64 syscall method"));
/* make sure system call numbers match */
DOCHECK(1, { check_syscall_numbers(dcontext); });
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
});
#else
/* On Linux we can't clear vsyscall_page_start as the app will often use both
* inlined int and vsyscall sysenter system calls. We handle fixing up for
* that in the next ifdef. */
#endif
}
#ifdef UNIX
if (new_method != get_syscall_method() &&
/* PR 286922: for linux, vsyscall method trumps occasional use of int. We
* update do_syscall for the vsyscall method, and use do_int_syscall for any
* int uses. */
(new_method != SYSCALL_METHOD_INT ||
(get_syscall_method() != SYSCALL_METHOD_SYSENTER &&
get_syscall_method() != SYSCALL_METHOD_SYSCALL))) {
ASSERT(get_syscall_method() == SYSCALL_METHOD_UNINITIALIZED ||
get_syscall_method() == SYSCALL_METHOD_INT);
# ifdef LINUX
if (new_method == SYSCALL_METHOD_SYSENTER) {
# ifndef HAVE_TLS
if (DYNAMO_OPTION(hook_vsyscall)) {
/* PR 361894: we use TLS for our vsyscall hook (PR 212570) */
FATAL_USAGE_ERROR(SYSENTER_NOT_SUPPORTED, 2, get_application_name(),
get_application_pid());
}
# endif
/* Hook the sysenter continuation point so we don't lose control */
if (!sysenter_hook_failed && !hook_vsyscall(dcontext, true /*force*/)) {
/* PR 212570: for now we bail out to using int;
* for performance we should clobber the retaddr and
* keep the sysenters.
*/
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
sysenter_hook_failed = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
LOG(GLOBAL, LOG_SYSCALLS | LOG_VMAREAS, 1,
"Unable to hook vsyscall page; falling back on int\n");
}
if (sysenter_hook_failed)
new_method = SYSCALL_METHOD_INT;
}
# endif /* LINUX */
if (get_syscall_method() == SYSCALL_METHOD_UNINITIALIZED ||
new_method != get_syscall_method()) {
set_syscall_method(new_method);
/* update the places we have emitted syscalls: do_*syscall */
update_syscalls(dcontext);
}
}
#else
/* we assume only single method; else need multiple do_syscalls */
ASSERT(new_method == get_syscall_method());
#endif
}
int
get_syscall_method(void)
{
return syscall_method;
}
/* Does the syscall instruction always return to the invocation point? */
bool
does_syscall_ret_to_callsite(void)
{
return (syscall_method == SYSCALL_METHOD_INT ||
syscall_method == SYSCALL_METHOD_SYSCALL ||
syscall_method ==
SYSCALL_METHOD_SVC IF_WINDOWS(|| syscall_method == SYSCALL_METHOD_WOW64)
/* The app is reported to be at whatever's in edx, so
* for our purposes it does return to the call site
* if we always mangle edx to point there. Since we inline
* Mac sysenter (well, we execute it inside fragments, even
* if we don't continue (except maybe in a trace) we do
* want to return true here for skipping syscalls and
* handling interrupted syscalls.
*/
IF_MACOS(|| syscall_method == SYSCALL_METHOD_SYSENTER));
}
void
set_syscall_method(int method)
{
ASSERT(syscall_method == SYSCALL_METHOD_UNINITIALIZED ||
/* on re-attach this happens */
syscall_method ==
method IF_UNIX(|| syscall_method == SYSCALL_METHOD_INT /*PR 286922*/));
syscall_method = method;
}
#ifdef LINUX
/* PR 313715: If we fail to hook the vsyscall page (xref PR 212570, PR 288330)
* we fall back on int, but we have to tweak syscall param #5 (ebp)
*/
bool
should_syscall_method_be_sysenter(void)
{
return sysenter_hook_failed;
}
#endif
/* returns the address of the first app syscall instruction we saw (see hack
 * in win32/os.c that uses this for PRE_SYSCALL_PC), not for general use */
byte *
get_app_sysenter_addr()
{
/* FIXME : would like to assert that this has been initialized, but interp
* bb_process_convertible_indcall() will use it before we initialize it. */
return app_sysenter_instr_addr;
}
size_t
syscall_instr_length(dr_isa_mode_t mode)
{
size_t syslen;
IF_X86_ELSE(
{
ASSERT(INT_LENGTH == SYSCALL_LENGTH);
ASSERT(SYSENTER_LENGTH == SYSCALL_LENGTH);
syslen = SYSCALL_LENGTH;
},
{
syslen = IF_ARM_ELSE(
(mode == DR_ISA_ARM_THUMB ? SVC_THUMB_LENGTH : SVC_ARM_LENGTH),
SVC_LENGTH);
});
return syslen;
}
bool
is_syscall_at_pc(dcontext_t *dcontext, app_pc pc)
{
instr_t instr;
bool res = false;
instr_init(dcontext, &instr);
TRY_EXCEPT(dcontext,
{
pc = decode(dcontext, pc, &instr);
res = (pc != NULL && instr_valid(&instr) && instr_is_syscall(&instr));
},
{});
instr_free(dcontext, &instr);
return res;
}
void
copy_mcontext(priv_mcontext_t *src, priv_mcontext_t *dst)
{
/* FIXME: do we need this? */
*dst = *src;
}
bool
dr_mcontext_to_priv_mcontext(priv_mcontext_t *dst, dr_mcontext_t *src)
{
/* we assume fields from xdi onward are identical.
* if we append to dr_mcontext_t in the future we'll need
* to check src->size here.
*/
if (src->size != sizeof(dr_mcontext_t))
return false;
if (TESTALL(DR_MC_ALL, src->flags))
*dst = *(priv_mcontext_t *)(&MCXT_FIRST_REG_FIELD(src));
else {
if (TEST(DR_MC_INTEGER, src->flags)) {
/* xsp is in the middle of the mcxt, so we save dst->xsp here and
* restore it later so we can use one memcpy for DR_MC_INTEGER.
*/
reg_t save_xsp = dst->xsp;
memcpy(&MCXT_FIRST_REG_FIELD(dst), &MCXT_FIRST_REG_FIELD(src),
/* end of the mcxt integer gpr */
offsetof(priv_mcontext_t, IF_X86_ELSE(xflags, pc)));
dst->xsp = save_xsp;
}
if (TEST(DR_MC_CONTROL, src->flags)) {
/* XXX i#2710: mc->lr should be under DR_MC_CONTROL */
dst->xsp = src->xsp;
dst->xflags = src->xflags;
dst->pc = src->pc;
}
if (TEST(DR_MC_MULTIMEDIA, src->flags)) {
IF_X86_ELSE({ memcpy(&dst->simd, &src->simd, sizeof(dst->simd)); },
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
});
}
}
return true;
}
bool
priv_mcontext_to_dr_mcontext(dr_mcontext_t *dst, priv_mcontext_t *src)
{
/* we assume fields from xdi onward are identical.
* if we append to dr_mcontext_t in the future we'll need
* to check dst->size here.
*/
if (dst->size != sizeof(dr_mcontext_t))
return false;
if (TESTALL(DR_MC_ALL, dst->flags))
*(priv_mcontext_t *)(&MCXT_FIRST_REG_FIELD(dst)) = *src;
else {
if (TEST(DR_MC_INTEGER, dst->flags)) {
/* xsp is in the middle of the mcxt, so we save dst->xsp here and
* restore it later so we can use one memcpy for DR_MC_INTEGER.
*/
reg_t save_xsp = dst->xsp;
memcpy(&MCXT_FIRST_REG_FIELD(dst), &MCXT_FIRST_REG_FIELD(src),
/* end of the mcxt integer gpr */
offsetof(priv_mcontext_t, IF_X86_ELSE(xflags, pc)));
dst->xsp = save_xsp;
}
if (TEST(DR_MC_CONTROL, dst->flags)) {
dst->xsp = src->xsp;
dst->xflags = src->xflags;
dst->pc = src->pc;
}
if (TEST(DR_MC_MULTIMEDIA, dst->flags)) {
IF_X86_ELSE({ memcpy(&dst->simd, &src->simd, sizeof(dst->simd)); },
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
});
}
}
return true;
}
priv_mcontext_t *
dr_mcontext_as_priv_mcontext(dr_mcontext_t *mc)
{
/* It's up to the caller to ensure the proper DR_MC_ flags are set (i#1848) */
return (priv_mcontext_t *)(&MCXT_FIRST_REG_FIELD(mc));
}
priv_mcontext_t *
get_priv_mcontext_from_dstack(dcontext_t *dcontext)
{
return (priv_mcontext_t *)((char *)dcontext->dstack - sizeof(priv_mcontext_t));
}
void
dr_mcontext_init(dr_mcontext_t *mc)
{
mc->size = sizeof(dr_mcontext_t);
mc->flags = DR_MC_ALL;
}
/* dumps the context */
void
dump_mcontext(priv_mcontext_t *context, file_t f, bool dump_xml)
{
print_file(
f,
dump_xml ? "\t<priv_mcontext_t value=\"@" PFX "\""
#ifdef X86
"\n\t\txax=\"" PFX "\"\n\t\txbx=\"" PFX "\""
"\n\t\txcx=\"" PFX "\"\n\t\txdx=\"" PFX "\""
"\n\t\txsi=\"" PFX "\"\n\t\txdi=\"" PFX "\""
"\n\t\txbp=\"" PFX "\"\n\t\txsp=\"" PFX "\""
# ifdef X64
"\n\t\tr8=\"" PFX "\"\n\t\tr9=\"" PFX "\""
"\n\t\tr10=\"" PFX "\"\n\t\tr11=\"" PFX "\""
"\n\t\tr12=\"" PFX "\"\n\t\tr13=\"" PFX "\""
"\n\t\tr14=\"" PFX "\"\n\t\tr15=\"" PFX "\""
# endif /* X64 */
#elif defined(ARM)
"\n\t\tr0=\"" PFX "\"\n\t\tr1=\"" PFX "\""
"\n\t\tr2=\"" PFX "\"\n\t\tr3=\"" PFX "\""
"\n\t\tr4=\"" PFX "\"\n\t\tr5=\"" PFX "\""
"\n\t\tr6=\"" PFX "\"\n\t\tr7=\"" PFX "\""
"\n\t\tr8=\"" PFX "\"\n\t\tr9=\"" PFX "\""
"\n\t\tr10=\"" PFX "\"\n\t\tr11=\"" PFX "\""
"\n\t\tr12=\"" PFX "\"\n\t\tr13=\"" PFX "\""
"\n\t\tr14=\"" PFX "\"\n\t\tr15=\"" PFX "\""
# ifdef X64
"\n\t\tr16=\"" PFX "\"\n\t\tr17=\"" PFX "\""
"\n\t\tr18=\"" PFX "\"\n\t\tr19=\"" PFX "\""
"\n\t\tr20=\"" PFX "\"\n\t\tr21=\"" PFX "\""
"\n\t\tr22=\"" PFX "\"\n\t\tr23=\"" PFX "\""
"\n\t\tr24=\"" PFX "\"\n\t\tr25=\"" PFX "\""
"\n\t\tr26=\"" PFX "\"\n\t\tr27=\"" PFX "\""
"\n\t\tr28=\"" PFX "\"\n\t\tr29=\"" PFX "\""
"\n\t\tr30=\"" PFX "\"\n\t\tr31=\"" PFX "\""
# endif /* X64 */
#endif /* X86/ARM */
: "priv_mcontext_t @" PFX "\n"
#ifdef X86
"\txax = " PFX "\n\txbx = " PFX "\n\txcx = " PFX "\n\txdx = " PFX "\n"
"\txsi = " PFX "\n\txdi = " PFX "\n\txbp = " PFX "\n\txsp = " PFX "\n"
# ifdef X64
"\tr8 = " PFX "\n\tr9 = " PFX "\n\tr10 = " PFX "\n\tr11 = " PFX "\n"
"\tr12 = " PFX "\n\tr13 = " PFX "\n\tr14 = " PFX "\n\tr15 = " PFX "\n"
# endif /* X64 */
#elif defined(ARM)
"\tr0 = " PFX "\n\tr1 = " PFX "\n\tr2 = " PFX "\n\tr3 = " PFX "\n"
"\tr4 = " PFX "\n\tr5 = " PFX "\n\tr6 = " PFX "\n\tr7 = " PFX "\n"
"\tr8 = " PFX "\n\tr9 = " PFX "\n\tr10 = " PFX "\n\tr11 = " PFX "\n"
"\tr12 = " PFX "\n\tr13 = " PFX "\n\tr14 = " PFX "\n\tr15 = " PFX "\n"
# ifdef X64
"\tr16 = " PFX "\n\tr17 = " PFX "\n\tr18 = " PFX "\n\tr19 = " PFX "\n"
"\tr20 = " PFX "\n\tr21 = " PFX "\n\tr22 = " PFX "\n\tr23 = " PFX "\n"
"\tr24 = " PFX "\n\tr25 = " PFX "\n\tr26 = " PFX "\n\tr27 = " PFX "\n"
"\tr28 = " PFX "\n\tr29 = " PFX "\n\tr30 = " PFX "\n\tr31 = " PFX "\n"
# endif /* X64 */
#endif /* X86/ARM */
,
context,
#ifdef X86
context->xax, context->xbx, context->xcx, context->xdx, context->xsi,
context->xdi, context->xbp, context->xsp
# ifdef X64
,
context->r8, context->r9, context->r10, context->r11, context->r12, context->r13,
context->r14, context->r15
# endif /* X64 */
#elif defined(AARCHXX)
context->r0, context->r1, context->r2, context->r3, context->r4, context->r5,
context->r6, context->r7, context->r8, context->r9, context->r10, context->r11,
context->r12, context->r13, context->r14, context->r15
# ifdef X64
,
context->r16, context->r17, context->r18, context->r19, context->r20,
context->r21, context->r22, context->r23, context->r24, context->r25,
context->r26, context->r27, context->r28, context->r29, context->r30, context->r31
# endif /* X64 */
#endif /* X86/ARM */
);
#ifdef X86
/* XXX i#1312: this needs to get extended to AVX-512. */
if (preserve_xmm_caller_saved()) {
int i, j;
for (i = 0; i < proc_num_simd_saved(); i++) {
if (YMM_ENABLED()) {
print_file(f, dump_xml ? "\t\tymm%d= \"0x" : "\tymm%d= 0x", i);
for (j = 0; j < 8; j++) {
print_file(f, "%08x", context->simd[i].u32[j]);
}
} else {
print_file(f, dump_xml ? "\t\txmm%d= \"0x" : "\txmm%d= 0x", i);
/* This would be simpler if we had uint64 fields in dr_xmm_t but
* that complicates our struct layouts */
for (j = 0; j < 4; j++) {
print_file(f, "%08x", context->simd[i].u32[j]);
}
}
print_file(f, dump_xml ? "\"\n" : "\n");
}
DOLOG(2, LOG_INTERP, {
/* Not part of mcontext but useful for tracking app behavior */
if (!dump_xml) {
uint mxcsr;
dr_stmxcsr(&mxcsr);
print_file(f, "\tmxcsr=0x%08x\n", mxcsr);
}
});
}
#elif defined(ARM)
{
int i, j;
/* XXX: should be proc_num_simd_saved(). */
for (i = 0; i < proc_num_simd_registers(); i++) {
print_file(f, dump_xml ? "\t\tqd= \"0x" : "\tq%-3d= 0x", i);
for (j = 0; j < 4; j++) {
print_file(f, "%08x ", context->simd[i].u32[j]);
}
print_file(f, dump_xml ? "\"\n" : "\n");
}
}
#endif
print_file(f,
dump_xml ? "\n\t\teflags=\"" PFX "\"\n\t\tpc=\"" PFX "\" />\n"
: "\teflags = " PFX "\n\tpc = " PFX "\n",
context->xflags, context->pc);
}
#ifdef AARCHXX
reg_t
get_stolen_reg_val(priv_mcontext_t *mc)
{
return *(reg_t *)(((byte *)mc) + opnd_get_reg_dcontext_offs(dr_reg_stolen));
}
void
set_stolen_reg_val(priv_mcontext_t *mc, reg_t newval)
{
*(reg_t *)(((byte *)mc) + opnd_get_reg_dcontext_offs(dr_reg_stolen)) = newval;
}
#endif
#ifdef PROFILE_RDTSC
/* This only works on Pentium I or later */
# ifdef UNIX
__inline__ uint64
get_time()
{
uint64 res;
RDTSC_LL(res);
return res;
}
# else /* WINDOWS */
uint64
get_time()
{
return __rdtsc(); /* compiler intrinsic */
}
# endif
#endif /* PROFILE_RDTSC */
#ifdef DEBUG
bool
is_ibl_routine_type(dcontext_t *dcontext, cache_pc target, ibl_branch_type_t branch_type)
{
ibl_type_t ibl_type;
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type_ex(dcontext, target, &ibl_type _IF_X86_64(NULL));
ASSERT(is_ibl);
return (branch_type == ibl_type.branch_type);
}
#endif /* DEBUG */
/***************************************************************************
* UNIT TEST
*/
#ifdef STANDALONE_UNIT_TEST
# ifdef UNIX
# include <pthread.h>
# endif
# define MAX_NUM_THREADS 3
# define LOOP_COUNT 10000
volatile static int count1 = 0;
volatile static int count2 = 0;
# ifdef X64
volatile static ptr_int_t count3 = 0;
# endif
IF_UNIX_ELSE(void *, DWORD WINAPI)
test_thread_func(void *arg)
{
int i;
    /* We first increment "count" LOOP_COUNT times, then decrement it (LOOP_COUNT-1)
* times, so each thread will increment "count" by 1.
*/
for (i = 0; i < LOOP_COUNT; i++)
ATOMIC_INC(int, count1);
for (i = 0; i < (LOOP_COUNT - 1); i++)
ATOMIC_DEC(int, count1);
for (i = 0; i < LOOP_COUNT; i++)
ATOMIC_ADD(int, count2, 1);
for (i = 0; i < (LOOP_COUNT - 1); i++)
ATOMIC_ADD(int, count2, -1);
return 0;
}
static void
do_parallel_updates()
{
int i;
# ifdef UNIX
pthread_t threads[MAX_NUM_THREADS];
for (i = 0; i < MAX_NUM_THREADS; i++) {
pthread_create(&threads[i], NULL, test_thread_func, NULL);
}
for (i = 0; i < MAX_NUM_THREADS; i++) {
pthread_join(threads[i], NULL);
}
# else /* WINDOWS */
HANDLE threads[MAX_NUM_THREADS];
for (i = 0; i < MAX_NUM_THREADS; i++) {
threads[i] =
CreateThread(NULL, /* use default security attributes */
                         0,                      /* use default stack size */
test_thread_func, NULL, /* argument to thread function */
0, /* use default creation flags */
NULL /* thread id */);
}
WaitForMultipleObjects(MAX_NUM_THREADS, threads, TRUE, INFINITE);
# endif /* UNIX/WINDOWS */
}
/* some tests for inline asm for atomic ops */
void
unit_test_atomic_ops(void)
{
int value = -1;
# ifdef X64
int64 value64 = -1;
# endif
print_file(STDERR, "test inline asm atomic ops\n");
ATOMIC_4BYTE_WRITE(&count1, value, false);
EXPECT(count1, -1);
# ifdef X64
ATOMIC_8BYTE_WRITE(&count3, value64, false);
EXPECT(count3, -1);
# endif
EXPECT(atomic_inc_and_test(&count1), true); /* result is 0 */
EXPECT(atomic_inc_and_test(&count1), false); /* result is 1 */
EXPECT(atomic_dec_and_test(&count1), false); /* init value is 1, result is 0 */
EXPECT(atomic_dec_and_test(&count1), true); /* init value is 0, result is -1 */
EXPECT(atomic_dec_becomes_zero(&count1), false); /* result is -2 */
EXPECT(atomic_compare_exchange_int(&count1, -3, 1), false); /* no exchange */
EXPECT(count1, -2);
EXPECT(atomic_compare_exchange_int(&count1, -2, 1), true); /* exchange */
EXPECT(atomic_dec_becomes_zero(&count1), true); /* result is 0 */
do_parallel_updates();
EXPECT(count1, MAX_NUM_THREADS);
EXPECT(count2, MAX_NUM_THREADS);
}
#endif /* STANDALONE_UNIT_TEST */
| 1 | 17,650 | Should be inside `ifdef X86` I would think (or moved to ... I guess arch/x86/emit_utils.c) | DynamoRIO-dynamorio | c |
@@ -10,12 +10,14 @@ import (
// LocationCache cache the map of node, pod, configmap, secret
type LocationCache struct {
- //edgeNodes is a list of valid edge nodes
- edgeNodes []string
+ // EdgeNodes is a list of valid edge nodes
+ EdgeNodes []string
// configMapNode is a map, key is namespace/configMapName, value is nodeName
configMapNode sync.Map
// secretNode is a map, key is namespace/secretName, value is nodeName
secretNode sync.Map
+ // Services is an array of services
+ Services []v1.Service
}
// PodConfigMapsAndSecrets return configmaps and secrets used by pod | 1 | package manager
import (
"fmt"
"sync"
"github.com/kubeedge/beehive/pkg/common/log"
"k8s.io/api/core/v1"
)
// LocationCache cache the map of node, pod, configmap, secret
type LocationCache struct {
//edgeNodes is a list of valid edge nodes
edgeNodes []string
// configMapNode is a map, key is namespace/configMapName, value is nodeName
configMapNode sync.Map
// secretNode is a map, key is namespace/secretName, value is nodeName
secretNode sync.Map
}
// PodConfigMapsAndSecrets return configmaps and secrets used by pod
func (lc *LocationCache) PodConfigMapsAndSecrets(pod v1.Pod) (configMaps, secrets []string) {
for _, v := range pod.Spec.Volumes {
if v.ConfigMap != nil {
configMaps = append(configMaps, v.ConfigMap.Name)
}
if v.Secret != nil {
secrets = append(secrets, v.Secret.SecretName)
}
}
// used by envs
for _, s := range pod.Spec.Containers {
for _, ef := range s.EnvFrom {
if ef.ConfigMapRef != nil {
configMaps = append(configMaps, ef.ConfigMapRef.Name)
}
if ef.SecretRef != nil {
secrets = append(secrets, ef.SecretRef.Name)
}
}
}
// used by ImagePullSecrets
for _, s := range pod.Spec.ImagePullSecrets {
secrets = append(secrets, s.Name)
}
return
}
func (lc *LocationCache) newNodes(oldNodes []string, node string) []string {
for _, n := range oldNodes {
if n == node {
return oldNodes
}
}
return append(oldNodes, node)
}
// AddOrUpdatePod add pod to node, pod to configmap, configmap to pod, pod to secret, secret to pod relation
func (lc *LocationCache) AddOrUpdatePod(pod v1.Pod) {
configMaps, secrets := lc.PodConfigMapsAndSecrets(pod)
for _, c := range configMaps {
configMapKey := fmt.Sprintf("%s/%s", pod.Namespace, c)
// update configMapPod
value, ok := lc.configMapNode.Load(configMapKey)
var newNodes []string
if ok {
nodes, _ := value.([]string)
newNodes = lc.newNodes(nodes, pod.Spec.NodeName)
} else {
newNodes = []string{pod.Spec.NodeName}
}
lc.configMapNode.Store(configMapKey, newNodes)
}
for _, s := range secrets {
secretKey := fmt.Sprintf("%s/%s", pod.Namespace, s)
// update secretPod
value, ok := lc.secretNode.Load(secretKey)
var newNodes []string
if ok {
nodes, _ := value.([]string)
newNodes = lc.newNodes(nodes, pod.Spec.NodeName)
} else {
newNodes = []string{pod.Spec.NodeName}
}
lc.secretNode.Store(secretKey, newNodes)
}
}
// ConfigMapNodes return all nodes which deploy pod on with configmap
func (lc *LocationCache) ConfigMapNodes(namespace, name string) (nodes []string) {
configMapKey := fmt.Sprintf("%s/%s", namespace, name)
value, ok := lc.configMapNode.Load(configMapKey)
if ok {
if nodes, ok := value.([]string); ok {
return nodes
}
}
return
}
// SecretNodes return all nodes which deploy pod on with secret
func (lc *LocationCache) SecretNodes(namespace, name string) (nodes []string) {
secretKey := fmt.Sprintf("%s/%s", namespace, name)
value, ok := lc.secretNode.Load(secretKey)
if ok {
if nodes, ok := value.([]string); ok {
return nodes
}
}
return
}
//IsEdgeNode checks whether node is edge node or not
func (lc *LocationCache) IsEdgeNode(nodeName string) bool {
for _, node := range lc.edgeNodes {
if node == nodeName {
return true
}
}
return false
}
//UpdateEdgeNode is to maintain edge node names up to date by querying kubernetes client
func (lc *LocationCache) UpdateEdgeNode(nodeName string) {
lc.edgeNodes = append(lc.edgeNodes, nodeName)
log.LOGGER.Infof("Edge nodes updated : %v \n", lc.edgeNodes)
}
// DeleteConfigMap from cache
func (lc *LocationCache) DeleteConfigMap(namespace, name string) {
lc.configMapNode.Delete(fmt.Sprintf("%s/%s", namespace, name))
}
// DeleteSecret from cache
func (lc *LocationCache) DeleteSecret(namespace, name string) {
lc.secretNode.Delete(fmt.Sprintf("%s/%s", namespace, name))
}
// DeleteNode from cache
func (lc *LocationCache) DeleteNode(name string) {
for i, v := range lc.edgeNodes {
if v == name {
lc.edgeNodes = append(lc.edgeNodes[:i], lc.edgeNodes[i+1:]...)
}
}
}
| 1 | 11,453 | Using sync.Map(key: nodename; value: state) instead of this "EdgeNodes" string slice here would be much better. Reasons: 1. Using sync.Map makes the time complexity of function UpdateEdgeNode and IsEdgeNode O(1), while using string slice with for loop makes it O(n). 2. Later we could be checking node state whether it's "ready" or still "not ready" by directly comparing "value" of sync.Map and doing the node state updating. | kubeedge-kubeedge | go |
@@ -20,6 +20,10 @@ namespace AutoRest.Go
public const string ReadOnlyConstraint = "ReadOnly";
+ private static readonly Regex IsApiVersionPattern = new Regex(@"^api[^a-zA-Z0-9_]?version", RegexOptions.IgnoreCase);
+
+ private static readonly Regex UnwrapAnchorTagsPattern = new Regex("([^<>]*)<a\\s*.*\\shref\\s*=\\s*[\'\"]([^\'\"]*)[\'\"][^>]*>(.*)</a>");
+
private static readonly Regex WordSplitPattern = new Regex(@"(\p{Lu}\p{Ll}+)");
private static Dictionary<string, string> plural = new Dictionary<string, string>() | 1 | // Copyright (c) Microsoft Open Technologies, Inc. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using AutoRest.Core.Model;
using AutoRest.Core.Utilities;
using AutoRest.Core.Utilities.Collections;
using AutoRest.Extensions;
using AutoRest.Go.Model;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Text.RegularExpressions;
namespace AutoRest.Go
{
public static class Extensions
{
public const string NullConstraint = "Null";
public const string ReadOnlyConstraint = "ReadOnly";
private static readonly Regex WordSplitPattern = new Regex(@"(\p{Lu}\p{Ll}+)");
private static Dictionary<string, string> plural = new Dictionary<string, string>()
{
{ "eventhub", "eventhubs" },
{ "containerservice", "containerservices" }
};
/////////////////////////////////////////////////////////////////////////////////////////
//
// General Extensions
//
/////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
        /// This method changes a string to a sentence by making the first word
        /// of the sentence lowercase. The sentence comes directly from swagger.
/// </summary>
/// <param name="value"></param>
/// <returns></returns>
public static string ToSentence(this string value)
{
if (string.IsNullOrWhiteSpace(value))
{
return string.Empty;
}
else
{
value = value.Trim();
return value.First().ToString().ToLowerInvariant() + (value.Length > 1 ? value.Substring(1) : "");
}
}
/// <summary>
        /// Makes the first word of the sentence upper case.
/// </summary>
/// <param name="value"></param>
/// <returns></returns>
public static string Capitalize(this string value)
{
return string.IsNullOrWhiteSpace(value)
? string.Empty
: value.First()
.ToString()
.ToUpperInvariant() + (value.Length > 1 ? value.Substring(1) : "");
}
/// <summary>
/// String manipulation function converts all words in a sentence to lowercase.
/// Refactor -> Namer
/// </summary>
/// <param name="value"></param>
/// <returns></returns>
public static string ToPhrase(this string value)
{
List<string> words = new List<string>(value.ToWords());
for (int i = 0; i < words.Count; i++)
{
words[i] = words[i].ToLowerInvariant();
}
return string.Join(" ", words.ToArray());
}
/// <summary>
/// Split sentence into words.
/// </summary>
/// <param name="value"></param>
/// <returns></returns>
public static string[] ToWords(this string value)
{
return WordSplitPattern.Split(value).Where(s => !string.IsNullOrEmpty(s)).ToArray();
}
/// <summary>
/// This method checks if MethodGroupName is plural of package name.
/// It returns false for packages not listed in dictionary 'plural'.
/// Example, group EventHubs in package EventHub.
/// Refactor -> Namer, but also could be used by the CodeModelTransformer
/// </summary>
/// <param name="value"></param>
/// <param name="packageName"></param>
/// <returns></returns>
public static bool IsNamePlural(this string value, string packageName)
{
return plural.ContainsKey(packageName) && plural[packageName] == value.ToLower();
}
/// <summary>
/// Gets substring from value string.
/// </summary>
/// <param name="value"></param>
/// <param name="s"></param>
/// <returns></returns>
public static string TrimStartsWith(this string value, string s)
{
if (!string.IsNullOrEmpty(s) && s.Length < value.Length && value.StartsWith(s, StringComparison.OrdinalIgnoreCase))
{
value = value.Substring(s.Length);
}
return value;
}
/// <summary>
/// Removes the package name from the beginning of a type or method name.
/// Refactor -> Namer, but could be used by the CodeModelTransformer too
/// </summary>
/// <param name="value">The name of the type or method from which to remove the package name.</param>
/// <param name="packageName">The name of the package to be removed.</param>
/// <returns>A string containing the modified name.</returns>
public static string TrimPackageName(this string value, string packageName)
{
// check if the package name straddles a casing boundary, if it
// does then don't trim the name. e.g. if value == "SubscriptionState"
// and packageName == "subscriptions" it would be incorrect to remove
// the package name from the value.
bool straddle = value.Length > packageName.Length && !char.IsUpper(value[packageName.Length]);
var originalLen = value.Length;
if (!straddle)
value = value.TrimStartsWith(packageName);
// if nothing was trimmed and the package name is plural then make the
// package name singular and try removing that if it's not too short
if (value.Length == originalLen && packageName.EndsWith("s") && (value.Length - packageName.Length + 1) > 1)
{
value = value.TrimPackageName(packageName.Substring(0, packageName.Length - 1));
}
return value;
}
/// <summary>
/// Converts List to formatted string of arguments.
/// Refactor -> Generator
/// </summary>
/// <param name="arguments"></param>
/// <returns></returns>
public static string EmitAsArguments(this IList<string> arguments)
{
var sb = new StringBuilder();
if (arguments.Count() > 0)
{
sb.Append(arguments[0]);
for (var i = 1; i < arguments.Count(); i++)
{
sb.AppendLine(",");
sb.Append(arguments[i]);
}
}
return sb.ToString();
}
// This function removes html anchor tags and reformats the comment text.
// For example, Swagger documentation text --> "This is a documentation text. For information see <a href=LINK">CONTENT.</a>"
// reformats to --> "This is a documentation text. For information see CONTENT (LINK)."
// Refactor -> Namer
// Still, nobody uses this...
public static string UnwrapAnchorTags(this string comments)
{
string pattern = "([^<>]*)<a\\s*.*\\shref\\s*=\\s*[\'\"]([^\'\"]*)[\'\"][^>]*>(.*)</a>";
Regex r = new Regex(pattern);
Match match = r.Match(comments);
if (match.Success)
{
string content = match.Groups[3].Value;
string link = match.Groups[2].Value;
return (".?!;:".Contains(content[content.Length - 1])
? match.Groups[1].Value + content.Substring(0, content.Length - 1) + " (" + link + ")" + content[content.Length - 1]
: match.Groups[1].Value + content + " (" + link + ")");
}
return comments;
}
/// <summary>
/// Return the separator associated with a given collectionFormat
/// It looks like other generators use this for split / join operations ?
/// Refactor -> I think CodeMoedelTransformer
/// </summary>
/// <param name="format">The collection format</param>
/// <returns>The separator</returns>
public static string GetSeparator(this CollectionFormat format)
{
switch (format)
{
case CollectionFormat.Csv:
return ",";
case CollectionFormat.Pipes:
return "|";
case CollectionFormat.Ssv:
return " ";
case CollectionFormat.Tsv:
return "\t";
default:
throw new NotSupportedException(string.Format("Collection format {0} is not supported.", format));
}
}
public static bool IsApiVersion(this string name)
{
string rgx = @"^api[^a-zA-Z0-9_]?version";
return Regex.IsMatch(name, rgx, RegexOptions.IgnoreCase);
}
/////////////////////////////////////////////////////////////////////////////////////////
//
// Type Extensions
//
/////////////////////////////////////////////////////////////////////////////////////////
public static bool IsStreamType(this IModelType body)
{
var r = body as CompositeTypeGo;
return r != null && (r.BaseType.PrimaryType(KnownPrimaryType.Stream));
}
public static bool PrimaryType(this IModelType type, KnownPrimaryType typeToMatch)
{
if (type == null)
{
return false;
}
PrimaryType primaryType = type as PrimaryType;
return primaryType != null && primaryType.KnownPrimaryType == typeToMatch;
}
public static bool CanBeEmpty(this IModelType type)
{
var dictionaryType = type as DictionaryType;
var primaryType = type as PrimaryType;
var sequenceType = type as SequenceType;
var enumType = type as EnumType;
return dictionaryType != null
|| (primaryType != null
&& (primaryType.KnownPrimaryType == KnownPrimaryType.ByteArray
|| primaryType.KnownPrimaryType == KnownPrimaryType.Stream
|| primaryType.KnownPrimaryType == KnownPrimaryType.String))
|| sequenceType != null
|| enumType != null;
}
/// <summary>
/// Returns true if the specified type can be implicitly null.
/// E.g. things like maps, arrays, interfaces etc can all be null.
/// </summary>
/// <param name="type">The type to inspect.</param>
/// <returns>True if the specified type can be null.</returns>
public static bool CanBeNull(this IModelType type)
{
var dictionaryType = type as DictionaryType;
var primaryType = type as PrimaryType;
var sequenceType = type as SequenceType;
return dictionaryType != null
|| (primaryType != null
&& (primaryType.KnownPrimaryType == KnownPrimaryType.ByteArray
|| primaryType.KnownPrimaryType == KnownPrimaryType.Stream))
|| sequenceType != null;
}
public static string GetEmptyCheck(this IModelType type, string valueReference, bool asEmpty = true)
{
if (type is PrimaryTypeGo)
{
return (type as PrimaryTypeGo).GetEmptyCheck(valueReference, asEmpty);
}
else if (type is SequenceTypeGo)
{
return (type as SequenceTypeGo).GetEmptyCheck(valueReference, asEmpty);
}
else if (type is DictionaryTypeGo)
{
return (type as DictionaryTypeGo).GetEmptyCheck(valueReference, asEmpty);
}
else if (type is EnumTypeGo)
{
return (type as EnumTypeGo).GetEmptyCheck(valueReference, asEmpty);
}
else
{
return string.Format(asEmpty
? "{0} == nil"
: "{0} != nil", valueReference);
}
}
/// <summary>
/// Add imports for a type.
/// </summary>
/// <param name="type"></param>
/// <param name="imports"></param>
public static void AddImports(this IModelType type, HashSet<string> imports)
{
if (type is DictionaryTypeGo)
{
(type as DictionaryTypeGo).AddImports(imports);
}
else if (type is PrimaryTypeGo)
{
(type as PrimaryTypeGo).AddImports(imports);
}
else if (type is SequenceTypeGo)
{
(type as SequenceTypeGo).AddImports(imports);
}
}
public static bool ShouldBeSyntheticType(this IModelType type)
{
return (type is PrimaryType || type is SequenceType || type is DictionaryType || type is EnumType);
}
/////////////////////////////////////////////////////////////////////////////////////////
// Validate code
//
// This code generates a validation object which is defined in
// go-autorest/autorest/validation package and is used to validate
// constraints.
// See PR: https://github.com/Azure/go-autorest/tree/master/autorest/validation
//
/////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// Return list of validations for primary, map, sequence and rest of the types.
/// </summary>
/// <param name="p"></param>
/// <param name="name"></param>
/// <param name="method"></param>
/// <param name="isCompositeProperties"></param>
/// <returns></returns>
public static List<string> ValidateType(this IVariable p, string name, HttpMethod method,
bool isCompositeProperties = false)
{
List<string> x = new List<string>();
if (method != HttpMethod.Patch || !p.IsBodyParameter() || isCompositeProperties)
{
x.AddRange(p.Constraints.Select(c => GetConstraint(name, c.Key.ToString(), c.Value)).ToList());
}
List<string> y = new List<string>();
if (x.Count > 0)
{
if (p.CheckNull() || isCompositeProperties)
y.AddRange(x.AddChain(name, NullConstraint, p.IsRequired));
else
y.AddRange(x);
}
else
{
if (p.IsRequired && (p.CheckNull() || isCompositeProperties))
y.AddNullValidation(name, p.IsRequired);
}
return y;
}
/// <summary>
/// Return list of validations for composite type.
/// </summary>
/// <param name="p"></param>
/// <param name="name"></param>
/// <param name="method"></param>
/// <param name="ancestors"></param>
/// <param name="isCompositeProperties"></param>
/// <returns></returns>
public static List<string> ValidateCompositeType(this IVariable p, string name, HttpMethod method, HashSet<string> ancestors,
bool isCompositeProperties = false)
{
List<string> x = new List<string>();
if (method != HttpMethod.Patch || !p.IsBodyParameter() || isCompositeProperties)
{
foreach (var prop in ((CompositeType)p.ModelType).Properties)
{
var primary = prop.ModelType as PrimaryType;
var sequence = prop.ModelType as SequenceType;
var map = prop.ModelType as DictionaryTypeGo;
var composite = prop.ModelType as CompositeType;
// if this type was flattened use the name of the type instead of
// the property name as it's been embedded as an anonymous field
var propName = prop.Name;
if (prop.WasFlattened())
propName = prop.ModelType.Name;
if (primary != null || sequence != null || map != null)
{
x.AddRange(prop.ValidateType($"{name}.{propName}", method, true));
}
else if (composite != null)
{
if (ancestors.Contains(composite.Name))
{
x.AddNullValidation($"{name}.{propName}", p.IsRequired);
}
else
{
ancestors.Add(composite.Name);
x.AddRange(prop.ValidateCompositeType($"{name}.{propName}", method, ancestors, true));
ancestors.Remove(composite.Name);
}
}
}
}
List<string> y = new List<string>();
if (x.Count > 0)
{
if (p.CheckNull() || isCompositeProperties)
y.AddRange(x.AddChain(name, NullConstraint, p.IsRequired));
else
y.AddRange(x);
}
else
{
if (p.IsRequired && (p.CheckNull() || isCompositeProperties))
y.AddNullValidation(name, p.IsRequired);
}
return y;
}
/// <summary>
/// Add null validation in validation object.
/// </summary>
/// <param name="v"></param>
/// <param name="name"></param>
/// <param name="isRequired"></param>
public static void AddNullValidation(this List<string> v, string name, bool isRequired = false)
{
v.Add(GetConstraint(name, NullConstraint, $"{isRequired}".ToLower()));
}
/// <summary>
/// Add chain of validation for composite type.
/// </summary>
/// <param name="x"></param>
/// <param name="name"></param>
/// <param name="constraint"></param>
/// <param name="isRequired"></param>
/// <returns></returns>
public static List<string> AddChain(this List<string> x, string name, string constraint, bool isRequired)
{
List<string> a = new List<string>
{
GetConstraint(name, constraint, $"{isRequired}".ToLower(), true),
$"Chain: []validation.Constraint{{{x[0]}"
};
a.AddRange(x.GetRange(1, x.Count - 1));
a.Add("}}");
return a;
}
/// <summary>
/// CheckNull
/// </summary>
/// <param name="p"></param>
/// <returns></returns>
// Check if type is not a null or pointer type.
public static bool CheckNull(this IVariable p)
{
return p is Parameter && (p.ModelType.IsNullValueType() || !(p.IsRequired || p.ModelType.CanBeEmpty()));
}
/// <summary>
/// Check whether a type is nullable type.
/// </summary>
/// <param name="t"></param>
/// <returns></returns>
public static bool IsNullValueType(this IModelType t)
{
var dictionaryType = t as DictionaryType;
var primaryType = t as PrimaryType;
var sequenceType = t as SequenceType;
return dictionaryType != null
|| (primaryType != null
&& primaryType.KnownPrimaryType == KnownPrimaryType.ByteArray)
|| sequenceType != null;
}
/// <summary>
/// Check if parameter is a body parameter.
/// </summary>
/// <param name="p"></param>
/// <returns></returns>
public static bool IsBodyParameter(this IVariable p)
{
return p is Parameter && ((Parameter)p).Location == ParameterLocation.Body;
}
/// <summary>
/// Construct validation string for validation object for the passed constraint.
/// </summary>
/// <param name="name"></param>
/// <param name="constraintName"></param>
/// <param name="constraintValue"></param>
/// <param name="chain"></param>
/// <returns></returns>
public static string GetConstraint(string name, string constraintName, string constraintValue, bool chain = false)
{
var value = constraintName == Constraint.Pattern.ToString()
? $"`{constraintValue}`"
: constraintValue;
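            // For illustration: with chain == false, GetConstraint("client.Name", "MaxLength", "10")
            // returns the Go literal
            //     {Target: "client.Name", Name: validation.MaxLength, Rule: 10, Chain: nil }
            // (tab-prefixed), which the generated code places inside a []validation.Constraint
            // slice consumed by the go-autorest validation package.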
return string.Format(chain
? "\t{{Target: \"{0}\", Name: validation.{1}, Rule: {2} "
: "\t{{Target: \"{0}\", Name: validation.{1}, Rule: {2}, Chain: nil }}",
name, constraintName, value);
}
}
}
| 1 | 24,268 | nit, does it make sense to get this variables outside the func where they are used? | Azure-autorest | java |
@@ -3,7 +3,7 @@
# Purpose:
# sns-ruby-example-show-subscriptions.rb demonstrates how to list subscriptions to the Amazon Simple Notification Services (SNS) topic using
-# the AWS SDK for JavaScript (v3).
+# the AWS SDK for Ruby.
# Inputs:
# - REGION | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# Purpose:
# sns-ruby-example-show-subscriptions.rb demonstrates how to list subscriptions to the Amazon Simple Notification Services (SNS) topic using
# the AWS SDK for JavaScript (v3).
# Inputs:
# - REGION
# - SNS_TOPIC
# snippet-start:[sns.Ruby.showSubscription]
require 'aws-sdk-sns' # v2: require 'aws-sdk'
def show_subscriptions?(sns_client, topic_arn)
topic = sns_client.topic(topic_arn)
topic.subscriptions.each do |s|
puts s.attributes['Endpoint']
end
rescue StandardError => e
puts "Error while sending the message: #{e.message}"
end
def run_me
topic_arn = 'arn:aws:sns:eu-west-1:164794437551:brmurrubytopic'
region = 'eu-west-1'
sns_client = Aws::SNS::Resource.new(region: region)
puts "Listing subscriptions to the topic."
if show_subscriptions?(sns_client, topic_arn)
else
puts 'There was an error. Stopping program.'
exit 1
end
end
run_me if $PROGRAM_NAME == __FILE__
# snippet-end:[sns.Ruby.showSubscription]
| 1 | 20,568 | to **an** Amazon... Simple Notification **Service** (singular) | awsdocs-aws-doc-sdk-examples | rb |
@@ -297,10 +297,13 @@ func TestOpenTopicFromURL(t *testing.T) {
ctx := context.Background()
for _, test := range tests {
- _, err := pubsub.OpenTopic(ctx, test.URL)
+ top, err := pubsub.OpenTopic(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
+ if top != nil {
+ top.Shutdown(ctx)
+ }
}
}
| 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package awssnssqs
import (
"context"
"fmt"
"net/http"
"strings"
"sync/atomic"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sns"
"github.com/aws/aws-sdk-go/service/sqs"
"gocloud.dev/internal/testing/setup"
"gocloud.dev/pubsub"
"gocloud.dev/pubsub/driver"
"gocloud.dev/pubsub/drivertest"
)
const (
region = "us-east-2"
accountNumber = "462380225722"
)
func newSession() (*session.Session, error) {
return session.NewSession(&aws.Config{
HTTPClient: &http.Client{},
Region: aws.String(region),
MaxRetries: aws.Int(0),
})
}
type harness struct {
sess *session.Session
rt http.RoundTripper
closer func()
numTopics uint32
numSubs uint32
}
func newHarness(ctx context.Context, t *testing.T) (drivertest.Harness, error) {
sess, rt, done := setup.NewAWSSession2(ctx, t, region)
return &harness{sess: sess, rt: rt, closer: done, numTopics: 0, numSubs: 0}, nil
}
func (h *harness) CreateTopic(ctx context.Context, testName string) (dt driver.Topic, cleanup func(), err error) {
topicName := fmt.Sprintf("%s-topic-%d", sanitize(testName), atomic.AddUint32(&h.numTopics, 1))
return createTopic(ctx, topicName, h.sess)
}
func createTopic(ctx context.Context, topicName string, sess *session.Session) (dt driver.Topic, cleanup func(), err error) {
client := sns.New(sess)
out, err := client.CreateTopic(&sns.CreateTopicInput{Name: aws.String(topicName)})
if err != nil {
return nil, nil, fmt.Errorf(`creating topic "%s": %v`, topicName, err)
}
dt = openTopic(ctx, sess, *out.TopicArn, nil)
cleanup = func() {
client.DeleteTopic(&sns.DeleteTopicInput{TopicArn: out.TopicArn})
}
return dt, cleanup, nil
}
func (h *harness) MakeNonexistentTopic(ctx context.Context) (driver.Topic, error) {
const fakeTopicARN = "arn:aws:sns:" + region + ":" + accountNumber + ":nonexistenttopic"
dt := openTopic(ctx, h.sess, fakeTopicARN, nil)
return dt, nil
}
func (h *harness) CreateSubscription(ctx context.Context, dt driver.Topic, testName string) (ds driver.Subscription, cleanup func(), err error) {
subName := fmt.Sprintf("%s-subscription-%d", sanitize(testName), atomic.AddUint32(&h.numSubs, 1))
return createSubscription(ctx, dt, subName, h.sess)
}
func createSubscription(ctx context.Context, dt driver.Topic, subName string, sess *session.Session) (ds driver.Subscription, cleanup func(), err error) {
sqsClient := sqs.New(sess)
out, err := sqsClient.CreateQueue(&sqs.CreateQueueInput{QueueName: aws.String(subName)})
if err != nil {
return nil, nil, fmt.Errorf(`creating subscription queue "%s": %v`, subName, err)
}
ds = openSubscription(ctx, sess, *out.QueueUrl)
snsClient := sns.New(sess, &aws.Config{})
cleanupSub, err := subscribeQueueToTopic(ctx, sqsClient, snsClient, out.QueueUrl, dt)
if err != nil {
return nil, nil, fmt.Errorf("subscribing: %v", err)
}
cleanup = func() {
sqsClient.DeleteQueue(&sqs.DeleteQueueInput{QueueUrl: out.QueueUrl})
cleanupSub()
}
return ds, cleanup, nil
}
func subscribeQueueToTopic(ctx context.Context, sqsClient *sqs.SQS, snsClient *sns.SNS, qURL *string, dt driver.Topic) (func(), error) {
out2, err := sqsClient.GetQueueAttributes(&sqs.GetQueueAttributesInput{
QueueUrl: qURL,
AttributeNames: []*string{aws.String("QueueArn")},
})
if err != nil {
return nil, fmt.Errorf("getting queue ARN for %s: %v", *qURL, err)
}
qARN := out2.Attributes["QueueArn"]
t := dt.(*topic)
subOut, err := snsClient.Subscribe(&sns.SubscribeInput{
TopicArn: aws.String(t.arn),
Endpoint: qARN,
Protocol: aws.String("sqs"),
})
if err != nil {
return nil, fmt.Errorf("subscribing: %v", err)
}
cleanup := func() {
_, _ = snsClient.Unsubscribe(&sns.UnsubscribeInput{
SubscriptionArn: subOut.SubscriptionArn,
})
}
queuePolicy := `{
"Version": "2012-10-17",
"Id": "AllowQueue",
"Statement": [
{
"Sid": "MySQSPolicy001",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "sqs:SendMessage",
"Resource": "` + *qARN + `",
"Condition": {
"ArnEquals": {
"aws:SourceArn": "` + t.arn + `"
}
}
}
]
}`
_, err = sqsClient.SetQueueAttributes(&sqs.SetQueueAttributesInput{
Attributes: map[string]*string{"Policy": &queuePolicy},
QueueUrl: qURL,
})
if err != nil {
return nil, fmt.Errorf("setting policy: %v", err)
}
return cleanup, nil
}
func (h *harness) MakeNonexistentSubscription(ctx context.Context) (driver.Subscription, error) {
const fakeSubscriptionQueueURL = "https://" + region + ".amazonaws.com/" + accountNumber + "/nonexistent-subscription"
return openSubscription(ctx, h.sess, fakeSubscriptionQueueURL), nil
}
func (h *harness) Close() {
h.closer()
}
func (h *harness) MaxBatchSizes() (int, int) {
return sendBatcherOpts.MaxBatchSize, ackBatcherOpts.MaxBatchSize
}
// Tips on dealing with failures when in -record mode:
// - There may be leftover messages in queues. Using the AWS CLI tool,
// purge the queues before running the test.
// E.g.
// aws sqs purge-queue --queue-url URL
// You can get the queue URLs with
// aws sqs list-queues
func TestConformance(t *testing.T) {
asTests := []drivertest.AsTest{awsAsTest{}}
drivertest.RunConformanceTests(t, newHarness, asTests)
}
type awsAsTest struct{}
func (awsAsTest) Name() string {
return "aws test"
}
func (awsAsTest) TopicCheck(top *pubsub.Topic) error {
var s *sns.SNS
if !top.As(&s) {
return fmt.Errorf("cast failed for %T", s)
}
return nil
}
func (awsAsTest) SubscriptionCheck(sub *pubsub.Subscription) error {
var s *sqs.SQS
if !sub.As(&s) {
return fmt.Errorf("cast failed for %T", s)
}
return nil
}
func (awsAsTest) TopicErrorCheck(t *pubsub.Topic, err error) error {
var ae awserr.Error
if !t.ErrorAs(err, &ae) {
return fmt.Errorf("failed to convert %v (%T) to an awserr.Error", err, err)
}
if got, want := ae.Code(), sns.ErrCodeNotFoundException; got != want {
return fmt.Errorf("got %q, want %q", got, want)
}
return nil
}
func (awsAsTest) SubscriptionErrorCheck(s *pubsub.Subscription, err error) error {
var ae awserr.Error
if !s.ErrorAs(err, &ae) {
return fmt.Errorf("failed to convert %v (%T) to an awserr.Error", err, err)
}
if got, want := ae.Code(), sqs.ErrCodeQueueDoesNotExist; got != want {
return fmt.Errorf("got %q, want %q", got, want)
}
return nil
}
func (awsAsTest) MessageCheck(m *pubsub.Message) error {
var sm sqs.Message
if m.As(&sm) {
return fmt.Errorf("cast succeeded for %T, want failure", &sm)
}
var psm *sqs.Message
if !m.As(&psm) {
return fmt.Errorf("cast failed for %T", &psm)
}
return nil
}
func sanitize(testName string) string {
return strings.Replace(testName, "/", "_", -1)
}
// The first run will hang because the SQS queue is not yet subscribed to the
// SNS topic. Go to console.aws.amazon.com and manually subscribe the queue
// to the topic and then rerun this benchmark to get results.
func BenchmarkAwsPubSub(b *testing.B) {
ctx := context.Background()
sess, err := session.NewSession(&aws.Config{
HTTPClient: &http.Client{},
Region: aws.String(region),
MaxRetries: aws.Int(0),
})
if err != nil {
b.Fatal(err)
}
topicName := fmt.Sprintf("%s-topic", b.Name())
dt, cleanup1, err := createTopic(ctx, topicName, sess)
if err != nil {
b.Fatal(err)
}
defer cleanup1()
topic := pubsub.NewTopic(dt, sendBatcherOpts)
defer topic.Shutdown(ctx)
subName := fmt.Sprintf("%s-subscription", b.Name())
ds, cleanup2, err := createSubscription(ctx, dt, subName, sess)
if err != nil {
b.Fatal(err)
}
defer cleanup2()
sub := pubsub.NewSubscription(ds, recvBatcherOpts, ackBatcherOpts)
defer sub.Shutdown(ctx)
drivertest.RunBenchmarks(b, topic, sub)
}
func TestOpenTopicFromURL(t *testing.T) {
tests := []struct {
URL string
WantErr bool
}{
// OK.
{"awssns://arn:aws:service:region:accountid:resourceType/resourcePath", false},
// OK, setting region.
{"awssns://arn:aws:service:region:accountid:resourceType/resourcePath?region=us-east-2", false},
// Invalid parameter.
{"awssns://arn:aws:service:region:accountid:resourceType/resourcePath?param=value", true},
}
ctx := context.Background()
for _, test := range tests {
_, err := pubsub.OpenTopic(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
}
}
func TestOpenSubscriptionFromURL(t *testing.T) {
tests := []struct {
URL string
WantErr bool
}{
// OK.
{"awssqs://sqs.us-east-2.amazonaws.com/99999/my-subscription", false},
// OK, setting region.
{"awssqs://sqs.us-east-2.amazonaws.com/99999/my-subscription?region=us-east-2", false},
// Invalid parameter.
{"awssqs://sqs.us-east-2.amazonaws.com/99999/my-subscription?param=value", true},
}
ctx := context.Background()
for _, test := range tests {
_, err := pubsub.OpenSubscription(ctx, test.URL)
if (err != nil) != test.WantErr {
t.Errorf("%s: got error %v, want error %v", test.URL, err, test.WantErr)
}
}
}
| 1 | 16,640 | Nit: is "top" a short name we use frequently? Seems a bit mysterious, and saves only 2 chars. | google-go-cloud | go |
@@ -52,7 +52,9 @@ import (
// R represents row type in executions table, valid values are:
// R = {Shard = 1, Execution = 2, Transfer = 3, Timer = 4, Replication = 5}
const (
- cassandraProtoVersion = 4
+ // ProtoVersion is the protocol version used to communicate with Cassandra cluster
+ ProtoVersion = 4
+
defaultSessionTimeout = 10 * time.Second
// Special Namespaces related constants
emptyNamespaceID = "10000000-0000-f000-f000-000000000000" | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cassandra
import (
"fmt"
"strings"
"time"
"github.com/gocql/gocql"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
"go.temporal.io/api/serviceerror"
persistencespb "go.temporal.io/server/api/persistence/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/convert"
"go.temporal.io/server/common/log"
p "go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/persistence/serialization"
"go.temporal.io/server/common/persistence/versionhistory"
"go.temporal.io/server/common/primitives/timestamp"
)
// "go.temporal.io/api/serviceerror"
// Guidelines for creating new special UUID constants
// Each UUID should be of the form: E0000000-R000-f000-f000-00000000000x
// Where x is any hexadecimal value, E represents the entity type valid values are:
// E = {NamespaceID = 1, WorkflowID = 2, RunID = 3}
// R represents row type in executions table, valid values are:
// R = {Shard = 1, Execution = 2, Transfer = 3, Timer = 4, Replication = 5}
const (
cassandraProtoVersion = 4
defaultSessionTimeout = 10 * time.Second
// Special Namespaces related constants
emptyNamespaceID = "10000000-0000-f000-f000-000000000000"
// Special Run IDs
emptyRunID = "30000000-0000-f000-f000-000000000000"
permanentRunID = "30000000-0000-f000-f000-000000000001"
// Row Constants for Shard Row
rowTypeShardNamespaceID = "10000000-1000-f000-f000-000000000000"
rowTypeShardWorkflowID = "20000000-1000-f000-f000-000000000000"
rowTypeShardRunID = "30000000-1000-f000-f000-000000000000"
// Row Constants for Transfer Task Row
rowTypeTransferNamespaceID = "10000000-3000-f000-f000-000000000000"
rowTypeTransferWorkflowID = "20000000-3000-f000-f000-000000000000"
rowTypeTransferRunID = "30000000-3000-f000-f000-000000000000"
// Row Constants for Timer Task Row
rowTypeTimerNamespaceID = "10000000-4000-f000-f000-000000000000"
rowTypeTimerWorkflowID = "20000000-4000-f000-f000-000000000000"
rowTypeTimerRunID = "30000000-4000-f000-f000-000000000000"
// Row Constants for Replication Task Row
rowTypeReplicationNamespaceID = "10000000-5000-f000-f000-000000000000"
rowTypeReplicationWorkflowID = "20000000-5000-f000-f000-000000000000"
rowTypeReplicationRunID = "30000000-5000-f000-f000-000000000000"
// Row constants for visibility task row.
rowTypeVisibilityTaskNamespaceID = "10000000-6000-f000-f000-000000000000"
rowTypeVisibilityTaskWorkflowID = "20000000-6000-f000-f000-000000000000"
rowTypeVisibilityTaskRunID = "30000000-6000-f000-f000-000000000000"
// Row Constants for Replication Task DLQ Row. Source cluster name will be used as WorkflowID.
rowTypeDLQNamespaceID = "10000000-6000-f000-f000-000000000000"
rowTypeDLQRunID = "30000000-6000-f000-f000-000000000000"
// Special TaskId constants
rowTypeExecutionTaskID = int64(-10)
rowTypeShardTaskID = int64(-11)
emptyInitiatedID = int64(-7)
stickyTaskQueueTTL = int32(24 * time.Hour / time.Second) // if sticky task_queue stopped being updated, remove it in one day
)
const (
// Row types for table executions
rowTypeShard = iota
rowTypeExecution
rowTypeTransferTask
rowTypeTimerTask
rowTypeReplicationTask
rowTypeDLQ
rowTypeVisibilityTask
)
const (
// Row types for table tasks
rowTypeTask = iota
rowTypeTaskQueue
)
const (
taskQueueTaskID = -12345
initialRangeID = 1 // Id of the first range of a new task queue
)
const (
templateCreateShardQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, visibility_ts, task_id, shard, shard_encoding, range_id)` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?) IF NOT EXISTS`
templateGetShardQuery = `SELECT shard, shard_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateUpdateShardQuery = `UPDATE executions ` +
`SET shard = ?, shard_encoding = ?, range_id = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? ` +
`IF range_id = ?`
templateUpdateCurrentWorkflowExecutionQuery = `UPDATE executions USING TTL 0 ` +
`SET current_run_id = ?, execution_state = ?, execution_state_encoding = ?, workflow_last_write_version = ?, workflow_state = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? ` +
`IF current_run_id = ? `
templateUpdateCurrentWorkflowExecutionForNewQuery = templateUpdateCurrentWorkflowExecutionQuery +
`and workflow_last_write_version = ? ` +
`and workflow_state = ? `
templateCreateCurrentWorkflowExecutionQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, ` +
`visibility_ts, task_id, current_run_id, execution_state, execution_state_encoding, ` +
`workflow_last_write_version, workflow_state) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) IF NOT EXISTS USING TTL 0 `
templateCreateWorkflowExecutionQuery = `INSERT INTO executions (` +
`shard_id, namespace_id, workflow_id, run_id, type, ` +
`execution, execution_encoding, execution_state, execution_state_encoding, next_event_id, ` +
`visibility_ts, task_id, checksum, checksum_encoding) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) IF NOT EXISTS `
templateCreateTransferTaskQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, transfer, transfer_encoding, visibility_ts, task_id) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)`
templateCreateReplicationTaskQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, replication, replication_encoding, visibility_ts, task_id) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)`
templateCreateVisibilityTaskQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, visibility_task_data, visibility_task_encoding, visibility_ts, task_id) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)`
templateCreateTimerTaskQuery = `INSERT INTO executions (` +
`shard_id, type, namespace_id, workflow_id, run_id, timer, timer_encoding, visibility_ts, task_id) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)`
templateUpdateLeaseQuery = `UPDATE executions ` +
`SET range_id = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? ` +
`IF range_id = ?`
templateGetWorkflowExecutionQuery = `SELECT execution, execution_encoding, execution_state, execution_state_encoding, next_event_id, activity_map, activity_map_encoding, timer_map, timer_map_encoding, ` +
`child_executions_map, child_executions_map_encoding, request_cancel_map, request_cancel_map_encoding, signal_map, signal_map_encoding, signal_requested, buffered_events_list, ` +
`checksum, checksum_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateGetCurrentExecutionQuery = `SELECT current_run_id, execution, execution_encoding, execution_state, execution_state_encoding, workflow_last_write_version ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateListWorkflowExecutionQuery = `SELECT run_id, execution, execution_encoding, execution_state, execution_state_encoding, next_event_id ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ?`
templateCheckWorkflowExecutionQuery = `UPDATE executions ` +
`SET next_event_id = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? ` +
`IF next_event_id = ?`
templateUpdateWorkflowExecutionQuery = `UPDATE executions ` +
`SET execution = ? ` +
`, execution_encoding = ? ` +
`, execution_state = ? ` +
`, execution_state_encoding = ? ` +
`, next_event_id = ? ` +
`, checksum = ? ` +
`, checksum_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? ` +
`IF next_event_id = ? `
templateUpdateActivityInfoQuery = `UPDATE executions ` +
`SET activity_map[ ? ] = ?, activity_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetActivityInfoQuery = `UPDATE executions ` +
`SET activity_map = ?, activity_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateUpdateTimerInfoQuery = `UPDATE executions ` +
`SET timer_map[ ? ] = ?, timer_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetTimerInfoQuery = `UPDATE executions ` +
`SET timer_map = ?, timer_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateUpdateChildExecutionInfoQuery = `UPDATE executions ` +
`SET child_executions_map[ ? ] = ?, child_executions_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetChildExecutionInfoQuery = `UPDATE executions ` +
`SET child_executions_map = ?, child_executions_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateUpdateRequestCancelInfoQuery = `UPDATE executions ` +
`SET request_cancel_map[ ? ] = ?, request_cancel_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetRequestCancelInfoQuery = `UPDATE executions ` +
`SET request_cancel_map = ?, request_cancel_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateUpdateSignalInfoQuery = `UPDATE executions ` +
`SET signal_map[ ? ] = ?, signal_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetSignalInfoQuery = `UPDATE executions ` +
`SET signal_map = ?, signal_map_encoding = ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateUpdateSignalRequestedQuery = `UPDATE executions ` +
`SET signal_requested = signal_requested + ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateResetSignalRequestedQuery = `UPDATE executions ` +
`SET signal_requested = ?` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateAppendBufferedEventsQuery = `UPDATE executions ` +
`SET buffered_events_list = buffered_events_list + ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteBufferedEventsQuery = `UPDATE executions ` +
`SET buffered_events_list = [] ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteActivityInfoQuery = `DELETE activity_map[ ? ] ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteTimerInfoQuery = `DELETE timer_map[ ? ] ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteChildExecutionInfoQuery = `DELETE child_executions_map[ ? ] ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteRequestCancelInfoQuery = `DELETE request_cancel_map[ ? ] ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteSignalInfoQuery = `DELETE signal_map[ ? ] ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteWorkflowExecutionMutableStateQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateDeleteWorkflowExecutionCurrentRowQuery = templateDeleteWorkflowExecutionMutableStateQuery + " if current_run_id = ? "
templateDeleteWorkflowExecutionSignalRequestedQuery = `UPDATE executions ` +
`SET signal_requested = signal_requested - ? ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateGetTransferTaskQuery = `SELECT transfer, transfer_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateGetTransferTasksQuery = `SELECT transfer, transfer_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateGetVisibilityTaskQuery = `SELECT visibility_task_data, visibility_task_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateGetVisibilityTasksQuery = `SELECT visibility_task_data, visibility_task_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateGetReplicationTaskQuery = `SELECT replication, replication_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateGetReplicationTasksQuery = `SELECT replication, replication_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateCompleteTransferTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateRangeCompleteTransferTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateCompleteVisibilityTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateRangeCompleteVisibilityTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateCompleteReplicationTaskBeforeQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id <= ?`
templateCompleteReplicationTaskQuery = templateCompleteTransferTaskQuery
templateRangeCompleteReplicationTaskQuery = templateRangeCompleteTransferTaskQuery
templateGetTimerTaskQuery = `SELECT timer, timer_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ? ` +
`and run_id = ? ` +
`and visibility_ts = ? ` +
`and task_id = ? `
templateGetTimerTasksQuery = `SELECT timer, timer_encoding ` +
`FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ?` +
`and namespace_id = ? ` +
`and workflow_id = ?` +
`and run_id = ?` +
`and visibility_ts >= ? ` +
`and visibility_ts < ?`
templateCompleteTimerTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ?` +
`and run_id = ?` +
`and visibility_ts = ? ` +
`and task_id = ?`
templateRangeCompleteTimerTaskQuery = `DELETE FROM executions ` +
`WHERE shard_id = ? ` +
`and type = ? ` +
`and namespace_id = ? ` +
`and workflow_id = ?` +
`and run_id = ?` +
`and visibility_ts >= ? ` +
`and visibility_ts < ?`
templateCreateTaskQuery = `INSERT INTO tasks (` +
`namespace_id, task_queue_name, task_queue_type, type, task_id, task, task_encoding) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?)`
templateCreateTaskWithTTLQuery = `INSERT INTO tasks (` +
`namespace_id, task_queue_name, task_queue_type, type, task_id, task, task_encoding) ` +
`VALUES(?, ?, ?, ?, ?, ?, ?) USING TTL ?`
templateGetTasksQuery = `SELECT task_id, task, task_encoding ` +
`FROM tasks ` +
`WHERE namespace_id = ? ` +
`and task_queue_name = ? ` +
`and task_queue_type = ? ` +
`and type = ? ` +
`and task_id > ? ` +
`and task_id <= ?`
templateCompleteTaskQuery = `DELETE FROM tasks ` +
`WHERE namespace_id = ? ` +
`and task_queue_name = ? ` +
`and task_queue_type = ? ` +
`and type = ? ` +
`and task_id = ?`
templateCompleteTasksLessThanQuery = `DELETE FROM tasks ` +
`WHERE namespace_id = ? ` +
`AND task_queue_name = ? ` +
`AND task_queue_type = ? ` +
`AND type = ? ` +
`AND task_id <= ? `
templateGetTaskQueue = `SELECT ` +
`range_id, ` +
`task_queue, ` +
`task_queue_encoding ` +
`FROM tasks ` +
`WHERE namespace_id = ? ` +
`and task_queue_name = ? ` +
`and task_queue_type = ? ` +
`and type = ? ` +
`and task_id = ?`
templateInsertTaskQueueQuery = `INSERT INTO tasks (` +
`namespace_id, ` +
`task_queue_name, ` +
`task_queue_type, ` +
`type, ` +
`task_id, ` +
`range_id, ` +
`task_queue, ` +
`task_queue_encoding ` +
`) VALUES (?, ?, ?, ?, ?, ?, ?, ?) IF NOT EXISTS`
templateUpdateTaskQueueQuery = `UPDATE tasks SET ` +
`range_id = ?, ` +
`task_queue = ?, ` +
`task_queue_encoding = ? ` +
`WHERE namespace_id = ? ` +
`and task_queue_name = ? ` +
`and task_queue_type = ? ` +
`and type = ? ` +
`and task_id = ? ` +
`IF range_id = ?`
templateUpdateTaskQueueQueryWithTTLPart1 = `INSERT INTO tasks (` +
`namespace_id, ` +
`task_queue_name, ` +
`task_queue_type, ` +
`type, ` +
`task_id ` +
`) VALUES (?, ?, ?, ?, ?) USING TTL ?`
templateUpdateTaskQueueQueryWithTTLPart2 = `UPDATE tasks USING TTL ? SET ` +
`range_id = ?, ` +
`task_queue = ?, ` +
`task_queue_encoding = ? ` +
`WHERE namespace_id = ? ` +
`and task_queue_name = ? ` +
`and task_queue_type = ? ` +
`and type = ? ` +
`and task_id = ? ` +
`IF range_id = ?`
templateDeleteTaskQueueQuery = `DELETE FROM tasks ` +
`WHERE namespace_id = ? ` +
`AND task_queue_name = ? ` +
`AND task_queue_type = ? ` +
`AND type = ? ` +
`AND task_id = ? ` +
`IF range_id = ?`
)
var (
defaultDateTime = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
defaultVisibilityTimestamp = p.UnixNanoToDBTimestamp(defaultDateTime.UnixNano())
)
type (
cassandraStore struct {
session *gocql.Session
logger log.Logger
}
	// Implements ExecutionStore, ShardStore and TaskStore
cassandraPersistence struct {
cassandraStore
shardID int32
currentClusterName string
}
)
var _ p.ExecutionStore = (*cassandraPersistence)(nil)
// newShardPersistence is used to create an instance of ShardManager implementation
func newShardPersistence(
session *gocql.Session,
clusterName string,
logger log.Logger,
) (p.ShardStore, error) {
return &cassandraPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
shardID: -1,
currentClusterName: clusterName,
}, nil
}
// NewWorkflowExecutionPersistence is used to create an instance of workflowExecutionManager implementation
func NewWorkflowExecutionPersistence(
shardID int32,
session *gocql.Session,
logger log.Logger,
) (p.ExecutionStore, error) {
return &cassandraPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
shardID: shardID,
}, nil
}
// newTaskPersistence is used to create an instance of TaskManager implementation
func newTaskPersistence(
session *gocql.Session,
logger log.Logger,
) (p.TaskStore, error) {
return &cassandraPersistence{
cassandraStore: cassandraStore{session: session, logger: logger},
shardID: -1,
}, nil
}
func (d *cassandraStore) GetName() string {
return cassandraPersistenceName
}
// Close releases the underlying resources held by this object
func (d *cassandraStore) Close() {
if d.session != nil {
d.session.Close()
}
}
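// GetShardID returns the shard ID this persistence instance is bound to.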
func (d *cassandraPersistence) GetShardID() int32 {
return d.shardID
}
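// CreateShard inserts a new shard row into the executions table with a conditional (IF NOT EXISTS)
// write and returns ShardAlreadyExistError if the row is already present.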
func (d *cassandraPersistence) CreateShard(request *p.CreateShardRequest) error {
shardInfo := request.ShardInfo
shardInfo.UpdateTime = timestamp.TimeNowPtrUtc()
data, err := serialization.ShardInfoToBlob(shardInfo)
if err != nil {
return convertCommonErrors("CreateShard", err)
}
query := d.session.Query(templateCreateShardQuery,
shardInfo.GetShardId(),
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
data.Data,
data.EncodingType.String(),
shardInfo.GetRangeId())
previous := make(map[string]interface{})
applied, err := query.MapScanCAS(previous)
if err != nil {
return convertCommonErrors("CreateShard", err)
}
if !applied {
data := previous["shard"].([]byte)
encoding := previous["shard_encoding"].(string)
shard, _ := serialization.ShardInfoFromBlob(data, encoding, d.currentClusterName)
return &p.ShardAlreadyExistError{
Msg: fmt.Sprintf("Shard already exists in executions table. ShardId: %v, RangeId: %v",
shard.GetShardId(), shard.GetRangeId()),
}
}
return nil
}
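// GetShard reads the shard row for the requested shard ID and deserializes it into ShardInfo.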
func (d *cassandraPersistence) GetShard(request *p.GetShardRequest) (*p.GetShardResponse, error) {
shardID := request.ShardID
query := d.session.Query(templateGetShardQuery,
shardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID)
var data []byte
var encoding string
if err := query.Scan(&data, &encoding); err != nil {
return nil, convertCommonErrors("GetShard", err)
}
info, err := serialization.ShardInfoFromBlob(data, encoding, d.currentClusterName)
if err != nil {
return nil, convertCommonErrors("GetShard", err)
}
return &p.GetShardResponse{ShardInfo: info}, nil
}
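// UpdateShard updates the shard row conditioned on the previous range ID; a mismatch is
// reported as ShardOwnershipLostError.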
func (d *cassandraPersistence) UpdateShard(request *p.UpdateShardRequest) error {
shardInfo := request.ShardInfo
shardInfo.UpdateTime = timestamp.TimeNowPtrUtc()
data, err := serialization.ShardInfoToBlob(shardInfo)
if err != nil {
return convertCommonErrors("UpdateShard", err)
}
query := d.session.Query(templateUpdateShardQuery,
data.Data,
data.EncodingType.String(),
shardInfo.GetRangeId(),
shardInfo.GetShardId(), // Where
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.PreviousRangeID) // If
previous := make(map[string]interface{})
applied, err := query.MapScanCAS(previous)
if err != nil {
return convertCommonErrors("UpdateShard", err)
}
if !applied {
var columns []string
for k, v := range previous {
columns = append(columns, fmt.Sprintf("%s=%v", k, v))
}
return &p.ShardOwnershipLostError{
ShardID: d.shardID,
Msg: fmt.Sprintf("Failed to update shard. previous_range_id: %v, columns: (%v)",
request.PreviousRangeID, strings.Join(columns, ",")),
}
}
return nil
}
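// CreateWorkflowExecution persists a new workflow run in a single logged batch, creating or
// updating the current-execution row according to the create mode and verifying that the
// shard range ID has not changed.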
func (d *cassandraPersistence) CreateWorkflowExecution(
request *p.InternalCreateWorkflowExecutionRequest,
) (*p.CreateWorkflowExecutionResponse, error) {
batch := d.session.NewBatch(gocql.LoggedBatch)
newWorkflow := request.NewWorkflowSnapshot
lastWriteVersion := newWorkflow.LastWriteVersion
namespaceID := newWorkflow.ExecutionInfo.NamespaceId
workflowID := newWorkflow.ExecutionInfo.WorkflowId
runID := newWorkflow.ExecutionState.RunId
if err := p.ValidateCreateWorkflowModeState(
request.Mode,
newWorkflow,
); err != nil {
return nil, err
}
switch request.Mode {
case p.CreateWorkflowModeZombie:
// noop
default:
if err := createOrUpdateCurrentExecution(batch,
request.Mode,
d.shardID,
namespaceID,
workflowID,
runID,
newWorkflow.ExecutionState.State,
newWorkflow.ExecutionState.Status,
newWorkflow.ExecutionState.CreateRequestId,
lastWriteVersion,
request.PreviousRunID,
request.PreviousLastWriteVersion,
); err != nil {
return nil, err
}
}
if err := applyWorkflowSnapshotBatchAsNew(batch,
d.shardID,
&newWorkflow,
); err != nil {
return nil, err
}
batch.Query(templateUpdateLeaseQuery,
request.RangeID,
d.shardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.RangeID,
)
previous := make(map[string]interface{})
applied, iter, err := d.session.MapExecuteBatchCAS(batch, previous)
defer func() {
if iter != nil {
_ = iter.Close()
}
}()
if err != nil {
if isTimeoutError(err) {
// Write may have succeeded, but we don't know
// return this info to the caller so they have the option of trying to find out by executing a read
return nil, &p.TimeoutError{Msg: fmt.Sprintf("CreateWorkflowExecution timed out. Error: %v", err)}
} else if isThrottlingError(err) {
return nil, serviceerror.NewResourceExhausted(fmt.Sprintf("CreateWorkflowExecution operation failed. Error: %v", err))
}
return nil, serviceerror.NewInternal(fmt.Sprintf("CreateWorkflowExecution operation failed. Error: %v", err))
}
if !applied {
// There can be two reasons why the query does not get applied. Either the RangeID has changed, or
// the workflow is already started. Check the row info returned by Cassandra to figure out which one it is.
GetFailureReasonLoop:
for {
rowType, ok := previous["type"].(int)
if !ok {
// This should never happen, as all our rows have the type field.
break GetFailureReasonLoop
}
runID := previous["run_id"].(gocql.UUID).String()
if rowType == rowTypeShard {
if rangeID, ok := previous["range_id"].(int64); ok && rangeID != request.RangeID {
// CreateWorkflowExecution failed because rangeID was modified
return nil, &p.ShardOwnershipLostError{
ShardID: d.shardID,
Msg: fmt.Sprintf("Failed to create workflow execution. Request RangeID: %v, Actual RangeID: %v",
request.RangeID, rangeID),
}
}
} else if rowType == rowTypeExecution && runID == permanentRunID {
var columns []string
for k, v := range previous {
columns = append(columns, fmt.Sprintf("%s=%v", k, v))
}
if state, ok := previous["execution_state"].([]byte); ok {
stateEncoding, ok := previous["execution_state_encoding"].(string)
if !ok {
return nil, newPersistedTypeMismatchError("execution_state_encoding", "", stateEncoding, previous)
}
// todo: Move serialization to manager
protoState, err := serialization.WorkflowExecutionStateFromBlob(state, stateEncoding)
if err != nil {
return nil, err
}
lastWriteVersion := previous["workflow_last_write_version"].(int64)
msg := fmt.Sprintf("Workflow execution already running. WorkflowId: %v, RunId: %v, rangeID: %v, columns: (%v)",
newWorkflow.ExecutionInfo.WorkflowId, protoState.RunId, request.RangeID, strings.Join(columns, ","))
if request.Mode == p.CreateWorkflowModeBrandNew {
// todo: Look at moving these errors upstream to manager
return nil, &p.WorkflowExecutionAlreadyStartedError{
Msg: msg,
StartRequestID: protoState.CreateRequestId,
RunID: protoState.RunId,
State: protoState.State,
Status: protoState.Status,
LastWriteVersion: lastWriteVersion,
}
}
return nil, &p.CurrentWorkflowConditionFailedError{Msg: msg}
}
if prevRunID := previous["current_run_id"].(gocql.UUID).String(); prevRunID != request.PreviousRunID {
// currentRunID on previous run has been changed, return to caller to handle
msg := fmt.Sprintf("Workflow execution creation condition failed by mismatch runID. WorkflowId: %v, Expected Current RunId: %v, Actual Current RunId: %v",
newWorkflow.ExecutionInfo.WorkflowId, request.PreviousRunID, prevRunID)
return nil, &p.CurrentWorkflowConditionFailedError{Msg: msg}
}
msg := fmt.Sprintf("Workflow execution creation condition failed. WorkflowId: %v, CurrentRunId: %v, columns: (%v)",
newWorkflow.ExecutionInfo.WorkflowId, newWorkflow.ExecutionState.RunId, strings.Join(columns, ","))
return nil, &p.CurrentWorkflowConditionFailedError{Msg: msg}
} else if rowType == rowTypeExecution && runID == newWorkflow.ExecutionState.RunId {
msg := fmt.Sprintf("Workflow execution already running. WorkflowId: %v, RunId: %v, rangeId: %v",
newWorkflow.ExecutionInfo.WorkflowId, newWorkflow.ExecutionState.RunId, request.RangeID)
mutableState, err := mutableStateFromRow(previous)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("CreateWorkflowExecution operation error check failed. Error: %v", err))
}
lastWriteVersion := common.EmptyVersion
if mutableState.ExecutionInfo.VersionHistories != nil {
currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(mutableState.ExecutionInfo.VersionHistories)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("CreateWorkflowExecution operation error check failed. Error: %v", err))
}
lastItem, err := versionhistory.GetLastVersionHistoryItem(currentVersionHistory)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("CreateWorkflowExecution operation error check failed. Error: %v", err))
}
lastWriteVersion = lastItem.GetVersion()
}
return nil, &p.WorkflowExecutionAlreadyStartedError{
Msg: msg,
StartRequestID: newWorkflow.ExecutionState.CreateRequestId,
RunID: newWorkflow.ExecutionState.RunId,
State: newWorkflow.ExecutionState.State,
Status: newWorkflow.ExecutionState.Status,
LastWriteVersion: lastWriteVersion,
}
}
previous = make(map[string]interface{})
if !iter.MapScan(previous) {
// Cassandra returns the actual row that caused a condition failure, so we should always return
// from the checks above, but just in case.
break GetFailureReasonLoop
}
}
// At this point we only know that the write was not applied.
// It's much safer to return ShardOwnershipLostError as the default to force the application to reload
// shard to recover from such errors
var columns []string
for k, v := range previous {
columns = append(columns, fmt.Sprintf("%s=%v", k, v))
}
return nil, &p.ShardOwnershipLostError{
ShardID: d.shardID,
Msg: fmt.Sprintf("Failed to create workflow execution. Request RangeID: %v, columns: (%v)",
request.RangeID, strings.Join(columns, ",")),
}
}
return &p.CreateWorkflowExecutionResponse{}, nil
}
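// GetWorkflowExecution loads the mutable state of a workflow run, including its activity, timer,
// child execution, request-cancel and signal maps, buffered events and checksum.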
func (d *cassandraPersistence) GetWorkflowExecution(request *p.GetWorkflowExecutionRequest) (
*p.InternalGetWorkflowExecutionResponse, error) {
execution := request.Execution
query := d.session.Query(templateGetWorkflowExecutionQuery,
d.shardID,
rowTypeExecution,
request.NamespaceID,
execution.WorkflowId,
execution.GetRunId(),
defaultVisibilityTimestamp,
rowTypeExecutionTaskID)
result := make(map[string]interface{})
if err := query.MapScan(result); err != nil {
return nil, convertCommonErrors("GetWorkflowExecution", err)
}
state, err := mutableStateFromRow(result)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("GetWorkflowExecution operation failed. Error: %v", err))
}
activityInfos := make(map[int64]*persistencespb.ActivityInfo)
aMap := result["activity_map"].(map[int64][]byte)
aMapEncoding := result["activity_map_encoding"].(string)
for key, value := range aMap {
aInfo, err := serialization.ActivityInfoFromBlob(value, aMapEncoding)
if err != nil {
return nil, err
}
activityInfos[key] = aInfo
}
state.ActivityInfos = activityInfos
timerInfos := make(map[string]*persistencespb.TimerInfo)
tMapEncoding := result["timer_map_encoding"].(string)
tMap := result["timer_map"].(map[string][]byte)
for key, value := range tMap {
info, err := serialization.TimerInfoFromBlob(value, tMapEncoding)
if err != nil {
return nil, err
}
timerInfos[key] = info
}
state.TimerInfos = timerInfos
childExecutionInfos := make(map[int64]*persistencespb.ChildExecutionInfo)
cMap := result["child_executions_map"].(map[int64][]byte)
cMapEncoding := result["child_executions_map_encoding"].(string)
for key, value := range cMap {
cInfo, err := serialization.ChildExecutionInfoFromBlob(value, cMapEncoding)
if err != nil {
return nil, err
}
childExecutionInfos[key] = cInfo
}
state.ChildExecutionInfos = childExecutionInfos
requestCancelInfos := make(map[int64]*persistencespb.RequestCancelInfo)
rMapEncoding := result["request_cancel_map_encoding"].(string)
rMap := result["request_cancel_map"].(map[int64][]byte)
for key, value := range rMap {
info, err := serialization.RequestCancelInfoFromBlob(value, rMapEncoding)
if err != nil {
return nil, err
}
requestCancelInfos[key] = info
}
state.RequestCancelInfos = requestCancelInfos
signalInfos := make(map[int64]*persistencespb.SignalInfo)
sMapEncoding := result["signal_map_encoding"].(string)
sMap := result["signal_map"].(map[int64][]byte)
for key, value := range sMap {
info, err := serialization.SignalInfoFromBlob(value, sMapEncoding)
if err != nil {
return nil, err
}
signalInfos[key] = info
}
state.SignalInfos = signalInfos
sList := result["signal_requested"].([]gocql.UUID)
signalRequestedIDs := make([]string, len(sList))
for i, v := range sList {
signalRequestedIDs[i] = v.String()
}
state.SignalRequestedIDs = signalRequestedIDs
eList := result["buffered_events_list"].([]map[string]interface{})
bufferedEventsBlobs := make([]*commonpb.DataBlob, 0, len(eList))
for _, v := range eList {
blob := createHistoryEventBatchBlob(v)
bufferedEventsBlobs = append(bufferedEventsBlobs, blob)
}
state.BufferedEvents = bufferedEventsBlobs
cs, err := serialization.ChecksumFromBlob(result["checksum"].([]byte), result["checksum_encoding"].(string))
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("GetWorkflowExecution operation failed. Error: %v", err))
}
state.Checksum = cs
return &p.InternalGetWorkflowExecutionResponse{State: state}, nil
}
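// protoExecutionStateFromRow extracts and deserializes the workflow execution state blob from a Cassandra result row.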
func protoExecutionStateFromRow(result map[string]interface{}) (*persistencespb.WorkflowExecutionState, error) {
state, ok := result["execution_state"].([]byte)
if !ok {
return nil, newPersistedTypeMismatchError("execution_state", "", state, result)
}
stateEncoding, ok := result["execution_state_encoding"].(string)
if !ok {
return nil, newPersistedTypeMismatchError("execution_state_encoding", "", stateEncoding, result)
}
protoState, err := serialization.WorkflowExecutionStateFromBlob(state, stateEncoding)
if err != nil {
return nil, err
}
return protoState, nil
}
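// UpdateWorkflowExecution applies a workflow mutation, and an optional new-run snapshot, in one
// conditional batch, handling the current-execution row according to the update mode and
// re-checking the shard range ID.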
func (d *cassandraPersistence) UpdateWorkflowExecution(request *p.InternalUpdateWorkflowExecutionRequest) error {
batch := d.session.NewBatch(gocql.LoggedBatch)
updateWorkflow := request.UpdateWorkflowMutation
newWorkflow := request.NewWorkflowSnapshot
namespaceID := updateWorkflow.ExecutionInfo.NamespaceId
workflowID := updateWorkflow.ExecutionInfo.WorkflowId
runID := updateWorkflow.ExecutionState.RunId
shardID := d.shardID
if err := p.ValidateUpdateWorkflowModeState(
request.Mode,
updateWorkflow,
newWorkflow,
); err != nil {
return err
}
switch request.Mode {
case p.UpdateWorkflowModeBypassCurrent:
if err := d.assertNotCurrentExecution(
namespaceID,
workflowID,
runID); err != nil {
return err
}
case p.UpdateWorkflowModeUpdateCurrent:
if newWorkflow != nil {
newLastWriteVersion := newWorkflow.LastWriteVersion
newNamespaceID := newWorkflow.ExecutionInfo.NamespaceId
newWorkflowID := newWorkflow.ExecutionInfo.WorkflowId
newRunID := newWorkflow.ExecutionState.RunId
if namespaceID != newNamespaceID {
return serviceerror.NewInternal(fmt.Sprintf("UpdateWorkflowExecution: cannot continue as new to another namespace"))
}
if err := createOrUpdateCurrentExecution(batch,
p.CreateWorkflowModeContinueAsNew,
d.shardID,
newNamespaceID,
newWorkflowID,
newRunID,
newWorkflow.ExecutionState.State,
newWorkflow.ExecutionState.Status,
newWorkflow.ExecutionState.CreateRequestId,
newLastWriteVersion,
runID,
0, // for continue as new, this is not used
); err != nil {
return err
}
} else {
lastWriteVersion := updateWorkflow.LastWriteVersion
executionStateDatablob, err := serialization.WorkflowExecutionStateToBlob(updateWorkflow.ExecutionState)
if err != nil {
return err
}
batch.Query(templateUpdateCurrentWorkflowExecutionQuery,
runID,
executionStateDatablob.Data,
executionStateDatablob.EncodingType.String(),
lastWriteVersion,
updateWorkflow.ExecutionState.State,
d.shardID,
rowTypeExecution,
namespaceID,
workflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
runID,
)
}
default:
return serviceerror.NewInternal(fmt.Sprintf("UpdateWorkflowExecution: unknown mode: %v", request.Mode))
}
if err := applyWorkflowMutationBatch(batch, shardID, &updateWorkflow); err != nil {
return err
}
if newWorkflow != nil {
if err := applyWorkflowSnapshotBatchAsNew(batch,
d.shardID,
newWorkflow,
); err != nil {
return err
}
}
// Verifies that the RangeID has not changed
batch.Query(templateUpdateLeaseQuery,
request.RangeID,
d.shardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.RangeID,
)
previous := make(map[string]interface{})
applied, iter, err := d.session.MapExecuteBatchCAS(batch, previous)
defer func() {
if iter != nil {
_ = iter.Close()
}
}()
if err != nil {
if isTimeoutError(err) {
// Write may have succeeded, but we don't know
// return this info to the caller so they have the option of trying to find out by executing a read
return &p.TimeoutError{Msg: fmt.Sprintf("UpdateWorkflowExecution timed out. Error: %v", err)}
} else if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("UpdateWorkflowExecution operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("UpdateWorkflowExecution operation failed. Error: %v", err))
}
if !applied {
return d.getExecutionConditionalUpdateFailure(previous, iter, updateWorkflow.ExecutionState.RunId, updateWorkflow.Condition, request.RangeID, updateWorkflow.ExecutionState.RunId)
}
return nil
}
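// ConflictResolveWorkflowExecution replaces a workflow run with the reset snapshot, optionally
// applying a mutation to the current run and a snapshot for a new run, and updates or bypasses
// the current-execution row according to the mode.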
func (d *cassandraPersistence) ConflictResolveWorkflowExecution(request *p.InternalConflictResolveWorkflowExecutionRequest) error {
batch := d.session.NewBatch(gocql.LoggedBatch)
currentWorkflow := request.CurrentWorkflowMutation
resetWorkflow := request.ResetWorkflowSnapshot
newWorkflow := request.NewWorkflowSnapshot
shardID := d.shardID
namespaceID := resetWorkflow.ExecutionInfo.NamespaceId
workflowID := resetWorkflow.ExecutionInfo.WorkflowId
if err := p.ValidateConflictResolveWorkflowModeState(
request.Mode,
resetWorkflow,
newWorkflow,
currentWorkflow,
); err != nil {
return err
}
var prevRunID string
switch request.Mode {
case p.ConflictResolveWorkflowModeBypassCurrent:
if err := d.assertNotCurrentExecution(
namespaceID,
workflowID,
resetWorkflow.ExecutionState.RunId); err != nil {
return err
}
case p.ConflictResolveWorkflowModeUpdateCurrent:
executionState := resetWorkflow.ExecutionState
lastWriteVersion := resetWorkflow.LastWriteVersion
if newWorkflow != nil {
lastWriteVersion = newWorkflow.LastWriteVersion
executionState = newWorkflow.ExecutionState
}
runID := executionState.RunId
createRequestID := executionState.CreateRequestId
state := executionState.State
status := executionState.Status
executionStateDatablob, err := serialization.WorkflowExecutionStateToBlob(&persistencespb.WorkflowExecutionState{
RunId: runID,
CreateRequestId: createRequestID,
State: state,
Status: status,
})
if err != nil {
return serviceerror.NewInternal(fmt.Sprintf("ConflictResolveWorkflowExecution operation failed. Error: %v", err))
}
if currentWorkflow != nil {
prevRunID = currentWorkflow.ExecutionState.RunId
batch.Query(templateUpdateCurrentWorkflowExecutionQuery,
runID,
executionStateDatablob.Data,
executionStateDatablob.EncodingType.String(),
lastWriteVersion,
state,
shardID,
rowTypeExecution,
namespaceID,
workflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
prevRunID,
)
} else {
// reset workflow is current
prevRunID = resetWorkflow.ExecutionState.RunId
batch.Query(templateUpdateCurrentWorkflowExecutionQuery,
runID,
executionStateDatablob.Data,
executionStateDatablob.EncodingType.String(),
lastWriteVersion,
state,
shardID,
rowTypeExecution,
namespaceID,
workflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
prevRunID,
)
}
default:
return serviceerror.NewInternal(fmt.Sprintf("ConflictResolveWorkflowExecution: unknown mode: %v", request.Mode))
}
if err := applyWorkflowSnapshotBatchAsReset(batch,
shardID,
&resetWorkflow); err != nil {
return err
}
if currentWorkflow != nil {
if err := applyWorkflowMutationBatch(batch, shardID, currentWorkflow); err != nil {
return err
}
}
if newWorkflow != nil {
if err := applyWorkflowSnapshotBatchAsNew(batch, shardID, newWorkflow); err != nil {
return err
}
}
// Verifies that the RangeID has not changed
batch.Query(templateUpdateLeaseQuery,
request.RangeID,
d.shardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.RangeID,
)
previous := make(map[string]interface{})
applied, iter, err := d.session.MapExecuteBatchCAS(batch, previous)
defer func() {
if iter != nil {
_ = iter.Close()
}
}()
if err != nil {
if isTimeoutError(err) {
// Write may have succeeded, but we don't know
// return this info to the caller so they have the option of trying to find out by executing a read
return &p.TimeoutError{Msg: fmt.Sprintf("ConflictResolveWorkflowExecution timed out. Error: %v", err)}
} else if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("ConflictResolveWorkflowExecution operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("ConflictResolveWorkflowExecution operation failed. Error: %v", err))
}
if !applied {
return d.getExecutionConditionalUpdateFailure(previous, iter, resetWorkflow.ExecutionState.RunId, request.ResetWorkflowSnapshot.Condition, request.RangeID, prevRunID)
}
return nil
}
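// getExecutionConditionalUpdateFailure inspects the rows returned by a failed conditional batch to
// determine whether the range ID, next event ID or current run ID check failed, and converts that
// into the matching error type.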
func (d *cassandraPersistence) getExecutionConditionalUpdateFailure(previous map[string]interface{}, iter *gocql.Iter, requestRunID string, requestCondition int64, requestRangeID int64, requestConditionalRunID string) error {
	// There can be three reasons why the query does not get applied: the RangeID has changed, the next_event_id check failed, or the current_run_id check failed.
// Check the row info returned by Cassandra to figure out which one it is.
rangeIDUnmatch := false
actualRangeID := int64(0)
nextEventIDUnmatch := false
actualNextEventID := int64(0)
runIDUnmatch := false
actualCurrRunID := ""
allPrevious := []map[string]interface{}{}
GetFailureReasonLoop:
for {
rowType, ok := previous["type"].(int)
if !ok {
// This should never happen, as all our rows have the type field.
break GetFailureReasonLoop
}
runID := previous["run_id"].(gocql.UUID).String()
if rowType == rowTypeShard {
if actualRangeID, ok = previous["range_id"].(int64); ok && actualRangeID != requestRangeID {
// UpdateWorkflowExecution failed because rangeID was modified
rangeIDUnmatch = true
}
} else if rowType == rowTypeExecution && runID == requestRunID {
if actualNextEventID, ok = previous["next_event_id"].(int64); ok && actualNextEventID != requestCondition {
// UpdateWorkflowExecution failed because next event ID is unexpected
nextEventIDUnmatch = true
}
} else if rowType == rowTypeExecution && runID == permanentRunID {
// UpdateWorkflowExecution failed because current_run_id is unexpected
if actualCurrRunID = previous["current_run_id"].(gocql.UUID).String(); actualCurrRunID != requestConditionalRunID {
				// UpdateWorkflowExecution failed because the current run ID is unexpected
runIDUnmatch = true
}
}
allPrevious = append(allPrevious, previous)
previous = make(map[string]interface{})
if !iter.MapScan(previous) {
// Cassandra returns the actual row that caused a condition failure, so we should always return
// from the checks above, but just in case.
break GetFailureReasonLoop
}
}
if rangeIDUnmatch {
return &p.ShardOwnershipLostError{
ShardID: d.shardID,
Msg: fmt.Sprintf("Failed to update mutable state. Request RangeID: %v, Actual RangeID: %v",
requestRangeID, actualRangeID),
}
}
if runIDUnmatch {
return &p.CurrentWorkflowConditionFailedError{
Msg: fmt.Sprintf("Failed to update mutable state. Request Condition: %v, Actual Value: %v, Request Current RunId: %v, Actual Value: %v",
requestCondition, actualNextEventID, requestConditionalRunID, actualCurrRunID),
}
}
if nextEventIDUnmatch {
return &p.ConditionFailedError{
Msg: fmt.Sprintf("Failed to update mutable state. Request Condition: %v, Actual Value: %v, Request Current RunId: %v, Actual Value: %v",
requestCondition, actualNextEventID, requestConditionalRunID, actualCurrRunID),
}
}
// At this point we only know that the write was not applied.
var columns []string
columnID := 0
for _, previous := range allPrevious {
for k, v := range previous {
columns = append(columns, fmt.Sprintf("%v: %s=%v", columnID, k, v))
}
columnID++
}
return &p.ConditionFailedError{
Msg: fmt.Sprintf("Failed to reset mutable state. ShardId: %v, RangeId: %v, Condition: %v, Request Current RunId: %v, columns: (%v)",
d.shardID, requestRangeID, requestCondition, requestConditionalRunID, strings.Join(columns, ",")),
}
}
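// assertNotCurrentExecution verifies that the given run ID is not the current run of the workflow;
// a missing current-execution row is treated as success.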
func (d *cassandraPersistence) assertNotCurrentExecution(
namespaceID string,
workflowID string,
runID string,
) error {
if resp, err := d.GetCurrentExecution(&p.GetCurrentExecutionRequest{
NamespaceID: namespaceID,
WorkflowID: workflowID,
}); err != nil {
if _, ok := err.(*serviceerror.NotFound); ok {
// allow bypassing no current record
return nil
}
return err
} else if resp.RunID == runID {
return &p.ConditionFailedError{
Msg: fmt.Sprintf("Assertion on current record failed. Current run ID is not expected: %v", resp.RunID),
}
}
return nil
}
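// DeleteWorkflowExecution removes the mutable state row for a workflow run.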
func (d *cassandraPersistence) DeleteWorkflowExecution(request *p.DeleteWorkflowExecutionRequest) error {
query := d.session.Query(templateDeleteWorkflowExecutionMutableStateQuery,
d.shardID,
rowTypeExecution,
request.NamespaceID,
request.WorkflowID,
request.RunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("DeleteWorkflowExecution operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("DeleteWorkflowExecution operation failed. Error: %v", err))
}
return nil
}
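// DeleteCurrentWorkflowExecution deletes the current-execution row for a workflow, conditioned on
// the expected current run ID.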
func (d *cassandraPersistence) DeleteCurrentWorkflowExecution(request *p.DeleteCurrentWorkflowExecutionRequest) error {
query := d.session.Query(templateDeleteWorkflowExecutionCurrentRowQuery,
d.shardID,
rowTypeExecution,
request.NamespaceID,
request.WorkflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID,
request.RunID)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("DeleteWorkflowCurrentRow operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("DeleteWorkflowCurrentRow operation failed. Error: %v", err))
}
return nil
}
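// GetCurrentExecution returns the current run ID and execution state for the given namespace and workflow ID.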
func (d *cassandraPersistence) GetCurrentExecution(request *p.GetCurrentExecutionRequest) (*p.GetCurrentExecutionResponse,
error) {
query := d.session.Query(templateGetCurrentExecutionQuery,
d.shardID,
rowTypeExecution,
request.NamespaceID,
request.WorkflowID,
permanentRunID,
defaultVisibilityTimestamp,
rowTypeExecutionTaskID)
result := make(map[string]interface{})
if err := query.MapScan(result); err != nil {
if err == gocql.ErrNotFound {
return nil, serviceerror.NewNotFound(fmt.Sprintf("Workflow execution not found. WorkflowId: %v", request.WorkflowID))
} else if isThrottlingError(err) {
return nil, serviceerror.NewResourceExhausted(fmt.Sprintf("GetCurrentExecution operation failed. Error: %v", err))
}
return nil, serviceerror.NewInternal(fmt.Sprintf("GetCurrentExecution operation failed. Error: %v", err))
}
currentRunID := result["current_run_id"].(gocql.UUID).String()
lastWriteVersion := result["workflow_last_write_version"].(int64)
executionState, err := protoExecutionStateFromRow(result)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("GetCurrentExecution operation failed. Error: %v", err))
}
return &p.GetCurrentExecutionResponse{
RunID: currentRunID,
StartRequestID: executionState.CreateRequestId,
State: executionState.State,
Status: executionState.Status,
LastWriteVersion: lastWriteVersion,
}, nil
}
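// ListConcreteExecutions pages through the concrete execution rows of this shard, skipping the
// current-execution marker rows.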
func (d *cassandraPersistence) ListConcreteExecutions(
request *p.ListConcreteExecutionsRequest,
) (*p.InternalListConcreteExecutionsResponse, error) {
query := d.session.Query(
templateListWorkflowExecutionQuery,
d.shardID,
rowTypeExecution,
).PageSize(request.PageSize).PageState(request.PageToken)
iter := query.Iter()
if iter == nil {
return nil, serviceerror.NewInternal("ListConcreteExecutions operation failed. Not able to create query iterator.")
}
response := &p.InternalListConcreteExecutionsResponse{}
result := make(map[string]interface{})
for iter.MapScan(result) {
runID := result["run_id"].(gocql.UUID).String()
if runID == permanentRunID {
result = make(map[string]interface{})
continue
}
if _, ok := result["execution"]; ok {
state, err := mutableStateFromRow(result)
if err != nil {
return nil, err
}
response.States = append(response.States, state)
}
result = make(map[string]interface{})
}
nextPageToken := iter.PageState()
response.NextPageToken = make([]byte, len(nextPageToken))
copy(response.NextPageToken, nextPageToken)
return response, nil
}
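// AddTasks writes transfer, timer, replication and visibility tasks for an execution in one batch,
// conditioned on the shard range ID being unchanged.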
func (d *cassandraPersistence) AddTasks(request *p.AddTasksRequest) error {
batch := d.session.NewBatch(gocql.LoggedBatch)
if err := applyTasks(
batch,
d.shardID,
request.NamespaceID,
request.WorkflowID,
request.RunID,
request.TransferTasks,
request.TimerTasks,
request.ReplicationTasks,
request.VisibilityTasks,
); err != nil {
return err
}
batch.Query(templateUpdateLeaseQuery,
request.RangeID,
d.shardID,
rowTypeShard,
rowTypeShardNamespaceID,
rowTypeShardWorkflowID,
rowTypeShardRunID,
defaultVisibilityTimestamp,
rowTypeShardTaskID,
request.RangeID,
)
previous := make(map[string]interface{})
applied, iter, err := d.session.MapExecuteBatchCAS(batch, previous)
defer func() {
if iter != nil {
_ = iter.Close()
}
}()
if err != nil {
if isTimeoutError(err) {
// Write may have succeeded, but we don't know
// return this info to the caller so they have the option of trying to find out by executing a read
return &p.TimeoutError{Msg: fmt.Sprintf("AddTasks timed out. Error: %v", err)}
} else if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("AddTasks operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("AddTasks operation failed. Error: %v", err))
}
if !applied {
if previousRangeID, ok := previous["range_id"].(int64); ok && previousRangeID != request.RangeID {
			// AddTasks failed because rangeID was modified
return &p.ShardOwnershipLostError{
ShardID: d.shardID,
Msg: fmt.Sprintf("Failed to add tasks. Request RangeID: %v, Actual RangeID: %v", request.RangeID, previousRangeID),
}
} else {
return serviceerror.NewInternal("AddTasks operation failed: %v")
}
}
return nil
}
func (d *cassandraPersistence) GetTransferTask(request *p.GetTransferTaskRequest) (*p.GetTransferTaskResponse, error) {
shardID := d.shardID
taskID := request.TaskID
query := d.session.Query(templateGetTransferTaskQuery,
shardID,
rowTypeTransferTask,
rowTypeTransferNamespaceID,
rowTypeTransferWorkflowID,
rowTypeTransferRunID,
defaultVisibilityTimestamp,
taskID)
var data []byte
var encoding string
if err := query.Scan(&data, &encoding); err != nil {
return nil, convertCommonErrors("GetTransferTask", err)
}
info, err := serialization.TransferTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, convertCommonErrors("GetTransferTask", err)
}
return &p.GetTransferTaskResponse{TransferTaskInfo: info}, nil
}
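// GetTransferTasks returns a page of transfer tasks with task IDs in the range (ReadLevel, MaxReadLevel].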
func (d *cassandraPersistence) GetTransferTasks(request *p.GetTransferTasksRequest) (*p.GetTransferTasksResponse, error) {
	// Reading transfer tasks needs to be quorum level consistent, otherwise we could lose tasks
query := d.session.Query(templateGetTransferTasksQuery,
d.shardID,
rowTypeTransferTask,
rowTypeTransferNamespaceID,
rowTypeTransferWorkflowID,
rowTypeTransferRunID,
defaultVisibilityTimestamp,
request.ReadLevel,
request.MaxReadLevel,
).PageSize(request.BatchSize).PageState(request.NextPageToken)
iter := query.Iter()
if iter == nil {
return nil, serviceerror.NewInternal("GetTransferTasks operation failed. Not able to create query iterator.")
}
response := &p.GetTransferTasksResponse{}
var data []byte
var encoding string
for iter.Scan(&data, &encoding) {
t, err := serialization.TransferTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, convertCommonErrors("GetTransferTasks", err)
}
response.Tasks = append(response.Tasks, t)
}
nextPageToken := iter.PageState()
response.NextPageToken = make([]byte, len(nextPageToken))
copy(response.NextPageToken, nextPageToken)
if err := iter.Close(); err != nil {
return nil, convertCommonErrors("GetTransferTasks", err)
}
return response, nil
}
func (d *cassandraPersistence) GetVisibilityTask(request *p.GetVisibilityTaskRequest) (*p.GetVisibilityTaskResponse, error) {
shardID := d.shardID
taskID := request.TaskID
query := d.session.Query(templateGetVisibilityTaskQuery,
shardID,
rowTypeVisibilityTask,
rowTypeVisibilityTaskNamespaceID,
rowTypeVisibilityTaskWorkflowID,
rowTypeVisibilityTaskRunID,
defaultVisibilityTimestamp,
taskID)
var data []byte
var encoding string
if err := query.Scan(&data, &encoding); err != nil {
return nil, convertCommonErrors("GetVisibilityTask", err)
}
info, err := serialization.VisibilityTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, convertCommonErrors("GetVisibilityTask", err)
}
return &p.GetVisibilityTaskResponse{VisibilityTaskInfo: info}, nil
}
func (d *cassandraPersistence) GetVisibilityTasks(request *p.GetVisibilityTasksRequest) (*p.GetVisibilityTasksResponse, error) {
	// Reading visibility tasks needs to be quorum level consistent, otherwise we could lose tasks
query := d.session.Query(templateGetVisibilityTasksQuery,
d.shardID,
rowTypeVisibilityTask,
rowTypeVisibilityTaskNamespaceID,
rowTypeVisibilityTaskWorkflowID,
rowTypeVisibilityTaskRunID,
defaultVisibilityTimestamp,
request.ReadLevel,
request.MaxReadLevel,
).PageSize(request.BatchSize).PageState(request.NextPageToken)
iter := query.Iter()
if iter == nil {
return nil, serviceerror.NewInternal("GetVisibilityTasks operation failed. Not able to create query iterator.")
}
response := &p.GetVisibilityTasksResponse{}
var data []byte
var encoding string
for iter.Scan(&data, &encoding) {
t, err := serialization.VisibilityTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, convertCommonErrors("GetVisibilityTasks", err)
}
response.Tasks = append(response.Tasks, t)
}
nextPageToken := iter.PageState()
response.NextPageToken = make([]byte, len(nextPageToken))
copy(response.NextPageToken, nextPageToken)
if err := iter.Close(); err != nil {
return nil, convertCommonErrors("GetVisibilityTasks", err)
}
return response, nil
}
func (d *cassandraPersistence) GetReplicationTask(request *p.GetReplicationTaskRequest) (*p.GetReplicationTaskResponse, error) {
shardID := d.shardID
taskID := request.TaskID
query := d.session.Query(templateGetReplicationTaskQuery,
shardID,
rowTypeReplicationTask,
rowTypeReplicationNamespaceID,
rowTypeReplicationWorkflowID,
rowTypeReplicationRunID,
defaultVisibilityTimestamp,
taskID)
var data []byte
var encoding string
if err := query.Scan(&data, &encoding); err != nil {
return nil, convertCommonErrors("GetReplicationTask", err)
}
info, err := serialization.ReplicationTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, convertCommonErrors("GetReplicationTask", err)
}
return &p.GetReplicationTaskResponse{ReplicationTaskInfo: info}, nil
}
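// GetReplicationTasks returns a page of replication tasks with task IDs in the range (ReadLevel, MaxReadLevel].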
func (d *cassandraPersistence) GetReplicationTasks(
request *p.GetReplicationTasksRequest,
) (*p.GetReplicationTasksResponse, error) {
	// Reading replication tasks needs to be quorum level consistent, otherwise we could lose tasks
query := d.session.Query(templateGetReplicationTasksQuery,
d.shardID,
rowTypeReplicationTask,
rowTypeReplicationNamespaceID,
rowTypeReplicationWorkflowID,
rowTypeReplicationRunID,
defaultVisibilityTimestamp,
request.ReadLevel,
request.MaxReadLevel,
).PageSize(request.BatchSize).PageState(request.NextPageToken)
return d.populateGetReplicationTasksResponse(query, "GetReplicationTasks")
}
func (d *cassandraPersistence) populateGetReplicationTasksResponse(
query *gocql.Query, operation string,
) (*p.GetReplicationTasksResponse, error) {
iter := query.Iter()
if iter == nil {
return nil, serviceerror.NewInternal("GetReplicationTasks operation failed. Not able to create query iterator.")
}
response := &p.GetReplicationTasksResponse{}
var data []byte
var encoding string
for iter.Scan(&data, &encoding) {
t, err := serialization.ReplicationTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, convertCommonErrors(operation, err)
}
response.Tasks = append(response.Tasks, t)
}
nextPageToken := iter.PageState()
response.NextPageToken = make([]byte, len(nextPageToken))
copy(response.NextPageToken, nextPageToken)
if err := iter.Close(); err != nil {
return nil, convertCommonErrors(operation, err)
}
return response, nil
}
func (d *cassandraPersistence) CompleteTransferTask(request *p.CompleteTransferTaskRequest) error {
query := d.session.Query(templateCompleteTransferTaskQuery,
d.shardID,
rowTypeTransferTask,
rowTypeTransferNamespaceID,
rowTypeTransferWorkflowID,
rowTypeTransferRunID,
defaultVisibilityTimestamp,
request.TaskID)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("CompleteTransferTask operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("CompleteTransferTask operation failed. Error: %v", err))
}
return nil
}
func (d *cassandraPersistence) RangeCompleteTransferTask(request *p.RangeCompleteTransferTaskRequest) error {
query := d.session.Query(templateRangeCompleteTransferTaskQuery,
d.shardID,
rowTypeTransferTask,
rowTypeTransferNamespaceID,
rowTypeTransferWorkflowID,
rowTypeTransferRunID,
defaultVisibilityTimestamp,
request.ExclusiveBeginTaskID,
request.InclusiveEndTaskID,
)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("RangeCompleteTransferTask operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("RangeCompleteTransferTask operation failed. Error: %v", err))
}
return nil
}
func (d *cassandraPersistence) CompleteVisibilityTask(request *p.CompleteVisibilityTaskRequest) error {
query := d.session.Query(templateCompleteVisibilityTaskQuery,
d.shardID,
rowTypeVisibilityTask,
rowTypeVisibilityTaskNamespaceID,
rowTypeVisibilityTaskWorkflowID,
rowTypeVisibilityTaskRunID,
defaultVisibilityTimestamp,
request.TaskID)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("CompleteVisibilityTask operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("CompleteVisibilityTask operation failed. Error: %v", err))
}
return nil
}
func (d *cassandraPersistence) RangeCompleteVisibilityTask(request *p.RangeCompleteVisibilityTaskRequest) error {
query := d.session.Query(templateRangeCompleteVisibilityTaskQuery,
d.shardID,
rowTypeVisibilityTask,
rowTypeVisibilityTaskNamespaceID,
rowTypeVisibilityTaskWorkflowID,
rowTypeVisibilityTaskRunID,
defaultVisibilityTimestamp,
request.ExclusiveBeginTaskID,
request.InclusiveEndTaskID,
)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("RangeCompleteVisibilityTask operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("RangeCompleteVisibilityTask operation failed. Error: %v", err))
}
return nil
}
func (d *cassandraPersistence) CompleteReplicationTask(request *p.CompleteReplicationTaskRequest) error {
query := d.session.Query(templateCompleteReplicationTaskQuery,
d.shardID,
rowTypeReplicationTask,
rowTypeReplicationNamespaceID,
rowTypeReplicationWorkflowID,
rowTypeReplicationRunID,
defaultVisibilityTimestamp,
request.TaskID)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("CompleteReplicationTask operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("CompleteReplicationTask operation failed. Error: %v", err))
}
return nil
}
func (d *cassandraPersistence) RangeCompleteReplicationTask(
request *p.RangeCompleteReplicationTaskRequest,
) error {
query := d.session.Query(templateCompleteReplicationTaskBeforeQuery,
d.shardID,
rowTypeReplicationTask,
rowTypeReplicationNamespaceID,
rowTypeReplicationWorkflowID,
rowTypeReplicationRunID,
defaultVisibilityTimestamp,
request.InclusiveEndTaskID,
)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("RangeCompleteReplicationTask operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("RangeCompleteReplicationTask operation failed. Error: %v", err))
}
return nil
}
func (d *cassandraPersistence) CompleteTimerTask(request *p.CompleteTimerTaskRequest) error {
ts := p.UnixNanoToDBTimestamp(request.VisibilityTimestamp.UnixNano())
query := d.session.Query(templateCompleteTimerTaskQuery,
d.shardID,
rowTypeTimerTask,
rowTypeTimerNamespaceID,
rowTypeTimerWorkflowID,
rowTypeTimerRunID,
ts,
request.TaskID)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("CompleteTimerTask operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("CompleteTimerTask operation failed. Error: %v", err))
}
return nil
}
func (d *cassandraPersistence) RangeCompleteTimerTask(request *p.RangeCompleteTimerTaskRequest) error {
start := p.UnixNanoToDBTimestamp(request.InclusiveBeginTimestamp.UnixNano())
end := p.UnixNanoToDBTimestamp(request.ExclusiveEndTimestamp.UnixNano())
query := d.session.Query(templateRangeCompleteTimerTaskQuery,
d.shardID,
rowTypeTimerTask,
rowTypeTimerNamespaceID,
rowTypeTimerWorkflowID,
rowTypeTimerRunID,
start,
end,
)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("RangeCompleteTimerTask operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("RangeCompleteTimerTask operation failed. Error: %v", err))
}
return nil
}
// From TaskManager interface
func (d *cassandraPersistence) LeaseTaskQueue(request *p.LeaseTaskQueueRequest) (*p.LeaseTaskQueueResponse, error) {
if len(request.TaskQueue) == 0 {
return nil, serviceerror.NewInternal(fmt.Sprintf("LeaseTaskQueue requires non empty task queue"))
}
now := timestamp.TimeNowPtrUtc()
query := d.session.Query(templateGetTaskQueue,
request.NamespaceID,
request.TaskQueue,
request.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
)
var rangeID int64
var tlBytes []byte
var tlEncoding string
err := query.Scan(&rangeID, &tlBytes, &tlEncoding)
var tl *p.PersistedTaskQueueInfo
if err != nil {
if err == gocql.ErrNotFound { // First time task queue is used
tl = &p.PersistedTaskQueueInfo{
Data: &persistencespb.TaskQueueInfo{
NamespaceId: request.NamespaceID,
Name: request.TaskQueue,
TaskType: request.TaskType,
Kind: request.TaskQueueKind,
AckLevel: 0,
ExpiryTime: nil,
LastUpdateTime: now,
},
RangeID: initialRangeID,
}
datablob, err := serialization.TaskQueueInfoToBlob(tl.Data)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("LeaseTaskQueue operation failed during serialization. TaskQueue: %v, TaskType: %v, Error: %v", request.TaskQueue, request.TaskType, err))
}
query = d.session.Query(templateInsertTaskQueueQuery,
request.NamespaceID,
request.TaskQueue,
request.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
initialRangeID,
datablob.Data,
datablob.EncodingType.String(),
)
} else if isThrottlingError(err) {
return nil, serviceerror.NewResourceExhausted(fmt.Sprintf("LeaseTaskQueue operation failed. TaskQueue: %v, TaskType: %v, Error: %v", request.TaskQueue, request.TaskType, err))
} else {
return nil, serviceerror.NewInternal(fmt.Sprintf("LeaseTaskQueue operation failed. TaskQueue: %v, TaskType: %v, Error: %v", request.TaskQueue, request.TaskType, err))
}
} else {
// if request.RangeID is > 0, we are trying to renew an already existing
// lease on the task queue. If request.RangeID=0, we are trying to steal
// the taskqueue from its current owner
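		// In both cases the conditional update built below compares against the current range_id, so a
		// competing owner that has already advanced the range causes the CAS to fail with a ConditionFailedError.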
if request.RangeID > 0 && request.RangeID != rangeID {
return nil, &p.ConditionFailedError{
Msg: fmt.Sprintf("leaseTaskQueue:renew failed: taskQueue:%v, taskQueueType:%v, haveRangeID:%v, gotRangeID:%v",
request.TaskQueue, request.TaskType, request.RangeID, rangeID),
}
}
tli, err := serialization.TaskQueueInfoFromBlob(tlBytes, tlEncoding)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("LeaseTaskQueue operation failed during serialization. TaskQueue: %v, TaskType: %v, Error: %v", request.TaskQueue, request.TaskType, err))
}
tli.LastUpdateTime = now
tl = &p.PersistedTaskQueueInfo{
Data: tli,
RangeID: rangeID + 1,
}
datablob, err := serialization.TaskQueueInfoToBlob(tl.Data)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("LeaseTaskQueue operation failed during serialization. TaskQueue: %v, TaskType: %v, Error: %v", request.TaskQueue, request.TaskType, err))
}
query = d.session.Query(templateUpdateTaskQueueQuery,
rangeID+1,
datablob.Data,
datablob.EncodingType.String(),
request.NamespaceID,
&request.TaskQueue,
request.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
rangeID,
)
}
previous := make(map[string]interface{})
applied, err := query.MapScanCAS(previous)
if err != nil {
if isThrottlingError(err) {
return nil, serviceerror.NewResourceExhausted(fmt.Sprintf("LeaseTaskQueue operation failed. Error: %v", err))
}
return nil, serviceerror.NewInternal(fmt.Sprintf("LeaseTaskQueue operation failed. Error : %v", err))
}
if !applied {
previousRangeID := previous["range_id"]
return nil, &p.ConditionFailedError{
Msg: fmt.Sprintf("leaseTaskQueue: taskQueue:%v, taskQueueType:%v, haveRangeID:%v, gotRangeID:%v",
request.TaskQueue, request.TaskType, rangeID, previousRangeID),
}
}
return &p.LeaseTaskQueueResponse{TaskQueueInfo: tl}, nil
}
// From TaskManager interface
func (d *cassandraPersistence) UpdateTaskQueue(request *p.UpdateTaskQueueRequest) (*p.UpdateTaskQueueResponse, error) {
tli := *request.TaskQueueInfo
tli.LastUpdateTime = timestamp.TimeNowPtrUtc()
datablob, err := serialization.TaskQueueInfoToBlob(&tli)
if err != nil {
return nil, convertCommonErrors("UpdateTaskQueue", err)
}
var applied bool
previous := make(map[string]interface{})
if tli.Kind == enumspb.TASK_QUEUE_KIND_STICKY { // if task_queue is sticky, then update with TTL
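		// Sticky queues are scoped to a single worker, so their row carries a TTL and ages out on its own
		// if the worker disappears; the non-sticky branch below performs a plain compare-and-set update instead.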
batch := d.session.NewBatch(gocql.LoggedBatch)
batch.Query(templateUpdateTaskQueueQueryWithTTLPart1,
tli.GetNamespaceId(),
&tli.Name,
tli.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
stickyTaskQueueTTL,
)
batch.Query(templateUpdateTaskQueueQueryWithTTLPart2,
stickyTaskQueueTTL,
request.RangeID,
datablob.Data,
datablob.EncodingType.String(),
tli.GetNamespaceId(),
&tli.Name,
tli.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
request.RangeID,
)
applied, _, err = d.session.MapExecuteBatchCAS(batch, previous)
} else {
query := d.session.Query(templateUpdateTaskQueueQuery,
request.RangeID,
datablob.Data,
datablob.EncodingType.String(),
tli.GetNamespaceId(),
&tli.Name,
tli.TaskType,
rowTypeTaskQueue,
taskQueueTaskID,
request.RangeID,
)
applied, err = query.MapScanCAS(previous)
}
if err != nil {
if isThrottlingError(err) {
return nil, serviceerror.NewResourceExhausted(fmt.Sprintf("UpdateTaskQueue operation failed. Error: %v", err))
}
return nil, serviceerror.NewInternal(fmt.Sprintf("UpdateTaskQueue operation failed. Error: %v", err))
}
if !applied {
var columns []string
for k, v := range previous {
columns = append(columns, fmt.Sprintf("%s=%v", k, v))
}
return nil, &p.ConditionFailedError{
Msg: fmt.Sprintf("Failed to update task queue. name: %v, type: %v, rangeID: %v, columns: (%v)",
tli.Name, tli.TaskType, request.RangeID, strings.Join(columns, ",")),
}
}
return &p.UpdateTaskQueueResponse{}, nil
}
func (d *cassandraPersistence) ListTaskQueue(_ *p.ListTaskQueueRequest) (*p.ListTaskQueueResponse, error) {
return nil, serviceerror.NewInternal(fmt.Sprintf("unsupported operation"))
}
func (d *cassandraPersistence) DeleteTaskQueue(request *p.DeleteTaskQueueRequest) error {
query := d.session.Query(templateDeleteTaskQueueQuery,
request.TaskQueue.NamespaceID, request.TaskQueue.Name, request.TaskQueue.TaskType, rowTypeTaskQueue, taskQueueTaskID, request.RangeID)
previous := make(map[string]interface{})
applied, err := query.MapScanCAS(previous)
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("DeleteTaskQueue operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("DeleteTaskQueue operation failed. Error: %v", err))
}
if !applied {
return &p.ConditionFailedError{
Msg: fmt.Sprintf("DeleteTaskQueue operation failed: expected_range_id=%v but found %+v", request.RangeID, previous),
}
}
return nil
}
// From TaskManager interface
func (d *cassandraPersistence) CreateTasks(request *p.CreateTasksRequest) (*p.CreateTasksResponse, error) {
batch := d.session.NewBatch(gocql.LoggedBatch)
namespaceID := request.TaskQueueInfo.Data.GetNamespaceId()
taskQueue := request.TaskQueueInfo.Data.Name
taskQueueType := request.TaskQueueInfo.Data.TaskType
for _, task := range request.Tasks {
ttl := GetTaskTTL(task.Data)
datablob, err := serialization.TaskInfoToBlob(task)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("CreateTasks operation failed during serialization. Error : %v", err))
}
if ttl <= 0 || ttl > maxCassandraTTL {
batch.Query(templateCreateTaskQuery,
namespaceID,
taskQueue,
taskQueueType,
rowTypeTask,
task.GetTaskId(),
datablob.Data,
datablob.EncodingType.String())
} else {
batch.Query(templateCreateTaskWithTTLQuery,
namespaceID,
taskQueue,
taskQueueType,
rowTypeTask,
task.GetTaskId(),
datablob.Data,
datablob.EncodingType.String(),
ttl)
}
}
tl := *request.TaskQueueInfo.Data
tl.LastUpdateTime = timestamp.TimeNowPtrUtc()
datablob, err := serialization.TaskQueueInfoToBlob(&tl)
if err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("CreateTasks operation failed during serialization. Error : %v", err))
}
// The following query is used to ensure that range_id didn't change
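	// Since it is part of the same logged batch, the task inserts above only take effect if this
	// conditional update applies, i.e. if the caller still holds the task queue lease (range_id unchanged).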
batch.Query(templateUpdateTaskQueueQuery,
request.TaskQueueInfo.RangeID,
datablob.Data,
datablob.EncodingType.String(),
namespaceID,
taskQueue,
taskQueueType,
rowTypeTaskQueue,
taskQueueTaskID,
request.TaskQueueInfo.RangeID,
)
previous := make(map[string]interface{})
applied, _, err := d.session.MapExecuteBatchCAS(batch, previous)
if err != nil {
if isThrottlingError(err) {
return nil, serviceerror.NewResourceExhausted(fmt.Sprintf("CreateTasks operation failed. Error: %v", err))
}
return nil, serviceerror.NewInternal(fmt.Sprintf("CreateTasks operation failed. Error : %v", err))
}
if !applied {
rangeID := previous["range_id"]
return nil, &p.ConditionFailedError{
Msg: fmt.Sprintf("Failed to create task. TaskQueue: %v, taskQueueType: %v, rangeID: %v, db rangeID: %v",
taskQueue, taskQueueType, request.TaskQueueInfo.RangeID, rangeID),
}
}
return &p.CreateTasksResponse{}, nil
}
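// GetTaskTTL derives a Cassandra row TTL (in seconds) from a task's ExpiryTime.
// For example, a task expiring 90s from now yields 90, an already-expired task is clamped
// to 1 so the write still carries a minimal TTL, and a task without an ExpiryTime yields 0,
// which CreateTasks treats as "no TTL".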
func GetTaskTTL(task *persistencespb.TaskInfo) int64 {
var ttl int64 = 0
if task.ExpiryTime != nil {
expiryTtl := convert.Int64Ceil(time.Until(timestamp.TimeValue(task.ExpiryTime)).Seconds())
		// 0 means no TTL; we don't want that.
		// TODO: Come back and correctly ignore expired in-memory tasks before persisting
if expiryTtl < 1 {
expiryTtl = 1
}
ttl = expiryTtl
}
return ttl
}
// From TaskManager interface
func (d *cassandraPersistence) GetTasks(request *p.GetTasksRequest) (*p.GetTasksResponse, error) {
if request.MaxReadLevel == nil {
return nil, serviceerror.NewInternal("getTasks: both readLevel and maxReadLevel MUST be specified for cassandra persistence")
}
if request.ReadLevel > *request.MaxReadLevel {
return &p.GetTasksResponse{}, nil
}
// Reading taskqueue tasks need to be quorum level consistent, otherwise we could lose tasks
query := d.session.Query(templateGetTasksQuery,
request.NamespaceID,
request.TaskQueue,
request.TaskType,
rowTypeTask,
request.ReadLevel,
*request.MaxReadLevel,
).PageSize(request.BatchSize)
iter := query.Iter()
if iter == nil {
return nil, serviceerror.NewInternal("GetTasks operation failed. Not able to create query iterator.")
}
response := &p.GetTasksResponse{}
task := make(map[string]interface{})
PopulateTasks:
for iter.MapScan(task) {
_, ok := task["task_id"]
if !ok { // no tasks, but static column record returned
continue
}
rawTask, ok := task["task"]
if !ok {
return nil, newFieldNotFoundError("task", task)
}
taskVal, ok := rawTask.([]byte)
if !ok {
var byteSliceType []byte
return nil, newPersistedTypeMismatchError("task", byteSliceType, rawTask, task)
}
rawEncoding, ok := task["task_encoding"]
if !ok {
return nil, newFieldNotFoundError("task_encoding", task)
}
encodingVal, ok := rawEncoding.(string)
if !ok {
			var stringType string
			return nil, newPersistedTypeMismatchError("task_encoding", stringType, rawEncoding, task)
}
t, err := serialization.TaskInfoFromBlob(taskVal, encodingVal)
if err != nil {
return nil, convertCommonErrors("GetTasks", err)
}
response.Tasks = append(response.Tasks, t)
if len(response.Tasks) == request.BatchSize {
break PopulateTasks
}
		task = make(map[string]interface{}) // Reinitialize the map, as reusing the already-populated map fails on unmarshalling
}
if err := iter.Close(); err != nil {
return nil, serviceerror.NewInternal(fmt.Sprintf("GetTasks operation failed. Error: %v", err))
}
return response, nil
}
// From TaskManager interface
func (d *cassandraPersistence) CompleteTask(request *p.CompleteTaskRequest) error {
tli := request.TaskQueue
query := d.session.Query(templateCompleteTaskQuery,
tli.NamespaceID,
tli.Name,
tli.TaskType,
rowTypeTask,
request.TaskID)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("CompleteTask operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("CompleteTask operation failed. Error: %v", err))
}
return nil
}
// CompleteTasksLessThan deletes all tasks less than or equal to the given task id. This API ignores the
// Limit request parameter, i.e. either all tasks less than or equal to the task_id will be deleted or an
// error will be returned to the caller.
func (d *cassandraPersistence) CompleteTasksLessThan(request *p.CompleteTasksLessThanRequest) (int, error) {
query := d.session.Query(templateCompleteTasksLessThanQuery,
request.NamespaceID, request.TaskQueueName, request.TaskType, rowTypeTask, request.TaskID)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return 0, serviceerror.NewResourceExhausted(fmt.Sprintf("CompleteTasksLessThan operation failed. Error: %v", err))
}
return 0, serviceerror.NewInternal(fmt.Sprintf("CompleteTasksLessThan operation failed. Error: %v", err))
}
return p.UnknownNumRowsAffected, nil
}
func (d *cassandraPersistence) GetTimerTask(request *p.GetTimerTaskRequest) (*p.GetTimerTaskResponse, error) {
shardID := d.shardID
taskID := request.TaskID
visibilityTs := request.VisibilityTimestamp
query := d.session.Query(templateGetTimerTaskQuery,
shardID,
rowTypeTimerTask,
rowTypeTimerNamespaceID,
rowTypeTimerWorkflowID,
rowTypeTimerRunID,
visibilityTs,
taskID)
var data []byte
var encoding string
if err := query.Scan(&data, &encoding); err != nil {
return nil, convertCommonErrors("GetTimerTask", err)
}
info, err := serialization.TimerTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, convertCommonErrors("GetTimerTask", err)
}
return &p.GetTimerTaskResponse{TimerTaskInfo: info}, nil
}
func (d *cassandraPersistence) GetTimerIndexTasks(request *p.GetTimerIndexTasksRequest) (*p.GetTimerIndexTasksResponse,
error) {
// Reading timer tasks need to be quorum level consistent, otherwise we could lose tasks
minTimestamp := p.UnixNanoToDBTimestamp(request.MinTimestamp.UnixNano())
maxTimestamp := p.UnixNanoToDBTimestamp(request.MaxTimestamp.UnixNano())
query := d.session.Query(templateGetTimerTasksQuery,
d.shardID,
rowTypeTimerTask,
rowTypeTimerNamespaceID,
rowTypeTimerWorkflowID,
rowTypeTimerRunID,
minTimestamp,
maxTimestamp,
).PageSize(request.BatchSize).PageState(request.NextPageToken)
iter := query.Iter()
if iter == nil {
return nil, serviceerror.NewInternal("GetTimerTasks operation failed. Not able to create query iterator.")
}
response := &p.GetTimerIndexTasksResponse{}
var data []byte
var encoding string
for iter.Scan(&data, &encoding) {
t, err := serialization.TimerTaskInfoFromBlob(data, encoding)
if err != nil {
return nil, convertCommonErrors("GetTimerIndexTasks", err)
}
response.Timers = append(response.Timers, t)
}
nextPageToken := iter.PageState()
response.NextPageToken = make([]byte, len(nextPageToken))
copy(response.NextPageToken, nextPageToken)
if err := iter.Close(); err != nil {
return nil, convertCommonErrors("GetTimerIndexTasks", err)
}
return response, nil
}
func (d *cassandraPersistence) PutReplicationTaskToDLQ(request *p.PutReplicationTaskToDLQRequest) error {
task := request.TaskInfo
datablob, err := serialization.ReplicationTaskInfoToBlob(task)
if err != nil {
return convertCommonErrors("PutReplicationTaskToDLQ", err)
}
// Use source cluster name as the workflow id for replication dlq
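	// Keying by source cluster gives each cluster its own DLQ range within the shard, so its tasks can be
	// read and purged independently (see GetReplicationTasksFromDLQ and RangeDeleteReplicationTaskFromDLQ).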
query := d.session.Query(templateCreateReplicationTaskQuery,
d.shardID,
rowTypeDLQ,
rowTypeDLQNamespaceID,
request.SourceClusterName,
rowTypeDLQRunID,
datablob.Data,
datablob.EncodingType.String(),
defaultVisibilityTimestamp,
task.GetTaskId())
err = query.Exec()
if err != nil {
return convertCommonErrors("PutReplicationTaskToDLQ", err)
}
return nil
}
func (d *cassandraPersistence) GetReplicationTasksFromDLQ(
request *p.GetReplicationTasksFromDLQRequest,
) (*p.GetReplicationTasksFromDLQResponse, error) {
// Reading replication tasks need to be quorum level consistent, otherwise we could lose tasks
query := d.session.Query(templateGetReplicationTasksQuery,
d.shardID,
rowTypeDLQ,
rowTypeDLQNamespaceID,
request.SourceClusterName,
rowTypeDLQRunID,
defaultVisibilityTimestamp,
request.ReadLevel,
request.ReadLevel+int64(request.BatchSize),
).PageSize(request.BatchSize).PageState(request.NextPageToken)
return d.populateGetReplicationTasksResponse(query, "GetReplicationTasksFromDLQ")
}
func (d *cassandraPersistence) DeleteReplicationTaskFromDLQ(
request *p.DeleteReplicationTaskFromDLQRequest,
) error {
query := d.session.Query(templateCompleteReplicationTaskQuery,
d.shardID,
rowTypeDLQ,
rowTypeDLQNamespaceID,
request.SourceClusterName,
rowTypeDLQRunID,
defaultVisibilityTimestamp,
request.TaskID,
)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("DeleteReplicationTaskFromDLQ operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("DeleteReplicationTaskFromDLQ operation failed. Error: %v", err))
}
return nil
}
func (d *cassandraPersistence) RangeDeleteReplicationTaskFromDLQ(
request *p.RangeDeleteReplicationTaskFromDLQRequest,
) error {
query := d.session.Query(templateRangeCompleteReplicationTaskQuery,
d.shardID,
rowTypeDLQ,
rowTypeDLQNamespaceID,
request.SourceClusterName,
rowTypeDLQRunID,
defaultVisibilityTimestamp,
request.ExclusiveBeginTaskID,
request.InclusiveEndTaskID,
)
err := query.Exec()
if err != nil {
if isThrottlingError(err) {
return serviceerror.NewResourceExhausted(fmt.Sprintf("RangeDeleteReplicationTaskFromDLQ operation failed. Error: %v", err))
}
return serviceerror.NewInternal(fmt.Sprintf("RangeDeleteReplicationTaskFromDLQ operation failed. Error: %v", err))
}
return nil
}
func mutableStateFromRow(result map[string]interface{}) (*p.InternalWorkflowMutableState, error) {
eiBytes, ok := result["execution"].([]byte)
if !ok {
return nil, newPersistedTypeMismatchError("execution", "", eiBytes, result)
}
eiEncoding, ok := result["execution_encoding"].(string)
if !ok {
return nil, newPersistedTypeMismatchError("execution_encoding", "", eiEncoding, result)
}
protoInfo, err := serialization.WorkflowExecutionInfoFromBlob(eiBytes, eiEncoding)
if err != nil {
return nil, err
}
nextEventID, ok := result["next_event_id"].(int64)
if !ok {
return nil, newPersistedTypeMismatchError("next_event_id", "", nextEventID, result)
}
protoState, err := protoExecutionStateFromRow(result)
if err != nil {
return nil, err
}
mutableState := &p.InternalWorkflowMutableState{
ExecutionInfo: protoInfo,
ExecutionState: protoState,
NextEventID: nextEventID,
}
return mutableState, nil
}
| 1 | 11,084 | This name is really confusing. May be just remove this const completely and hardcode `4` where it is used? | temporalio-temporal | go |
@@ -558,12 +558,6 @@ module RSpec::Core
context "with single pattern" do
before { config.pattern = "**/*_foo.rb" }
- it "loads files following pattern" do
- file = File.expand_path(File.dirname(__FILE__) + "/resources/a_foo.rb")
- assign_files_or_directories_to_run file
- expect(config.files_to_run).to include(file)
- end
-
it "loads files in directories following pattern" do
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir | 1 | require 'spec_helper'
require 'tmpdir'
require 'rspec/support/spec/in_sub_process'
module RSpec::Core
RSpec.describe Configuration do
include RSpec::Support::InSubProcess
let(:config) { Configuration.new }
let(:exclusion_filter) { config.exclusion_filter.rules }
let(:inclusion_filter) { config.inclusion_filter.rules }
shared_examples_for "warning of deprecated `:example_group` during filtering configuration" do |method, *args|
it "issues a deprecation warning when filtering by `:example_group`" do
args << { :example_group => { :file_location => /spec\/unit/ } }
expect_deprecation_with_call_site(__FILE__, __LINE__ + 1, /:example_group/)
config.__send__(method, *args)
end
end
describe '#deprecation_stream' do
it 'defaults to standard error' do
expect($rspec_core_without_stderr_monkey_patch.deprecation_stream).to eq STDERR
end
it 'is configurable' do
io = double 'deprecation io'
config.deprecation_stream = io
expect(config.deprecation_stream).to eq io
end
context 'when the reporter has already been initialized' do
before do
config.reporter
allow(config).to receive(:warn)
end
it 'prints a notice indicating the reconfigured output_stream will be ignored' do
config.deprecation_stream = double("IO")
expect(config).to have_received(:warn).with(/deprecation_stream.*#{__FILE__}:#{__LINE__ - 1}/)
end
it 'does not change the value of `deprecation_stream`' do
value = config.deprecation_stream
config.deprecation_stream = double("IO")
expect(config.deprecation_stream).to equal(value)
end
it 'does not print a warning if set to the value it already has' do
config.deprecation_stream = config.deprecation_stream
expect(config).not_to have_received(:warn)
end
end
end
describe "#output_stream" do
it 'defaults to standard output' do
expect(config.output_stream).to eq $stdout
end
it 'is configurable' do
io = double 'output io'
config.output_stream = io
expect(config.output_stream).to eq io
end
context 'when the reporter has already been initialized' do
before do
config.reporter
allow(config).to receive(:warn)
end
it 'prints a notice indicating the reconfigured output_stream will be ignored' do
config.output_stream = StringIO.new
expect(config).to have_received(:warn).with(/output_stream.*#{__FILE__}:#{__LINE__ - 1}/)
end
it 'does not change the value of `output_stream`' do
config.output_stream = StringIO.new
expect(config.output_stream).to eq($stdout)
end
it 'does not print a warning if set to the value it already has' do
config.output_stream = config.output_stream
expect(config).not_to have_received(:warn)
end
end
end
describe "#requires=" do
include_context "isolate load path mutation"
def absolute_path_to(dir)
File.expand_path("../../../../#{dir}", __FILE__)
end
it 'adds `lib` to the load path' do
lib_dir = absolute_path_to("lib")
$LOAD_PATH.delete(lib_dir)
expect($LOAD_PATH).not_to include(lib_dir)
config.requires = []
expect($LOAD_PATH).to include(lib_dir)
end
it 'adds the configured `default_path` to the load path' do
config.default_path = 'features'
foo_dir = absolute_path_to("features")
expect($LOAD_PATH).not_to include(foo_dir)
config.requires = []
expect($LOAD_PATH).to include(foo_dir)
end
it 'stores the required files' do
expect(config).to receive(:require).with('a/path')
config.requires = ['a/path']
expect(config.requires).to eq ['a/path']
end
context "when `default_path` refers to a file rather than a directory" do
it 'does not add it to the load path' do
config.default_path = 'Rakefile'
config.requires = []
expect($LOAD_PATH).not_to include(match(/Rakefile/))
end
end
end
describe "#load_spec_files" do
it "loads files using load" do
config.files_to_run = ["foo.bar", "blah_spec.rb"]
expect(config).to receive(:load).twice
config.load_spec_files
end
it "loads each file once, even if duplicated in list" do
config.files_to_run = ["a_spec.rb", "a_spec.rb"]
expect(config).to receive(:load).once
config.load_spec_files
end
end
describe "#mock_framework" do
it "defaults to :rspec" do
expect(RSpec::Support).to receive(:require_rspec_core).with('mocking_adapters/rspec')
config.mock_framework
end
end
describe "#mock_framework="do
it "delegates to mock_with" do
expect(config).to receive(:mock_with).with(:rspec)
config.mock_framework = :rspec
end
end
shared_examples "a configurable framework adapter" do |m|
it "yields a config object if the framework_module supports it" do
custom_config = Struct.new(:custom_setting).new
mod = Module.new
allow(mod).to receive_messages(:configuration => custom_config)
config.send m, mod do |mod_config|
mod_config.custom_setting = true
end
expect(custom_config.custom_setting).to be_truthy
end
it "raises if framework module doesn't support configuration" do
mod = Module.new
expect {
config.send m, mod do |mod_config|
end
}.to raise_error(/must respond to `configuration`/)
end
end
describe "#mock_with" do
before { allow(config).to receive(:require) }
it_behaves_like "a configurable framework adapter", :mock_with
it "allows rspec-mocks to be configured with a provided block" do
mod = Module.new
expect(RSpec::Mocks.configuration).to receive(:add_stub_and_should_receive_to).with(mod)
config.mock_with :rspec do |c|
c.add_stub_and_should_receive_to mod
end
end
context "with a module" do
it "sets the mock_framework_adapter to that module" do
mod = Module.new
config.mock_with mod
expect(config.mock_framework).to eq(mod)
end
end
it 'uses the named adapter' do
expect(RSpec::Support).to receive(:require_rspec_core).with('mocking_adapters/mocha')
stub_const("RSpec::Core::MockingAdapters::Mocha", Module.new)
config.mock_with :mocha
end
it "uses the null adapter when given :nothing" do
expect(RSpec::Support).to receive(:require_rspec_core).with('mocking_adapters/null').and_call_original
config.mock_with :nothing
end
it "raises an error when given an unknown key" do
expect {
config.mock_with :crazy_new_mocking_framework_ive_not_yet_heard_of
}.to raise_error(ArgumentError, /unknown mocking framework/i)
end
it "raises an error when given another type of object" do
expect {
config.mock_with Object.new
}.to raise_error(ArgumentError, /unknown mocking framework/i)
end
context 'when there are already some example groups defined' do
before { allow(RSpec::Support).to receive(:require_rspec_core) }
it 'raises an error since this setting must be applied before any groups are defined' do
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
mocha = stub_const("RSpec::Core::MockingAdapters::Mocha", Module.new)
allow(mocha).to receive_messages(:framework_name => :mocha)
expect {
config.mock_with :mocha
}.to raise_error(/must be configured before any example groups are defined/)
end
it 'does not raise an error if the default `mock_with :rspec` is re-configured' do
config.mock_framework # called by RSpec when configuring the first example group
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
config.mock_with :rspec
end
it 'does not raise an error if re-setting the same config' do
mocha = stub_const("RSpec::Core::MockingAdapters::Mocha", Module.new)
allow(mocha).to receive_messages(:framework_name => :mocha)
groups = []
allow(RSpec.world).to receive_messages(:example_groups => groups)
config.mock_with :mocha
groups << double.as_null_object
config.mock_with :mocha
end
end
end
describe "#expectation_framework" do
it "defaults to :rspec" do
expect(config).to receive(:require).with('rspec/expectations')
config.expectation_frameworks
end
end
describe "#expectation_framework=" do
it "delegates to expect_with=" do
expect(config).to receive(:expect_with).with(:rspec)
config.expectation_framework = :rspec
end
end
def stub_expectation_adapters
stub_const("Test::Unit::Assertions", Module.new)
stub_const("Minitest::Assertions", Module.new)
stub_const("RSpec::Core::TestUnitAssertionsAdapter", Module.new)
stub_const("RSpec::Core::MinitestAssertionsAdapter", Module.new)
allow(config).to receive(:require)
end
describe "#expect_with" do
before do
stub_expectation_adapters
end
it_behaves_like "a configurable framework adapter", :expect_with
context "with :rspec" do
it "requires rspec/expectations" do
expect(config).to receive(:require).with('rspec/expectations')
config.expect_with :rspec
end
it "sets the expectation framework to ::RSpec::Matchers" do
config.expect_with :rspec
expect(config.expectation_frameworks).to eq [::RSpec::Matchers]
end
end
context "with :test_unit" do
it "requires rspec/core/test_unit_assertions_adapter" do
expect(config).to receive(:require).
with('rspec/core/test_unit_assertions_adapter')
config.expect_with :test_unit
end
it "sets the expectation framework to ::Test::Unit::Assertions" do
config.expect_with :test_unit
expect(config.expectation_frameworks).to eq [
::RSpec::Core::TestUnitAssertionsAdapter
]
end
end
context "with :minitest" do
it "requires rspec/core/minitest_assertions_adapter" do
expect(config).to receive(:require).
with('rspec/core/minitest_assertions_adapter')
config.expect_with :minitest
end
it "sets the expectation framework to ::Minitest::Assertions" do
config.expect_with :minitest
expect(config.expectation_frameworks).to eq [
::RSpec::Core::MinitestAssertionsAdapter
]
end
end
it "supports multiple calls" do
config.expect_with :rspec
config.expect_with :minitest
expect(config.expectation_frameworks).to eq [
RSpec::Matchers,
RSpec::Core::MinitestAssertionsAdapter
]
end
it "raises if block given with multiple args" do
expect {
config.expect_with :rspec, :minitest do |mod_config|
end
}.to raise_error(/expect_with only accepts/)
end
it "raises ArgumentError if framework is not supported" do
expect do
config.expect_with :not_supported
end.to raise_error(ArgumentError)
end
context 'when there are already some example groups defined' do
it 'raises an error since this setting must be applied before any groups are defined' do
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
expect {
config.expect_with :rspec
}.to raise_error(/must be configured before any example groups are defined/)
end
it 'does not raise an error if the default `expect_with :rspec` is re-configured' do
config.expectation_frameworks # called by RSpec when configuring the first example group
allow(RSpec.world).to receive(:example_groups).and_return([double.as_null_object])
config.expect_with :rspec
end
it 'does not raise an error if re-setting the same config' do
groups = []
allow(RSpec.world).to receive_messages(:example_groups => groups)
config.expect_with :minitest
groups << double.as_null_object
config.expect_with :minitest
end
end
end
describe "#expecting_with_rspec?" do
before do
stub_expectation_adapters
end
it "returns false by default" do
expect(config).not_to be_expecting_with_rspec
end
it "returns true when `expect_with :rspec` has been configured" do
config.expect_with :rspec
expect(config).to be_expecting_with_rspec
end
it "returns true when `expect_with :rspec, :minitest` has been configured" do
config.expect_with :rspec, :minitest
expect(config).to be_expecting_with_rspec
end
it "returns true when `expect_with :minitest, :rspec` has been configured" do
config.expect_with :minitest, :rspec
expect(config).to be_expecting_with_rspec
end
it "returns false when `expect_with :minitest` has been configured" do
config.expect_with :minitest
expect(config).not_to be_expecting_with_rspec
end
end
describe "#files_to_run" do
it "loads files not following pattern if named explicitly" do
assign_files_or_directories_to_run "spec/rspec/core/resources/a_bar.rb"
expect(config.files_to_run).to eq([ "spec/rspec/core/resources/a_bar.rb"])
end
it "prevents repetition of dir when start of the pattern" do
config.pattern = "spec/**/a_spec.rb"
assign_files_or_directories_to_run "spec"
expect(config.files_to_run).to eq(["spec/rspec/core/resources/a_spec.rb"])
end
it "does not prevent repetition of dir when later of the pattern" do
config.pattern = "rspec/**/a_spec.rb"
assign_files_or_directories_to_run "spec"
expect(config.files_to_run).to eq(["spec/rspec/core/resources/a_spec.rb"])
end
it 'reloads when `files_or_directories_to_run` is reassigned' do
config.pattern = "spec/**/a_spec.rb"
config.files_or_directories_to_run = "empty_dir"
expect {
config.files_or_directories_to_run = "spec"
}.to change { config.files_to_run }.
to(["spec/rspec/core/resources/a_spec.rb"])
end
context "with <path>:<line_number>" do
it "overrides inclusion filters set on config" do
config.filter_run_including :foo => :bar
assign_files_or_directories_to_run "path/to/file.rb:37"
expect(inclusion_filter.size).to eq(1)
expect(inclusion_filter[:locations].keys.first).to match(/path\/to\/file\.rb$/)
expect(inclusion_filter[:locations].values.first).to eq([37])
end
it "overrides inclusion filters set before config" do
config.force(:inclusion_filter => {:foo => :bar})
assign_files_or_directories_to_run "path/to/file.rb:37"
expect(inclusion_filter.size).to eq(1)
expect(inclusion_filter[:locations].keys.first).to match(/path\/to\/file\.rb$/)
expect(inclusion_filter[:locations].values.first).to eq([37])
end
it "clears exclusion filters set on config" do
config.exclusion_filter = { :foo => :bar }
assign_files_or_directories_to_run "path/to/file.rb:37"
expect(exclusion_filter).to be_empty,
"expected exclusion filter to be empty:\n#{exclusion_filter}"
end
it "clears exclusion filters set before config" do
config.force(:exclusion_filter => { :foo => :bar })
assign_files_or_directories_to_run "path/to/file.rb:37"
expect(config.exclusion_filter).to be_empty,
"expected exclusion filter to be empty:\n#{config.exclusion_filter}"
end
end
context "with default pattern" do
it "loads files named _spec.rb" do
assign_files_or_directories_to_run "spec/rspec/core/resources"
expect(config.files_to_run).to eq(["spec/rspec/core/resources/a_spec.rb"])
end
it "loads files in Windows", :if => RSpec.world.windows_os? do
assign_files_or_directories_to_run "C:\\path\\to\\project\\spec\\sub\\foo_spec.rb"
expect(config.files_to_run).to eq(["C:/path/to/project/spec/sub/foo_spec.rb"])
end
it "loads files in Windows when directory is specified", :if => RSpec.world.windows_os? do
assign_files_or_directories_to_run "spec\\rspec\\core\\resources"
expect(config.files_to_run).to eq(["spec/rspec/core/resources/a_spec.rb"])
end
end
context "with default default_path" do
it "loads files in the default path when run by rspec" do
allow(config).to receive(:command) { 'rspec' }
assign_files_or_directories_to_run []
expect(config.files_to_run).not_to be_empty
end
it "loads files in the default path when run with DRB (e.g., spork)" do
allow(config).to receive(:command) { 'spork' }
allow(RSpec::Core::Runner).to receive(:running_in_drb?) { true }
assign_files_or_directories_to_run []
expect(config.files_to_run).not_to be_empty
end
it "does not load files in the default path when run by ruby" do
allow(config).to receive(:command) { 'ruby' }
assign_files_or_directories_to_run []
expect(config.files_to_run).to be_empty
end
end
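      # Stubs Dir[] with several orderings of the same file list, yields so the caller can
      # assign the files to run, and checks that `files_to_run` comes back in the same order
      # regardless of the order the OS glob returned.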
def specify_consistent_ordering_of_files_to_run
allow(File).to receive(:directory?).with('a') { true }
globbed_files = nil
allow(Dir).to receive(:[]).with(/^\{?a/) { globbed_files }
orderings = [
%w[ a/1.rb a/2.rb a/3.rb ],
%w[ a/2.rb a/1.rb a/3.rb ],
%w[ a/3.rb a/2.rb a/1.rb ]
].map do |files|
globbed_files = files
yield
config.files_to_run
end
expect(orderings.uniq.size).to eq(1)
end
context 'when the given directories match the pattern' do
it 'orders the files in a consistent ordering, regardless of the underlying OS ordering' do
specify_consistent_ordering_of_files_to_run do
config.pattern = 'a/*.rb'
assign_files_or_directories_to_run 'a'
end
end
end
context 'when the pattern is given relative to the given directories' do
it 'orders the files in a consistent ordering, regardless of the underlying OS ordering' do
specify_consistent_ordering_of_files_to_run do
config.pattern = '*.rb'
assign_files_or_directories_to_run 'a'
end
end
end
context 'when given multiple file paths' do
it 'orders the files in a consistent ordering, regardless of the given order' do
          allow(File).to receive(:directory?) { false } # fake it into thinking these are full file paths
files = ['a/b/c_spec.rb', 'c/b/a_spec.rb']
assign_files_or_directories_to_run(*files)
ordering_1 = config.files_to_run
assign_files_or_directories_to_run(*files.reverse)
ordering_2 = config.files_to_run
expect(ordering_1).to eq(ordering_2)
end
end
end
describe "#pattern" do
context "with single pattern" do
before { config.pattern = "**/*_foo.rb" }
it "loads files following pattern" do
file = File.expand_path(File.dirname(__FILE__) + "/resources/a_foo.rb")
assign_files_or_directories_to_run file
expect(config.files_to_run).to include(file)
end
it "loads files in directories following pattern" do
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
end
it "does not load files in directories not following pattern" do
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir
expect(config.files_to_run).not_to include("#{dir}/a_bar.rb")
end
end
context "with multiple patterns" do
it "supports comma separated values" do
config.pattern = "**/*_foo.rb,**/*_bar.rb"
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
expect(config.files_to_run).to include("#{dir}/a_bar.rb")
end
it "supports comma separated values with spaces" do
config.pattern = "**/*_foo.rb, **/*_bar.rb"
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
expect(config.files_to_run).to include("#{dir}/a_bar.rb")
end
it "supports curly braces glob syntax" do
config.pattern = "**/*_{foo,bar}.rb"
dir = File.expand_path(File.dirname(__FILE__) + "/resources")
assign_files_or_directories_to_run dir
expect(config.files_to_run).to include("#{dir}/a_foo.rb")
expect(config.files_to_run).to include("#{dir}/a_bar.rb")
end
end
context "after files have already been loaded" do
it 'will warn that it will have no effect' do
expect_warning_with_call_site(__FILE__, __LINE__ + 2, /has no effect/)
config.load_spec_files
config.pattern = "rspec/**/*.spec"
end
it 'will not warn if reset is called after load_spec_files' do
config.load_spec_files
config.reset
expect(RSpec).to_not receive(:warning)
config.pattern = "rspec/**/*.spec"
end
end
end
describe "path with line number" do
it "assigns the line number as a location filter" do
assign_files_or_directories_to_run "path/to/a_spec.rb:37"
expect(inclusion_filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37]}})
end
end
context "with full_description set" do
it "overrides filters" do
config.filter_run :focused => true
config.full_description = "foo"
expect(inclusion_filter).not_to have_key(:focused)
end
it 'is possible to access the full description regular expression' do
config.full_description = "foo"
expect(config.full_description).to eq(/foo/)
end
end
context "without full_description having been set" do
it 'returns nil from #full_description' do
expect(config.full_description).to eq nil
end
end
context "with line number" do
it "assigns the file and line number as a location filter" do
assign_files_or_directories_to_run "path/to/a_spec.rb:37"
expect(inclusion_filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37]}})
end
it "assigns multiple files with line numbers as location filters" do
assign_files_or_directories_to_run "path/to/a_spec.rb:37", "other_spec.rb:44"
expect(inclusion_filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37],
File.expand_path("other_spec.rb") => [44]}})
end
it "assigns files with multiple line numbers as location filters" do
assign_files_or_directories_to_run "path/to/a_spec.rb:37", "path/to/a_spec.rb:44"
expect(inclusion_filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [37, 44]}})
end
end
context "with multiple line numbers" do
it "assigns the file and line numbers as a location filter" do
assign_files_or_directories_to_run "path/to/a_spec.rb:1:3:5:7"
expect(inclusion_filter).to eq({:locations => {File.expand_path("path/to/a_spec.rb") => [1,3,5,7]}})
end
end
it "assigns the example name as the filter on description" do
config.full_description = "foo"
expect(inclusion_filter).to eq({:full_description => /foo/})
end
it "assigns the example names as the filter on description if description is an array" do
config.full_description = [ "foo", "bar" ]
expect(inclusion_filter).to eq({:full_description => Regexp.union(/foo/, /bar/)})
end
it 'is possible to access the full description regular expression' do
config.full_description = "foo","bar"
expect(config.full_description).to eq Regexp.union(/foo/,/bar/)
end
describe "#default_path" do
it 'defaults to "spec"' do
expect(config.default_path).to eq('spec')
end
end
describe "#include" do
include_examples "warning of deprecated `:example_group` during filtering configuration", :include, Enumerable
module InstanceLevelMethods
def you_call_this_a_blt?
"egad man, where's the mayo?!?!?"
end
end
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.include(InstanceLevelMethods, *args)
config.include_or_extend_modules.last.last
end
end
context "with no filter" do
it "includes the given module into each example group" do
RSpec.configure do |c|
c.include(InstanceLevelMethods)
end
group = ExampleGroup.describe('does like, stuff and junk', :magic_key => :include) { }
expect(group).not_to respond_to(:you_call_this_a_blt?)
expect(group.new.you_call_this_a_blt?).to eq("egad man, where's the mayo?!?!?")
end
end
context "with a filter" do
it "includes the given module into each matching example group" do
RSpec.configure do |c|
c.include(InstanceLevelMethods, :magic_key => :include)
end
group = ExampleGroup.describe('does like, stuff and junk', :magic_key => :include) { }
expect(group).not_to respond_to(:you_call_this_a_blt?)
expect(group.new.you_call_this_a_blt?).to eq("egad man, where's the mayo?!?!?")
end
end
end
describe "#extend" do
include_examples "warning of deprecated `:example_group` during filtering configuration", :extend, Enumerable
module ThatThingISentYou
def that_thing
end
end
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.extend(ThatThingISentYou, *args)
config.include_or_extend_modules.last.last
end
end
it "extends the given module into each matching example group" do
RSpec.configure do |c|
c.extend(ThatThingISentYou, :magic_key => :extend)
end
group = ExampleGroup.describe(ThatThingISentYou, :magic_key => :extend) { }
expect(group).to respond_to(:that_thing)
end
end
describe "#run_all_when_everything_filtered?" do
it "defaults to false" do
expect(config.run_all_when_everything_filtered?).to be_falsey
end
it "can be queried with question method" do
config.run_all_when_everything_filtered = true
expect(config.run_all_when_everything_filtered?).to be_truthy
end
end
describe "#color=" do
context "given true" do
before { config.color = true }
context "with config.tty? and output.tty?" do
it "sets color_enabled?" do
output = StringIO.new
config.output_stream = output
config.tty = true
allow(config.output_stream).to receive_messages :tty? => true
expect(config.color_enabled?).to be_truthy
expect(config.color_enabled?(output)).to be_truthy
end
end
context "with config.tty? and !output.tty?" do
it "sets color_enabled?" do
output = StringIO.new
config.output_stream = output
config.tty = true
allow(config.output_stream).to receive_messages :tty? => false
expect(config.color_enabled?).to be_truthy
expect(config.color_enabled?(output)).to be_truthy
end
end
context "with config.tty? and !output.tty?" do
it "does not set color_enabled?" do
output = StringIO.new
config.output_stream = output
config.tty = false
allow(config.output_stream).to receive_messages :tty? => true
expect(config.color_enabled?).to be_truthy
expect(config.color_enabled?(output)).to be_truthy
end
end
context "with !config.tty? and !output.tty?" do
it "does not set color_enabled?" do
output = StringIO.new
config.output_stream = output
config.tty = false
allow(config.output_stream).to receive_messages :tty? => false
expect(config.color_enabled?).to be_falsey
expect(config.color_enabled?(output)).to be_falsey
end
end
context "on windows" do
before do
@original_host = RbConfig::CONFIG['host_os']
RbConfig::CONFIG['host_os'] = 'mingw'
allow(config).to receive(:require)
end
after do
RbConfig::CONFIG['host_os'] = @original_host
end
context "with ANSICON available" do
around(:each) { |e| with_env_vars('ANSICON' => 'ANSICON', &e) }
it "enables colors" do
config.output_stream = StringIO.new
allow(config.output_stream).to receive_messages :tty? => true
config.color = true
expect(config.color).to be_truthy
end
it "leaves output stream intact" do
config.output_stream = $stdout
allow(config).to receive(:require) do |what|
config.output_stream = 'foo' if what =~ /Win32/
end
config.color = true
expect(config.output_stream).to eq($stdout)
end
end
context "with ANSICON NOT available" do
before do
allow_warning
end
it "warns to install ANSICON" do
allow(config).to receive(:require) { raise LoadError }
expect_warning_with_call_site(__FILE__, __LINE__ + 1, /You must use ANSICON/)
config.color = true
end
it "sets color to false" do
allow(config).to receive(:require) { raise LoadError }
config.color = true
expect(config.color).to be_falsey
end
end
end
end
it "prefers incoming cli_args" do
config.output_stream = StringIO.new
allow(config.output_stream).to receive_messages :tty? => true
config.force :color => true
config.color = false
expect(config.color).to be_truthy
end
end
%w[formatter= add_formatter].each do |config_method|
describe "##{config_method}" do
it "delegates to formatters#add" do
expect(config.formatter_loader).to receive(:add).with('these','options')
config.send(config_method,'these','options')
end
end
end
describe "#formatters" do
it "returns a dup of the formatter_loader formatters" do
config.add_formatter 'doc'
config.formatters.clear
expect(config.formatters).to_not eq []
end
end
describe "#default_formatter" do
it 'defaults to `progress`' do
expect(config.default_formatter).to eq('progress')
end
it 'remembers changes' do
config.default_formatter = 'doc'
expect(config.default_formatter).to eq('doc')
end
context 'when another formatter has been set' do
it 'does not get used' do
config.default_formatter = 'doc'
config.add_formatter 'progress'
expect(used_formatters).to include(an_instance_of Formatters::ProgressFormatter)
expect(used_formatters).not_to include(an_instance_of Formatters::DocumentationFormatter)
end
end
context 'when no other formatter has been set' do
it 'gets used' do
config.default_formatter = 'doc'
expect(used_formatters).not_to include(an_instance_of Formatters::ProgressFormatter)
expect(used_formatters).to include(an_instance_of Formatters::DocumentationFormatter)
end
end
context 'using a legacy formatter as default' do
# Generating warnings during formatter initialisation triggers the
# ProxyReporter code path.
it 'remembers changes' do
legacy_formatter = Class.new
config = RSpec.configuration
config.default_formatter = legacy_formatter
config.reporter
expect(config.default_formatter).to eq(legacy_formatter)
end
end
def used_formatters
config.reporter # to force freezing of formatters
config.formatters
end
end
describe "#filter_run_including" do
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.filter_run_including(*args)
config.inclusion_filter.rules
end
end
include_examples "warning of deprecated `:example_group` during filtering configuration", :filter_run_including
it "sets the filter with a hash" do
config.filter_run_including :foo => true
expect(inclusion_filter).to eq( {:foo => true} )
end
it "sets the filter with a symbol" do
config.filter_run_including :foo
expect(inclusion_filter).to eq( {:foo => true} )
end
it "merges with existing filters" do
config.filter_run_including :foo => true
config.filter_run_including :bar => false
expect(inclusion_filter).to eq( {:foo => true, :bar => false} )
end
end
describe "#filter_run_excluding" do
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.filter_run_excluding(*args)
config.exclusion_filter.rules
end
end
include_examples "warning of deprecated `:example_group` during filtering configuration", :filter_run_excluding
it "sets the filter" do
config.filter_run_excluding :foo => true
expect(exclusion_filter).to eq( {:foo => true} )
end
it "sets the filter using a symbol" do
config.filter_run_excluding :foo
expect(exclusion_filter).to eq( {:foo => true} )
end
it "merges with existing filters" do
config.filter_run_excluding :foo => true
config.filter_run_excluding :bar => false
expect(exclusion_filter).to eq( {:foo => true, :bar => false} )
end
end
shared_examples_for "a spec filter" do |type|
describe "##{type}" do
it "returns {} even if set to nil" do
config.send("#{type}=", nil)
expect(send(type)).to eq({})
end
end
describe "##{type}=" do
it "treats symbols as hash keys with true values when told to" do
config.send("#{type}=", :foo)
expect(send(type)).to eq( {:foo => true} )
end
it "overrides any #{type} set on the command line or in configuration files" do
config.force(type => { :foo => :bar })
config.send("#{type}=", {:want => :this})
expect(send(type)).to eq( {:want => :this} )
end
include_examples "warning of deprecated `:example_group` during filtering configuration", :"#{type}="
end
end
it_behaves_like "a spec filter", :inclusion_filter
it_behaves_like "a spec filter", :exclusion_filter
describe "#treat_symbols_as_metadata_keys_with_true_values=" do
it 'is deprecated' do
expect_deprecation_with_call_site(__FILE__, __LINE__ + 1)
config.treat_symbols_as_metadata_keys_with_true_values = true
end
end
describe "#full_backtrace=" do
it "doesn't impact other instances of config" do
config_1 = Configuration.new
config_2 = Configuration.new
config_1.full_backtrace = true
expect(config_2.full_backtrace?).to be_falsey
end
end
describe "#backtrace_exclusion_patterns=" do
it "actually receives the new filter values" do
config = Configuration.new
config.backtrace_exclusion_patterns = [/.*/]
expect(config.backtrace_formatter.exclude? "this").to be_truthy
end
end
describe 'full_backtrace' do
it 'returns true when backtrace patterns is empty' do
config.backtrace_exclusion_patterns = []
expect(config.full_backtrace?).to eq true
end
it 'returns false when backtrace patterns isnt empty' do
config.backtrace_exclusion_patterns = [:lib]
expect(config.full_backtrace?).to eq false
end
end
describe "#backtrace_exclusion_patterns" do
it "can be appended to" do
config = Configuration.new
config.backtrace_exclusion_patterns << /.*/
expect(config.backtrace_formatter.exclude? "this").to be_truthy
end
end
describe "#libs=" do
include_context "isolate load path mutation"
it "adds directories to the LOAD_PATH" do
expect($LOAD_PATH).to receive(:unshift).with("a/dir")
config.libs = ["a/dir"]
end
end
describe "libs" do
include_context "isolate load path mutation"
it 'records paths added to the load path' do
config.libs = ["a/dir"]
expect(config.libs).to eq ["a/dir"]
end
end
describe "#define_derived_metadata" do
include_examples "warning of deprecated `:example_group` during filtering configuration", :define_derived_metadata
it 'allows the provided block to mutate example group metadata' do
RSpec.configuration.define_derived_metadata do |metadata|
metadata[:reverse_description] = metadata[:description].reverse
end
group = RSpec.describe("My group")
expect(group.metadata).to include(:description => "My group", :reverse_description => "puorg yM")
end
it 'allows the provided block to mutate example metadata' do
RSpec.configuration.define_derived_metadata do |metadata|
metadata[:reverse_description] = metadata[:description].reverse
end
ex = RSpec.describe("My group").example("foo")
expect(ex.metadata).to include(:description => "foo", :reverse_description => "oof")
end
it 'allows multiple configured blocks to be applied, in order of definition' do
RSpec.configure do |c|
c.define_derived_metadata { |m| m[:b1_desc] = m[:description] + " (block 1)" }
c.define_derived_metadata { |m| m[:b2_desc] = m[:b1_desc] + " (block 2)" }
end
group = RSpec.describe("bar")
expect(group.metadata).to include(:b1_desc => "bar (block 1)", :b2_desc => "bar (block 1) (block 2)")
end
it "derives metadata before the group or example blocks are eval'd so their logic can depend on the derived metadata" do
RSpec.configure do |c|
c.define_derived_metadata(:foo) do |metadata|
metadata[:bar] = "bar"
end
end
group_bar_value = example_bar_value = nil
RSpec.describe "Group", :foo do
group_bar_value = metadata[:bar]
example_bar_value = example("ex", :foo).metadata[:bar]
end
expect(group_bar_value).to eq("bar")
expect(example_bar_value).to eq("bar")
end
context "when passed a metadata filter" do
it 'only applies to the groups and examples that match that filter' do
RSpec.configure do |c|
c.define_derived_metadata(:apply => true) do |metadata|
metadata[:reverse_description] = metadata[:description].reverse
end
end
g1 = RSpec.describe("G1", :apply)
g2 = RSpec.describe("G2")
e1 = g1.example("E1")
e2 = g2.example("E2", :apply)
e3 = g2.example("E3")
expect(g1.metadata).to include(:reverse_description => "1G")
expect(g2.metadata).not_to include(:reverse_description)
expect(e1.metadata).to include(:reverse_description => "1E")
expect(e2.metadata).to include(:reverse_description => "2E")
expect(e3.metadata).not_to include(:reverse_description)
end
it 'applies if any of multiple filters apply (to align with module inclusion semantics)' do
RSpec.configure do |c|
c.define_derived_metadata(:a => 1, :b => 2) do |metadata|
metadata[:reverse_description] = metadata[:description].reverse
end
end
g1 = RSpec.describe("G1", :a => 1)
g2 = RSpec.describe("G2", :b => 2)
g3 = RSpec.describe("G3", :c => 3)
expect(g1.metadata).to include(:reverse_description => "1G")
expect(g2.metadata).to include(:reverse_description => "2G")
expect(g3.metadata).not_to include(:reverse_description)
end
it 'allows a metadata filter to be passed as a raw symbol' do
RSpec.configure do |c|
c.define_derived_metadata(:apply) do |metadata|
metadata[:reverse_description] = metadata[:description].reverse
end
end
g1 = RSpec.describe("G1", :apply)
g2 = RSpec.describe("G2")
expect(g1.metadata).to include(:reverse_description => "1G")
expect(g2.metadata).not_to include(:reverse_description)
end
end
end
describe "#add_setting" do
describe "with no modifiers" do
context "with no additional options" do
before do
config.add_setting :custom_option
end
it "defaults to nil" do
expect(config.custom_option).to be_nil
end
it "adds a predicate" do
expect(config.custom_option?).to be_falsey
end
it "can be overridden" do
config.custom_option = "a value"
expect(config.custom_option).to eq("a value")
end
end
context "with :default => 'a value'" do
before do
config.add_setting :custom_option, :default => 'a value'
end
it "defaults to 'a value'" do
expect(config.custom_option).to eq("a value")
end
it "returns true for the predicate" do
expect(config.custom_option?).to be_truthy
end
it "can be overridden with a truthy value" do
config.custom_option = "a new value"
expect(config.custom_option).to eq("a new value")
end
it "can be overridden with nil" do
config.custom_option = nil
expect(config.custom_option).to eq(nil)
end
it "can be overridden with false" do
config.custom_option = false
expect(config.custom_option).to eq(false)
end
end
end
context "with :alias_with => " do
before do
config.add_setting :custom_option, :alias_with => :another_custom_option
end
it "delegates the getter to the other option" do
config.another_custom_option = "this value"
expect(config.custom_option).to eq("this value")
end
it "delegates the setter to the other option" do
config.custom_option = "this value"
expect(config.another_custom_option).to eq("this value")
end
it "delegates the predicate to the other option" do
config.custom_option = true
expect(config.another_custom_option?).to be_truthy
end
end
end
describe "#configure_group" do
it "extends with 'extend'" do
mod = Module.new
group = ExampleGroup.describe("group", :foo => :bar)
config.extend(mod, :foo => :bar)
config.configure_group(group)
expect(group).to be_a(mod)
end
it "extends with 'module'" do
mod = Module.new
group = ExampleGroup.describe("group", :foo => :bar)
config.include(mod, :foo => :bar)
config.configure_group(group)
expect(group.included_modules).to include(mod)
end
it "requires only one matching filter" do
mod = Module.new
group = ExampleGroup.describe("group", :foo => :bar)
config.include(mod, :foo => :bar, :baz => :bam)
config.configure_group(group)
expect(group.included_modules).to include(mod)
end
it "includes each one before deciding whether to include the next" do
mod1 = Module.new do
def self.included(host)
host.metadata[:foo] = :bar
end
end
mod2 = Module.new
group = ExampleGroup.describe("group")
config.include(mod1)
config.include(mod2, :foo => :bar)
config.configure_group(group)
expect(group.included_modules).to include(mod1)
expect(group.included_modules).to include(mod2)
end
module IncludeOrExtendMeOnce
def self.included(host)
raise "included again" if host.instance_methods.include?(:foobar)
host.class_exec { def foobar; end }
end
def self.extended(host)
raise "extended again" if host.respond_to?(:foobar)
def host.foobar; end
end
end
it "doesn't include a module when already included in ancestor" do
config.include(IncludeOrExtendMeOnce, :foo => :bar)
group = ExampleGroup.describe("group", :foo => :bar)
child = group.describe("child")
config.configure_group(group)
config.configure_group(child)
end
it "doesn't extend when ancestor is already extended with same module" do
config.extend(IncludeOrExtendMeOnce, :foo => :bar)
group = ExampleGroup.describe("group", :foo => :bar)
child = group.describe("child")
config.configure_group(group)
config.configure_group(child)
end
end
describe "#alias_example_group_to" do
after do
RSpec::Core::DSL.example_group_aliases.delete(:my_group_method)
RSpec.module_exec do
class << self
undef :my_group_method if method_defined? :my_group_method
end
end
RSpec::Core::ExampleGroup.module_exec do
class << self
undef :my_group_method if method_defined? :my_group_method
end
end
end
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.alias_example_group_to :my_group_method, *args
group = ExampleGroup.my_group_method("a group")
group.metadata
end
end
it "allows adding additional metadata" do
config.alias_example_group_to :my_group_method, { :some => "thing" }
group = ExampleGroup.my_group_method("a group", :another => "thing")
expect(group.metadata).to include(:some => "thing", :another => "thing")
end
it "passes `nil` as the description arg when no args are given" do
config.alias_example_group_to :my_group_method, { :some => "thing" }
group = ExampleGroup.my_group_method
expect(group.metadata).to include(
:description_args => [nil],
:description => "",
:some => "thing"
)
end
context 'when the aliased method is used' do
it_behaves_like "metadata hash builder" do
def metadata_hash(*args)
config.alias_example_group_to :my_group_method
group = ExampleGroup.my_group_method("a group", *args)
group.metadata
end
end
end
end
describe "#alias_example_to" do
it_behaves_like "metadata hash builder" do
after do
RSpec::Core::ExampleGroup.module_exec do
class << self
undef :my_example_method if method_defined? :my_example_method
end
end
end
def metadata_hash(*args)
config.alias_example_to :my_example_method, *args
group = ExampleGroup.describe("group")
example = group.my_example_method("description")
example.metadata
end
end
end
describe "#reset" do
it "clears the reporter" do
expect(config.reporter).not_to be_nil
config.reset
expect(config.instance_variable_get("@reporter")).to be_nil
end
it "clears the formatters" do
config.add_formatter "doc"
config.reset
expect(config.formatters).to be_empty
end
end
describe "#force" do
context "for ordering options" do
let(:list) { [1, 2, 3, 4] }
let(:ordering_strategy) { config.ordering_registry.fetch(:global) }
let(:rng) { RSpec::Core::RandomNumberGenerator.new config.seed }
let(:shuffled) { Ordering::Random.new(config).shuffle list, rng }
specify "CLI `--order defined` takes precedence over `config.order = rand`" do
config.force :order => "defined"
config.order = "rand"
expect(ordering_strategy.order(list)).to eq([1, 2, 3, 4])
end
specify "CLI `--order rand:37` takes precedence over `config.order = defined`" do
config.force :order => "rand:37"
config.order = "defined"
expect(ordering_strategy.order(list)).to eq(shuffled)
end
specify "CLI `--seed 37` forces order and seed" do
config.force :seed => 37
config.order = "defined"
config.seed = 145
expect(ordering_strategy.order(list)).to eq(shuffled)
expect(config.seed).to eq(37)
end
specify "CLI `--order defined` takes precedence over `config.register_ordering(:global)`" do
config.force :order => "defined"
config.register_ordering(:global, &:reverse)
expect(ordering_strategy.order(list)).to eq([1, 2, 3, 4])
end
end
it "forces 'false' value" do
config.add_setting :custom_option
config.custom_option = true
expect(config.custom_option?).to be_truthy
config.force :custom_option => false
expect(config.custom_option?).to be_falsey
config.custom_option = true
expect(config.custom_option?).to be_falsey
end
end
describe '#seed' do
it 'returns the seed as an int' do
config.seed = '123'
expect(config.seed).to eq(123)
end
end
describe "#seed_used?" do
def use_seed_on(registry)
registry.fetch(:random).order([1, 2])
end
it 'returns false if neither ordering registry used the seed' do
expect(config.seed_used?).to be false
end
it 'returns true if the ordering registry used the seed' do
use_seed_on(config.ordering_registry)
expect(config.seed_used?).to be true
end
end
describe '#order=' do
context 'given "random"' do
before do
config.seed = 7654
config.order = 'random'
end
it 'does not change the seed' do
expect(config.seed).to eq(7654)
end
it 'sets up random ordering' do
allow(RSpec).to receive_messages(:configuration => config)
global_ordering = config.ordering_registry.fetch(:global)
expect(global_ordering).to be_an_instance_of(Ordering::Random)
end
end
context 'given "random:123"' do
before { config.order = 'random:123' }
it 'sets seed to 123' do
expect(config.seed).to eq(123)
end
it 'sets up random ordering' do
allow(RSpec).to receive_messages(:configuration => config)
global_ordering = config.ordering_registry.fetch(:global)
expect(global_ordering).to be_an_instance_of(Ordering::Random)
end
end
context 'given "defined"' do
before do
config.order = 'rand:123'
config.order = 'defined'
end
it "does not change the seed" do
expect(config.seed).to eq(123)
end
it 'clears the random ordering' do
allow(RSpec).to receive_messages(:configuration => config)
list = [1, 2, 3, 4]
ordering_strategy = config.ordering_registry.fetch(:global)
expect(ordering_strategy.order(list)).to eq([1, 2, 3, 4])
end
end
end
describe "#register_ordering" do
def register_reverse_ordering
config.register_ordering(:reverse, &:reverse)
end
it 'stores the ordering for later use' do
register_reverse_ordering
list = [1, 2, 3]
strategy = config.ordering_registry.fetch(:reverse)
expect(strategy).to be_a(Ordering::Custom)
expect(strategy.order(list)).to eq([3, 2, 1])
end
it 'can register an ordering object' do
strategy = Object.new
def strategy.order(list)
list.reverse
end
config.register_ordering(:reverse, strategy)
list = [1, 2, 3]
fetched = config.ordering_registry.fetch(:reverse)
expect(fetched).to be(strategy)
expect(fetched.order(list)).to eq([3, 2, 1])
end
end
describe '#warnings' do
around do |example|
original_setting = $VERBOSE
example.run
$VERBOSE = original_setting
end
it "sets verbose to true when true" do
config.warnings = true
expect($VERBOSE).to eq true
end
it "sets verbose to false when true" do
config.warnings = false
expect($VERBOSE).to eq false
end
it 'returns the verbosity setting' do
config.warnings = true
expect(config.warnings?).to eq true
config.warnings = false
expect(config.warnings?).to eq false
end
it 'is loaded from config by #force' do
config.force :warnings => true
expect($VERBOSE).to eq true
end
end
describe "#raise_errors_for_deprecations!" do
it 'causes deprecations to raise errors rather than printing to the deprecation stream' do
config.deprecation_stream = stream = StringIO.new
config.raise_errors_for_deprecations!
expect {
config.reporter.deprecation(:deprecated => "foo", :call_site => "foo.rb:1")
}.to raise_error(RSpec::Core::DeprecationError, /foo is deprecated/)
expect(stream.string).to eq("")
end
end
describe "#expose_current_running_example_as" do
before { stub_const(Configuration::ExposeCurrentExample.name, Module.new) }
it 'exposes the current example via the named method' do
RSpec.configuration.expose_current_running_example_as :the_example
RSpec.configuration.expose_current_running_example_as :another_example_helper
value_1 = value_2 = nil
ExampleGroup.describe "Group" do
it "works" do
value_1 = the_example
value_2 = another_example_helper
end
end.run
expect(value_1).to be_an(RSpec::Core::Example)
expect(value_1.description).to eq("works")
expect(value_2).to be(value_1)
end
end
describe '#disable_monkey_patching!' do
let!(:config) { RSpec.configuration }
let!(:expectations) { RSpec::Expectations }
let!(:mocks) { RSpec::Mocks }
def in_fully_monkey_patched_rspec_environment
in_sub_process do
config.expose_dsl_globally = true
mocks.configuration.syntax = [:expect, :should]
mocks.configuration.patch_marshal_to_support_partial_doubles = true
expectations.configuration.syntax = [:expect, :should]
yield
end
end
it 'stops exposing the DSL methods globally' do
in_fully_monkey_patched_rspec_environment do
mod = Module.new
expect {
config.disable_monkey_patching!
}.to change { mod.respond_to?(:describe) }.from(true).to(false)
end
end
it 'stops using should syntax for expectations' do
in_fully_monkey_patched_rspec_environment do
obj = Object.new
config.expect_with :rspec
expect {
config.disable_monkey_patching!
}.to change { obj.respond_to?(:should) }.from(true).to(false)
end
end
it 'stops using should syntax for mocks' do
in_fully_monkey_patched_rspec_environment do
obj = Object.new
config.mock_with :rspec
expect {
config.disable_monkey_patching!
}.to change { obj.respond_to?(:should_receive) }.from(true).to(false)
end
end
it 'stops patching of Marshal' do
in_fully_monkey_patched_rspec_environment do
expect {
config.disable_monkey_patching!
}.to change { Marshal.respond_to?(:dump_with_rspec_mocks) }.from(true).to(false)
end
end
context 'when user did not configure mock framework' do
def emulate_not_configured_mock_framework
in_fully_monkey_patched_rspec_environment do
allow(config).to receive(:rspec_mocks_loaded?).and_return(false, true)
config.instance_variable_set :@mock_framework, nil
ExampleGroup.send :remove_class_variable, :@@example_groups_configured
yield
end
end
it 'disables monkey patching after example groups being configured' do
emulate_not_configured_mock_framework do
obj = Object.new
config.disable_monkey_patching!
expect {
ExampleGroup.ensure_example_groups_are_configured
}.to change { obj.respond_to?(:should_receive) }.from(true).to(false)
end
end
end
context 'when user did not configure expectation framework' do
def emulate_not_configured_expectation_framework
in_fully_monkey_patched_rspec_environment do
allow(config).to receive(:rspec_expectations_loaded?).and_return(false, true)
config.instance_variable_set :@expectation_frameworks, []
ExampleGroup.send :remove_class_variable, :@@example_groups_configured
yield
end
end
it 'disables monkey patching after example groups being configured' do
emulate_not_configured_expectation_framework do
obj = Object.new
config.disable_monkey_patching!
expect {
ExampleGroup.ensure_example_groups_are_configured
}.to change { obj.respond_to?(:should) }.from(true).to(false)
end
end
end
end
describe 'recording spec start time (for measuring load)' do
it 'returns a time' do
expect(config.start_time).to be_an_instance_of ::Time
end
it 'is configurable' do
config.start_time = 42
expect(config.start_time).to eq 42
end
end
describe "hooks" do
include_examples "warning of deprecated `:example_group` during filtering configuration", :before, :each
end
# assigns files_or_directories_to_run and triggers post-processing
# via `files_to_run`.
def assign_files_or_directories_to_run(*value)
config.files_or_directories_to_run = value
config.files_to_run
end
end
end
| 1 | 13,613 | Why did you remove this spec? | rspec-rspec-core | rb |
@@ -284,7 +284,6 @@ function getDefaultService() {
Options.prototype.CAPABILITY_KEY = 'goog:chromeOptions'
Options.prototype.BROWSER_NAME_VALUE = Browser.CHROME
Driver.getDefaultService = getDefaultService
-Driver.prototype.VENDOR_COMMAND_PREFIX = 'goog'
// PUBLIC API
exports.Driver = Driver | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview Defines a {@linkplain Driver WebDriver} client for the Chrome
* web browser. Before using this module, you must download the latest
* [ChromeDriver release] and ensure it can be found on your system [PATH].
*
* There are three primary classes exported by this module:
*
* 1. {@linkplain ServiceBuilder}: configures the
* {@link selenium-webdriver/remote.DriverService remote.DriverService}
* that manages the [ChromeDriver] child process.
*
* 2. {@linkplain Options}: defines configuration options for each new Chrome
* session, such as which {@linkplain Options#setProxy proxy} to use,
* what {@linkplain Options#addExtensions extensions} to install, or
* what {@linkplain Options#addArguments command-line switches} to use when
* starting the browser.
*
* 3. {@linkplain Driver}: the WebDriver client; each new instance will control
* a unique browser session with a clean user profile (unless otherwise
* configured through the {@link Options} class).
*
* __Headless Chrome__ <a id="headless"></a>
*
* To start Chrome in headless mode, simply call
* {@linkplain Options#headless Options.headless()}.
*
* let chrome = require('selenium-webdriver/chrome');
* let {Builder} = require('selenium-webdriver');
*
* let driver = new Builder()
* .forBrowser('chrome')
* .setChromeOptions(new chrome.Options().headless())
* .build();
*
* __Customizing the ChromeDriver Server__ <a id="custom-server"></a>
*
* By default, every Chrome session will use a single driver service, which is
* started the first time a {@link Driver} instance is created and terminated
* when this process exits. The default service will inherit its environment
* from the current process and direct all output to /dev/null. You may obtain
* a handle to this default service using
* {@link #getDefaultService getDefaultService()} and change its configuration
* with {@link #setDefaultService setDefaultService()}.
*
* You may also create a {@link Driver} with its own driver service. This is
* useful if you need to capture the server's log output for a specific session:
*
* let chrome = require('selenium-webdriver/chrome');
*
* let service = new chrome.ServiceBuilder()
* .loggingTo('/my/log/file.txt')
* .enableVerboseLogging()
* .build();
*
* let options = new chrome.Options();
* // configure browser options ...
*
* let driver = chrome.Driver.createSession(options, service);
*
* Users should only instantiate the {@link Driver} class directly when they
* need a custom driver service configuration (as shown above). For normal
* operation, users should start Chrome using the
* {@link selenium-webdriver.Builder}.
*
* __Working with Android__ <a id="android"></a>
*
* The [ChromeDriver][android] supports running tests on the Chrome browser as
* well as [WebView apps][webview] starting in Android 4.4 (KitKat). In order to
* work with Android, you must first start the adb
*
* adb start-server
*
* By default, adb will start on port 5037. You may change this port, but this
* will require configuring a [custom server](#custom-server) that will connect
* to adb on the {@linkplain ServiceBuilder#setAdbPort correct port}:
*
* let service = new chrome.ServiceBuilder()
* .setAdbPort(1234)
* build();
* // etc.
*
* The ChromeDriver may be configured to launch Chrome on Android using
* {@link Options#androidChrome()}:
*
* let driver = new Builder()
* .forBrowser('chrome')
* .setChromeOptions(new chrome.Options().androidChrome())
* .build();
*
* Alternatively, you can configure the ChromeDriver to launch an app with a
* Chrome-WebView by setting the {@linkplain Options#androidActivity
* androidActivity} option:
*
* let driver = new Builder()
* .forBrowser('chrome')
* .setChromeOptions(new chrome.Options()
* .androidPackage('com.example')
* .androidActivity('com.example.Activity'))
* .build();
*
* [Refer to the ChromeDriver site] for more information on using the
* [ChromeDriver with Android][android].
*
* [ChromeDriver]: https://chromedriver.chromium.org/
* [ChromeDriver release]: http://chromedriver.storage.googleapis.com/index.html
* [PATH]: http://en.wikipedia.org/wiki/PATH_%28variable%29
* [android]: https://chromedriver.chromium.org/getting-started/getting-started---android
* [webview]: https://developer.chrome.com/multidevice/webview/overview
*/
'use strict'
const io = require('./io')
const { Browser } = require('./lib/capabilities')
const chromium = require('./chromium')
/**
* Name of the ChromeDriver executable.
* @type {string}
* @const
*/
const CHROMEDRIVER_EXE =
process.platform === 'win32' ? 'chromedriver.exe' : 'chromedriver'
/** @type {remote.DriverService} */
let defaultService = null
/**
* Creates {@link selenium-webdriver/remote.DriverService} instances that manage
* a [ChromeDriver](https://chromedriver.chromium.org/)
* server in a child process.
*/
class ServiceBuilder extends chromium.ServiceBuilder {
/**
* @param {string=} opt_exe Path to the server executable to use. If omitted,
* the builder will attempt to locate the chromedriver on the current
* PATH.
* @throws {Error} If provided executable does not exist, or the chromedriver
* cannot be found on the PATH.
*/
constructor(opt_exe) {
let exe = opt_exe || locateSynchronously()
if (!exe) {
throw Error(
`The ChromeDriver could not be found on the current PATH. Please ` +
`download the latest version of the ChromeDriver from ` +
`http://chromedriver.storage.googleapis.com/index.html and ensure ` +
`it can be found on your PATH.`
)
}
super(exe)
}
}
/**
* Class for managing ChromeDriver specific options.
*/
class Options extends chromium.Options {
/**
* Sets the path to the Chrome binary to use. On Mac OS X, this path should
* reference the actual Chrome executable, not just the application binary
* (e.g. "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome").
*
* The binary path be absolute or relative to the chromedriver server
* executable, but it must exist on the machine that will launch Chrome.
*
* @param {string} path The path to the Chrome binary to use.
* @return {!Options} A self reference.
*/
setChromeBinaryPath(path) {
return this.setBinaryPath(path)
}
/**
* Configures the ChromeDriver to launch Chrome on Android via adb. This
* function is shorthand for
* {@link #androidPackage options.androidPackage('com.android.chrome')}.
* @return {!Options} A self reference.
*/
androidChrome() {
return this.androidPackage('com.android.chrome')
}
/**
* Sets the path to Chrome's log file. This path should exist on the machine
* that will launch Chrome.
* @param {string} path Path to the log file to use.
* @return {!Options} A self reference.
*/
setChromeLogFile(path) {
return this.setBrowserLogFile(path)
}
/**
* Sets the directory to store Chrome minidumps in. This option is only
* supported when ChromeDriver is running on Linux.
* @param {string} path The directory path.
* @return {!Options} A self reference.
*/
setChromeMinidumpPath(path) {
return this.setBrowserMinidumpPath(path)
}
}
/**
* Creates a new WebDriver client for Chrome.
*/
class Driver extends chromium.Driver {
/**
* Creates a new session with the ChromeDriver.
*
* @param {(Capabilities|Options)=} opt_config The configuration options.
* @param {(remote.DriverService|http.Executor)=} opt_serviceExecutor Either
* a DriverService to use for the remote end, or a preconfigured executor
* for an externally managed endpoint. If neither is provided, the
* {@linkplain ##getDefaultService default service} will be used by
* default.
* @return {!Driver} A new driver instance.
*/
static createSession(opt_config, opt_serviceExecutor) {
let caps = opt_config || new Options()
return /** @type {!Driver} */ (super.createSession(
caps,
opt_serviceExecutor
))
}
}
/**
* _Synchronously_ attempts to locate the chromedriver executable on the current
* system.
*
* @return {?string} the located executable, or `null`.
*/
function locateSynchronously() {
return io.findInPath(CHROMEDRIVER_EXE, true)
}
/**
* Sets the default service to use for new ChromeDriver instances.
* @param {!remote.DriverService} service The service to use.
* @throws {Error} If the default service is currently running.
*/
function setDefaultService(service) {
if (defaultService && defaultService.isRunning()) {
throw Error(
`The previously configured ChromeDriver service is still running. ` +
`You must shut it down before you may adjust its configuration.`
)
}
defaultService = service
}
/**
* Returns the default ChromeDriver service. If such a service has not been
* configured, one will be constructed using the default configuration for
* a ChromeDriver executable found on the system PATH.
* @return {!remote.DriverService} The default ChromeDriver service.
*/
function getDefaultService() {
if (!defaultService) {
defaultService = new ServiceBuilder().build()
}
return defaultService
}
Options.prototype.CAPABILITY_KEY = 'goog:chromeOptions'
Options.prototype.BROWSER_NAME_VALUE = Browser.CHROME
Driver.getDefaultService = getDefaultService
Driver.prototype.VENDOR_COMMAND_PREFIX = 'goog'
// PUBLIC API
exports.Driver = Driver
exports.Options = Options
exports.ServiceBuilder = ServiceBuilder
exports.getDefaultService = getDefaultService
exports.setDefaultService = setDefaultService
exports.locateSynchronously = locateSynchronously
| 1 | 18,744 | The vendor prefix is still being used on Chromium based browsers like Edge Chromium and Chrome. Did you mean to remove this? | SeleniumHQ-selenium | js |
@@ -33,6 +33,12 @@ class Config(object):
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
self.identifier = None
self.force_no_cloudshell = bool(kwargs.get('no_cloudshell'))
+ self.project_id = kwargs.get('project_id') or None
+ if kwargs.get('composite_root_resources'):
+ tmpcrr = kwargs.get('composite_root_resources')
+ self.composite_root_resources = tmpcrr.split(',')
+ else:
+ self.composite_root_resources = []
self.service_account_key_file = kwargs.get('service_account_key_file')
self.vpc_host_project_id = kwargs.get('vpc_host_project_id')
self.vpc_host_network = kwargs.get('vpc_host_network') or 'default' | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forseti installer config object."""
import datetime
import hashlib
class Config(object):
"""Forseti installer config object."""
# pylint: disable=too-many-instance-attributes
# Having eight variables is reasonable in this case.
def __init__(self, **kwargs):
"""Initialize.
Args:
kwargs (dict): The kwargs.
"""
self.datetimestamp = (kwargs.get('datetimestamp') or
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
self.identifier = None
self.force_no_cloudshell = bool(kwargs.get('no_cloudshell'))
self.service_account_key_file = kwargs.get('service_account_key_file')
self.vpc_host_project_id = kwargs.get('vpc_host_project_id')
self.vpc_host_network = kwargs.get('vpc_host_network') or 'default'
self.vpc_host_subnetwork = (
kwargs.get('vpc_host_subnetwork') or 'default')
self.config_filename = (kwargs.get('config') or
'forseti-setup-{}.cfg'.format(
self.datetimestamp))
self.bucket_location = kwargs.get('gcs_location')
self.installation_type = None
def generate_identifier(self, organization_id):
"""Generate resource unique identifier.
Hash the timestamp and organization id and take the first 7 characters.
Lowercase is needed because some resource name are not allowed to have
uppercase.
The reason why we need to use the hash as the identifier is to ensure
global uniqueness of the bucket names.
Args:
organization_id (str): Organization id.
"""
if not self.identifier:
message = organization_id + self.datetimestamp
hashed_message = hashlib.sha1(message.encode('UTF-8')).hexdigest()
self.identifier = hashed_message[:7].lower()
| 1 | 33,644 | .get will return None if there is no project_id key, so the or None part is redundant | forseti-security-forseti-security | py |
@@ -22,11 +22,16 @@ namespace OpenTelemetry.Metrics
{
public struct MetricPoint
{
+ internal DateTimeOffset StartTime;
+ internal DateTimeOffset EndTime;
+
private readonly AggregationType aggType;
private readonly HistogramBuckets histogramBuckets;
private long longVal;
+ private long longValue;
private long lastLongSum;
private double doubleVal;
+ private double doubleValue;
private double lastDoubleSum;
internal MetricPoint( | 1 | // <copyright file="MetricPoint.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Diagnostics;
using System.Threading;
namespace OpenTelemetry.Metrics
{
public struct MetricPoint
{
private readonly AggregationType aggType;
private readonly HistogramBuckets histogramBuckets;
private long longVal;
private long lastLongSum;
private double doubleVal;
private double lastDoubleSum;
internal MetricPoint(
AggregationType aggType,
DateTimeOffset startTime,
string[] keys,
object[] values,
double[] histogramBounds)
{
Debug.Assert((keys?.Length ?? 0) == (values?.Length ?? 0), "Key and value array lengths did not match.");
this.aggType = aggType;
this.StartTime = startTime;
this.Tags = new ReadOnlyTagCollection(keys, values);
this.EndTime = default;
this.LongValue = default;
this.longVal = default;
this.lastLongSum = default;
this.DoubleValue = default;
this.doubleVal = default;
this.lastDoubleSum = default;
this.MetricPointStatus = MetricPointStatus.NoCollectPending;
if (this.aggType == AggregationType.Histogram)
{
this.histogramBuckets = new HistogramBuckets(histogramBounds);
}
else if (this.aggType == AggregationType.HistogramSumCount)
{
this.histogramBuckets = new HistogramBuckets(null);
}
else
{
this.histogramBuckets = null;
}
}
/// <summary>
/// Gets the tags associated with the metric point.
/// </summary>
public ReadOnlyTagCollection Tags { get; }
public DateTimeOffset StartTime { get; internal set; }
public DateTimeOffset EndTime { get; internal set; }
public long LongValue { get; internal set; }
public double DoubleValue { get; internal set; }
internal MetricPointStatus MetricPointStatus { get; private set; }
public long GetHistogramCount()
{
if (this.aggType == AggregationType.Histogram || this.aggType == AggregationType.HistogramSumCount)
{
return this.histogramBuckets.Count;
}
else
{
throw new NotSupportedException($"{nameof(this.GetHistogramCount)} is not supported for this metric type.");
}
}
public double GetHistogramSum()
{
if (this.aggType == AggregationType.Histogram || this.aggType == AggregationType.HistogramSumCount)
{
return this.histogramBuckets.Sum;
}
else
{
throw new NotSupportedException($"{nameof(this.GetHistogramSum)} is not supported for this metric type.");
}
}
public HistogramBuckets GetHistogramBuckets()
{
if (this.aggType == AggregationType.Histogram || this.aggType == AggregationType.HistogramSumCount)
{
return this.histogramBuckets;
}
else
{
throw new NotSupportedException($"{nameof(this.GetHistogramBuckets)} is not supported for this metric type.");
}
}
internal void Update(long number)
{
switch (this.aggType)
{
case AggregationType.LongSumIncomingDelta:
{
Interlocked.Add(ref this.longVal, number);
break;
}
case AggregationType.LongSumIncomingCumulative:
{
Interlocked.Exchange(ref this.longVal, number);
break;
}
case AggregationType.LongGauge:
{
Interlocked.Exchange(ref this.longVal, number);
break;
}
case AggregationType.Histogram:
case AggregationType.HistogramSumCount:
{
this.Update((double)number);
break;
}
}
// There is a race with Snapshot:
// Update() updates the value
// Snapshot snapshots the value
// Snapshot sets status to NoCollectPending
// Update sets status to CollectPending -- this is not right as the Snapshot
// already included the updated value.
// In the absence of any new Update call until next Snapshot,
// this results in exporting an Update even though
// it had no update.
// TODO: For Delta, this can be mitigated
// by ignoring Zero points
this.MetricPointStatus = MetricPointStatus.CollectPending;
}
internal void Update(double number)
{
switch (this.aggType)
{
case AggregationType.DoubleSumIncomingDelta:
{
double initValue, newValue;
do
{
initValue = this.doubleVal;
newValue = initValue + number;
}
while (initValue != Interlocked.CompareExchange(ref this.doubleVal, newValue, initValue));
break;
}
case AggregationType.DoubleSumIncomingCumulative:
{
Interlocked.Exchange(ref this.doubleVal, number);
break;
}
case AggregationType.DoubleGauge:
{
Interlocked.Exchange(ref this.doubleVal, number);
break;
}
case AggregationType.Histogram:
{
int i;
for (i = 0; i < this.histogramBuckets.ExplicitBounds.Length; i++)
{
// Upper bound is inclusive
if (number <= this.histogramBuckets.ExplicitBounds[i])
{
break;
}
}
lock (this.histogramBuckets.LockObject)
{
this.histogramBuckets.CountVal++;
this.histogramBuckets.SumVal += number;
this.histogramBuckets.BucketCounts[i]++;
}
break;
}
case AggregationType.HistogramSumCount:
{
lock (this.histogramBuckets.LockObject)
{
this.histogramBuckets.CountVal++;
this.histogramBuckets.SumVal += number;
}
break;
}
}
// There is a race with Snapshot:
// Update() updates the value
// Snapshot snapshots the value
// Snapshot sets status to NoCollectPending
// Update sets status to CollectPending -- this is not right as the Snapshot
// already included the updated value.
// In the absence of any new Update call until next Snapshot,
// this results in exporting an Update even though
// it had no update.
// TODO: For Delta, this can be mitigated
// by ignoring Zero points
this.MetricPointStatus = MetricPointStatus.CollectPending;
}
internal void TakeSnapshot(bool outputDelta)
{
switch (this.aggType)
{
case AggregationType.LongSumIncomingDelta:
case AggregationType.LongSumIncomingCumulative:
{
if (outputDelta)
{
long initValue = Interlocked.Read(ref this.longVal);
this.LongValue = initValue - this.lastLongSum;
this.lastLongSum = initValue;
this.MetricPointStatus = MetricPointStatus.NoCollectPending;
// Check again if value got updated, if yes reset status.
// This ensures no Updates get Lost.
if (initValue != Interlocked.Read(ref this.longVal))
{
this.MetricPointStatus = MetricPointStatus.CollectPending;
}
}
else
{
this.LongValue = Interlocked.Read(ref this.longVal);
}
break;
}
case AggregationType.DoubleSumIncomingDelta:
case AggregationType.DoubleSumIncomingCumulative:
{
if (outputDelta)
{
// TODO:
// Is this thread-safe way to read double?
// As long as the value is not -ve infinity,
// the exchange (to 0.0) will never occur,
// but we get the original value atomically.
double initValue = Interlocked.CompareExchange(ref this.doubleVal, 0.0, double.NegativeInfinity);
this.DoubleValue = initValue - this.lastDoubleSum;
this.lastDoubleSum = initValue;
this.MetricPointStatus = MetricPointStatus.NoCollectPending;
// Check again if value got updated, if yes reset status.
// This ensures no Updates get Lost.
if (initValue != Interlocked.CompareExchange(ref this.doubleVal, 0.0, double.NegativeInfinity))
{
this.MetricPointStatus = MetricPointStatus.CollectPending;
}
}
else
{
// TODO:
// Is this thread-safe way to read double?
// As long as the value is not -ve infinity,
// the exchange (to 0.0) will never occur,
// but we get the original value atomically.
this.DoubleValue = Interlocked.CompareExchange(ref this.doubleVal, 0.0, double.NegativeInfinity);
}
break;
}
case AggregationType.LongGauge:
{
this.LongValue = Interlocked.Read(ref this.longVal);
this.MetricPointStatus = MetricPointStatus.NoCollectPending;
// Check again if value got updated, if yes reset status.
// This ensures no Updates get Lost.
if (this.LongValue != Interlocked.Read(ref this.longVal))
{
this.MetricPointStatus = MetricPointStatus.CollectPending;
}
break;
}
case AggregationType.DoubleGauge:
{
// TODO:
// Is this thread-safe way to read double?
// As long as the value is not -ve infinity,
// the exchange (to 0.0) will never occur,
// but we get the original value atomically.
this.DoubleValue = Interlocked.CompareExchange(ref this.doubleVal, 0.0, double.NegativeInfinity);
this.MetricPointStatus = MetricPointStatus.NoCollectPending;
// Check again if value got updated, if yes reset status.
// This ensures no Updates get Lost.
if (this.DoubleValue != Interlocked.CompareExchange(ref this.doubleVal, 0.0, double.NegativeInfinity))
{
this.MetricPointStatus = MetricPointStatus.CollectPending;
}
break;
}
case AggregationType.Histogram:
{
lock (this.histogramBuckets.LockObject)
{
this.histogramBuckets.Count = this.histogramBuckets.CountVal;
this.histogramBuckets.Sum = this.histogramBuckets.SumVal;
if (outputDelta)
{
this.histogramBuckets.CountVal = 0;
this.histogramBuckets.SumVal = 0;
}
for (int i = 0; i < this.histogramBuckets.BucketCounts.Length; i++)
{
this.histogramBuckets.AggregatedBucketCounts[i] = this.histogramBuckets.BucketCounts[i];
if (outputDelta)
{
this.histogramBuckets.BucketCounts[i] = 0;
}
}
this.MetricPointStatus = MetricPointStatus.NoCollectPending;
}
break;
}
case AggregationType.HistogramSumCount:
{
lock (this.histogramBuckets.LockObject)
{
this.histogramBuckets.Count = this.histogramBuckets.CountVal;
this.histogramBuckets.Sum = this.histogramBuckets.SumVal;
if (outputDelta)
{
this.histogramBuckets.CountVal = 0;
this.histogramBuckets.SumVal = 0;
}
this.MetricPointStatus = MetricPointStatus.NoCollectPending;
}
break;
}
}
}
}
}
| 1 | 22,729 | Do they need to be `internal`? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -176,7 +176,7 @@ func (b *PinnedMap) Iter(f MapIter) error {
args := cmd[1:]
printCommand(prog, args...)
- output, err := exec.Command(prog, args...).CombinedOutput()
+ output, err := exec.Command(prog, args...).Output()
if err != nil {
return errors.Errorf("failed to dump in map (%s): %s\n%s", b.versionedFilename(), err, output)
} | 1 | // Copyright (c) 2019-2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bpf
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"golang.org/x/sys/unix"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type MapIter func(k, v []byte)
type Map interface {
GetName() string
// EnsureExists opens the map, creating and pinning it if needed.
EnsureExists() error
// MapFD gets the file descriptor of the map, only valid after calling EnsureExists().
MapFD() MapFD
// Path returns the path that the map is (to be) pinned to.
Path() string
Iter(MapIter) error
Update(k, v []byte) error
Get(k []byte) ([]byte, error)
Delete(k []byte) error
}
type MapParameters struct {
Filename string
Type string
KeySize int
ValueSize int
MaxEntries int
Name string
Flags int
Version int
}
func versionedStr(ver int, str string) string {
if ver <= 1 {
return str
}
return fmt.Sprintf("%s%d", str, ver)
}
func (mp *MapParameters) versionedName() string {
return versionedStr(mp.Version, mp.Name)
}
func (mp *MapParameters) versionedFilename() string {
return versionedStr(mp.Version, mp.Filename)
}
type MapContext struct {
RepinningEnabled bool
}
func (c *MapContext) NewPinnedMap(params MapParameters) Map {
if len(params.versionedName()) >= unix.BPF_OBJ_NAME_LEN {
logrus.WithField("name", params.Name).Panic("Bug: BPF map name too long")
}
m := &PinnedMap{
context: c,
MapParameters: params,
perCPU: strings.Contains(params.Type, "percpu"),
}
return m
}
type PinnedMap struct {
context *MapContext
MapParameters
fdLoaded bool
fd MapFD
perCPU bool
}
func (b *PinnedMap) GetName() string {
return b.versionedName()
}
func (b *PinnedMap) MapFD() MapFD {
if !b.fdLoaded {
logrus.Panic("MapFD() called without first calling EnsureExists()")
}
return b.fd
}
func (b *PinnedMap) Path() string {
return b.versionedFilename()
}
func (b *PinnedMap) Close() error {
err := b.fd.Close()
b.fdLoaded = false
b.fd = 0
return err
}
func (b *PinnedMap) RepinningEnabled() bool {
if b.context == nil {
return false
}
return b.context.RepinningEnabled
}
// DumpMapCmd returns the command that can be used to dump a map or an error
func DumpMapCmd(m Map) ([]string, error) {
if pm, ok := m.(*PinnedMap); ok {
return []string{
"bpftool",
"--json",
"--pretty",
"map",
"dump",
"pinned",
pm.versionedFilename(),
}, nil
}
return nil, errors.Errorf("unrecognized map type %T", m)
}
// IterMapCmdOutput iterates over the outout of a command obtained by DumpMapCmd
func IterMapCmdOutput(output []byte, f MapIter) error {
var mp []mapEntry
err := json.Unmarshal(output, &mp)
if err != nil {
return errors.Errorf("cannot parse json output: %v\n%s", err, output)
}
for _, me := range mp {
k, err := hexStringsToBytes(me.Key)
if err != nil {
return errors.Errorf("failed parsing entry %s key: %e", me, err)
}
v, err := hexStringsToBytes(me.Value)
if err != nil {
return errors.Errorf("failed parsing entry %s val: %e", me, err)
}
f(k, v)
}
return nil
}
func (b *PinnedMap) Iter(f MapIter) error {
cmd, err := DumpMapCmd(b)
if err != nil {
return err
}
prog := cmd[0]
args := cmd[1:]
printCommand(prog, args...)
output, err := exec.Command(prog, args...).CombinedOutput()
if err != nil {
return errors.Errorf("failed to dump in map (%s): %s\n%s", b.versionedFilename(), err, output)
}
if err := IterMapCmdOutput(output, f); err != nil {
return errors.WithMessagef(err, "map %s", b.versionedFilename())
}
return nil
}
func (b *PinnedMap) Update(k, v []byte) error {
if b.perCPU {
// Per-CPU maps need a buffer of value-size * num-CPUs.
logrus.Panic("Per-CPU operations not implemented")
}
return UpdateMapEntry(b.fd, k, v)
}
func (b *PinnedMap) Get(k []byte) ([]byte, error) {
if b.perCPU {
// Per-CPU maps need a buffer of value-size * num-CPUs.
logrus.Panic("Per-CPU operations not implemented")
}
return GetMapEntry(b.fd, k, b.ValueSize)
}
func appendBytes(strings []string, bytes []byte) []string {
for _, b := range bytes {
strings = append(strings, strconv.FormatInt(int64(b), 10))
}
return strings
}
func (b *PinnedMap) Delete(k []byte) error {
logrus.WithField("key", k).Debug("Deleting map entry")
args := make([]string, 0, 10+len(k))
args = append(args, "map", "delete",
"pinned", b.versionedFilename(),
"key")
args = appendBytes(args, k)
cmd := exec.Command("bpftool", args...)
out, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(string(out), "delete failed: No such file or directory") {
logrus.WithField("k", k).Debug("Item didn't exist.")
return os.ErrNotExist
}
logrus.WithField("out", string(out)).Error("Failed to run bpftool")
}
return err
}
func (b *PinnedMap) EnsureExists() error {
if b.fdLoaded {
return nil
}
_, err := MaybeMountBPFfs()
if err != nil {
logrus.WithError(err).Error("Failed to mount bpffs")
return err
}
// FIXME hard-coded dir
err = os.MkdirAll("/sys/fs/bpf/tc/globals", 0700)
if err != nil {
logrus.WithError(err).Error("Failed create dir")
return err
}
_, err = os.Stat(b.versionedFilename())
if err != nil {
if !os.IsNotExist(err) {
return err
}
logrus.Debug("Map file didn't exist")
if b.context.RepinningEnabled {
logrus.WithField("name", b.Name).Info("Looking for map by name (to repin it)")
err = RepinMap(b.versionedName(), b.versionedFilename())
if err != nil && !os.IsNotExist(err) {
return err
}
}
}
if err == nil {
logrus.Debug("Map file already exists, trying to open it")
b.fd, err = GetMapFDByPin(b.versionedFilename())
if err == nil {
b.fdLoaded = true
logrus.WithField("fd", b.fd).WithField("name", b.versionedFilename()).
Info("Loaded map file descriptor.")
}
return err
}
logrus.Debug("Map didn't exist, creating it")
cmd := exec.Command("bpftool", "map", "create", b.versionedFilename(),
"type", b.Type,
"key", fmt.Sprint(b.KeySize),
"value", fmt.Sprint(b.ValueSize),
"entries", fmt.Sprint(b.MaxEntries),
"name", b.versionedName(),
"flags", fmt.Sprint(b.Flags),
)
out, err := cmd.CombinedOutput()
if err != nil {
logrus.WithField("out", string(out)).Error("Failed to run bpftool")
return err
}
b.fd, err = GetMapFDByPin(b.versionedFilename())
if err == nil {
b.fdLoaded = true
logrus.WithField("fd", b.fd).WithField("name", b.versionedFilename()).
Info("Loaded map file descriptor.")
}
return err
}
type bpftoolMapMeta struct {
ID int `json:"id"`
Name string `json:"name"`
}
func RepinMap(name string, filename string) error {
cmd := exec.Command("bpftool", "map", "list", "-j")
out, err := cmd.Output()
if err != nil {
return errors.Wrap(err, "bpftool map list failed")
}
logrus.WithField("maps", string(out)).Debug("Got map metadata.")
var maps []bpftoolMapMeta
err = json.Unmarshal(out, &maps)
if err != nil {
return errors.Wrap(err, "bpftool returned bad JSON")
}
for _, m := range maps {
if m.Name == name {
// Found the map, try to repin it.
cmd := exec.Command("bpftool", "map", "pin", "id", fmt.Sprint(m.ID), filename)
return errors.Wrap(cmd.Run(), "bpftool failed to repin map")
}
}
return os.ErrNotExist
}
| 1 | 17,841 | Hit a flake here where I think there may have been some output to stderr that got mixed in with the output from Stdout. Hence switching to `Output()`, which does also capture stderr as `err.Stderr` | projectcalico-felix | go |
@@ -175,8 +175,8 @@ const std::map<llvm::StringRef, hipCounter> CUDA_DRIVER_FUNCTION_MAP{
{"cuMemcpy", {"hipMemcpy_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy2D due to different signatures
- {"cuMemcpy2D", {"hipMemcpy2D_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
- {"cuMemcpy2D_v2", {"hipMemcpy2D_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
+ {"cuMemcpy2D", {"hipMemcpyParam2D", "", CONV_MEMORY, API_DRIVER}},
+ {"cuMemcpy2D_v2", {"hipMemcpyParam2D", "", CONV_MEMORY, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaMemcpy2DAsync due to different signatures
{"cuMemcpy2DAsync", {"hipMemcpy2DAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}}, | 1 | /*
Copyright (c) 2015 - present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include "CUDA2HIP.h"
// Map of all CUDA Driver API functions
const std::map<llvm::StringRef, hipCounter> CUDA_DRIVER_FUNCTION_MAP{
// 5.2. Error Handling
// no analogue
// NOTE: cudaGetErrorName and cuGetErrorName have different signatures
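  // (For reference: cuGetErrorName(CUresult, const char**) returns the name through an out parameter, while cudaGetErrorName(cudaError_t) returns a const char* directly.)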
{"cuGetErrorName", {"hipGetErrorName_", "", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: cudaGetErrorString and cuGetErrorString have different signatures
{"cuGetErrorString", {"hipGetErrorString_", "", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED}},
// 5.3. Initialization
// no analogue
{"cuInit", {"hipInit", "", CONV_INIT, API_DRIVER}},
  // 5.4. Version Management
// cudaDriverGetVersion
{"cuDriverGetVersion", {"hipDriverGetVersion", "", CONV_VERSION, API_DRIVER}},
// 5.5. Device Management
// cudaGetDevice
  // NOTE: cuDeviceGet has an additional arg compared to cudaGetDevice: int ordinal
{"cuDeviceGet", {"hipGetDevice", "", CONV_DEVICE, API_DRIVER}},
// cudaDeviceGetAttribute
{"cuDeviceGetAttribute", {"hipDeviceGetAttribute", "", CONV_DEVICE, API_DRIVER}},
// cudaGetDeviceCount
{"cuDeviceGetCount", {"hipGetDeviceCount", "", CONV_DEVICE, API_DRIVER}},
// no analogue
{"cuDeviceGetLuid", {"hipDeviceGetLuid", "", CONV_DEVICE, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuDeviceGetName", {"hipDeviceGetName", "", CONV_DEVICE, API_DRIVER}},
// no analogue
{"cuDeviceGetUuid", {"hipDeviceGetUuid", "", CONV_DEVICE, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuDeviceTotalMem", {"hipDeviceTotalMem", "", CONV_DEVICE, API_DRIVER}},
{"cuDeviceTotalMem_v2", {"hipDeviceTotalMem", "", CONV_DEVICE, API_DRIVER}},
// 5.6. Device Management [DEPRECATED]
{"cuDeviceComputeCapability", {"hipDeviceComputeCapability", "", CONV_DEVICE, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaGetDeviceProperties due to different attributes: cudaDeviceProp and CUdevprop
{"cuDeviceGetProperties", {"hipGetDeviceProperties_", "", CONV_DEVICE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.7. Primary Context Management
// no analogues
{"cuDevicePrimaryCtxGetState", {"hipDevicePrimaryCtxGetState", "", CONV_CONTEXT, API_DRIVER}},
{"cuDevicePrimaryCtxRelease", {"hipDevicePrimaryCtxRelease", "", CONV_CONTEXT, API_DRIVER}},
{"cuDevicePrimaryCtxReset", {"hipDevicePrimaryCtxReset", "", CONV_CONTEXT, API_DRIVER}},
{"cuDevicePrimaryCtxRetain", {"hipDevicePrimaryCtxRetain", "", CONV_CONTEXT, API_DRIVER}},
{"cuDevicePrimaryCtxSetFlags", {"hipDevicePrimaryCtxSetFlags", "", CONV_CONTEXT, API_DRIVER}},
// 5.8. Context Management
// no analogues, except a few
{"cuCtxCreate", {"hipCtxCreate", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxCreate_v2", {"hipCtxCreate", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxDestroy", {"hipCtxDestroy", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxDestroy_v2", {"hipCtxDestroy", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxGetApiVersion", {"hipCtxGetApiVersion", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxGetCacheConfig", {"hipCtxGetCacheConfig", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxGetCurrent", {"hipCtxGetCurrent", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxGetDevice", {"hipCtxGetDevice", "", CONV_CONTEXT, API_DRIVER}},
// cudaGetDeviceFlags
// TODO: rename to hipGetDeviceFlags
{"cuCtxGetFlags", {"hipCtxGetFlags", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceGetLimit
{"cuCtxGetLimit", {"hipDeviceGetLimit", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceGetSharedMemConfig
// TODO: rename to hipDeviceGetSharedMemConfig
{"cuCtxGetSharedMemConfig", {"hipCtxGetSharedMemConfig", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceGetStreamPriorityRange
{"cuCtxGetStreamPriorityRange", {"hipDeviceGetStreamPriorityRange", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxPopCurrent", {"hipCtxPopCurrent", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxPopCurrent_v2", {"hipCtxPopCurrent", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxPushCurrent", {"hipCtxPushCurrent", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxPushCurrent_v2", {"hipCtxPushCurrent", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxSetCacheConfig", {"hipCtxSetCacheConfig", "", CONV_CONTEXT, API_DRIVER}},
{"cuCtxSetCurrent", {"hipCtxSetCurrent", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceSetLimit
{"cuCtxSetLimit", {"hipDeviceSetLimit", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceSetSharedMemConfig
// TODO: rename to hipDeviceSetSharedMemConfig
{"cuCtxSetSharedMemConfig", {"hipCtxSetSharedMemConfig", "", CONV_CONTEXT, API_DRIVER}},
// cudaDeviceSynchronize
// TODO: rename to hipDeviceSynchronize
{"cuCtxSynchronize", {"hipCtxSynchronize", "", CONV_CONTEXT, API_DRIVER}},
// 5.9. Context Management [DEPRECATED]
// no analogues
{"cuCtxAttach", {"hipCtxAttach", "", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED}},
{"cuCtxDetach", {"hipCtxDetach", "", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED}},
// 5.10. Module Management
// no analogues
{"cuLinkAddData", {"hipLinkAddData", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkAddData_v2", {"hipLinkAddData", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkAddFile", {"hipLinkAddFile", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkAddFile_v2", {"hipLinkAddFile", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkComplete", {"hipLinkComplete", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkCreate", {"hipLinkCreate", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkCreate_v2", {"hipLinkCreate", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuLinkDestroy", {"hipLinkDestroy", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuModuleGetFunction", {"hipModuleGetFunction", "", CONV_MODULE, API_DRIVER}},
{"cuModuleGetGlobal", {"hipModuleGetGlobal", "", CONV_MODULE, API_DRIVER}},
{"cuModuleGetGlobal_v2", {"hipModuleGetGlobal", "", CONV_MODULE, API_DRIVER}},
{"cuModuleGetSurfRef", {"hipModuleGetSurfRef", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuModuleGetTexRef", {"hipModuleGetTexRef", "", CONV_MODULE, API_DRIVER}},
{"cuModuleLoad", {"hipModuleLoad", "", CONV_MODULE, API_DRIVER}},
{"cuModuleLoadData", {"hipModuleLoadData", "", CONV_MODULE, API_DRIVER}},
{"cuModuleLoadDataEx", {"hipModuleLoadDataEx", "", CONV_MODULE, API_DRIVER}},
{"cuModuleLoadFatBinary", {"hipModuleLoadFatBinary", "", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuModuleUnload", {"hipModuleUnload", "", CONV_MODULE, API_DRIVER}},
// 5.11. Memory Management
// no analogue
{"cuArray3DCreate", {"hipArray3DCreate", "", CONV_MEMORY, API_DRIVER}},
{"cuArray3DCreate_v2", {"hipArray3DCreate", "", CONV_MEMORY, API_DRIVER}},
{"cuArray3DGetDescriptor", {"hipArray3DGetDescriptor", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuArray3DGetDescriptor_v2", {"hipArray3DGetDescriptor", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuArrayCreate", {"hipArrayCreate", "", CONV_MEMORY, API_DRIVER}},
{"cuArrayCreate_v2", {"hipArrayCreate", "", CONV_MEMORY, API_DRIVER}},
{"cuArrayDestroy", {"hipArrayDestroy", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuArrayGetDescriptor", {"hipArrayGetDescriptor", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuArrayGetDescriptor_v2", {"hipArrayGetDescriptor", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaDeviceGetByPCIBusId
{"cuDeviceGetByPCIBusId", {"hipDeviceGetByPCIBusId", "", CONV_MEMORY, API_DRIVER}},
// cudaDeviceGetPCIBusId
{"cuDeviceGetPCIBusId", {"hipDeviceGetPCIBusId", "", CONV_MEMORY, API_DRIVER}},
// cudaIpcCloseMemHandle
{"cuIpcCloseMemHandle", {"hipIpcCloseMemHandle", "", CONV_MEMORY, API_DRIVER}},
// cudaIpcGetEventHandle
{"cuIpcGetEventHandle", {"hipIpcGetEventHandle", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaIpcGetMemHandle
{"cuIpcGetMemHandle", {"hipIpcGetMemHandle", "", CONV_MEMORY, API_DRIVER}},
// cudaIpcOpenEventHandle
{"cuIpcOpenEventHandle", {"hipIpcOpenEventHandle", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaIpcOpenMemHandle
{"cuIpcOpenMemHandle", {"hipIpcOpenMemHandle", "", CONV_MEMORY, API_DRIVER}},
// cudaMalloc
{"cuMemAlloc", {"hipMalloc", "", CONV_MEMORY, API_DRIVER}},
{"cuMemAlloc_v2", {"hipMalloc", "", CONV_MEMORY, API_DRIVER}},
// cudaHostAlloc
{"cuMemAllocHost", {"hipMemAllocHost", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemAllocHost_v2", {"hipMemAllocHost", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemAllocManaged", {"hipMemAllocManaged", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMallocPitch due to different signatures
{"cuMemAllocPitch", {"hipMemAllocPitch", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemAllocPitch_v2", {"hipMemAllocPitch", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy due to different signatures
{"cuMemcpy", {"hipMemcpy_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy2D due to different signatures
{"cuMemcpy2D", {"hipMemcpy2D_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpy2D_v2", {"hipMemcpy2D_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy2DAsync due to different signatures
{"cuMemcpy2DAsync", {"hipMemcpy2DAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpy2DAsync_v2", {"hipMemcpy2DAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpy2DUnaligned", {"hipMemcpy2DUnaligned", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpy2DUnaligned_v2", {"hipMemcpy2DUnaligned", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy3D due to different signatures
{"cuMemcpy3D", {"hipMemcpy3D_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpy3D_v2", {"hipMemcpy3D_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy3DAsync due to different signatures
{"cuMemcpy3DAsync", {"hipMemcpy3DAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpy3DAsync_v2", {"hipMemcpy3DAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy3DPeer due to different signatures
{"cuMemcpy3DPeer", {"hipMemcpy3DPeer_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpy3DPeerAsync due to different signatures
{"cuMemcpy3DPeerAsync", {"hipMemcpy3DPeerAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpyAsync due to different signatures
{"cuMemcpyAsync", {"hipMemcpyAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpyArrayToArray due to different signatures
{"cuMemcpyAtoA", {"hipMemcpyAtoA", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpyAtoA_v2", {"hipMemcpyAtoA", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpyAtoD", {"hipMemcpyAtoD", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpyAtoD_v2", {"hipMemcpyAtoD", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpyAtoH", {"hipMemcpyAtoH", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyAtoH_v2", {"hipMemcpyAtoH", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyAtoHAsync", {"hipMemcpyAtoHAsync", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpyAtoHAsync_v2", {"hipMemcpyAtoHAsync", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpyDtoA", {"hipMemcpyDtoA", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpyDtoA_v2", {"hipMemcpyDtoA", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpyDtoD", {"hipMemcpyDtoD", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyDtoD_v2", {"hipMemcpyDtoD", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyDtoDAsync", {"hipMemcpyDtoDAsync", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyDtoDAsync_v2", {"hipMemcpyDtoDAsync", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyDtoH", {"hipMemcpyDtoH", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyDtoH_v2", {"hipMemcpyDtoH", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyDtoHAsync", {"hipMemcpyDtoHAsync", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyDtoHAsync_v2", {"hipMemcpyDtoHAsync", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyHtoA", {"hipMemcpyHtoA", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyHtoA_v2", {"hipMemcpyHtoA", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyHtoAAsync", {"hipMemcpyHtoAAsync", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemcpyHtoAAsync_v2", {"hipMemcpyHtoAAsync", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemcpyHtoD", {"hipMemcpyHtoD", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyHtoD_v2", {"hipMemcpyHtoD", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemcpyHtoDAsync", {"hipMemcpyHtoDAsync", "", CONV_MEMORY, API_DRIVER}},
{"cuMemcpyHtoDAsync_v2", {"hipMemcpyHtoDAsync", "", CONV_MEMORY, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaMemcpyPeer due to different signatures
{"cuMemcpyPeer", {"hipMemcpyPeer_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMemcpyPeerAsync due to different signatures
{"cuMemcpyPeerAsync", {"hipMemcpyPeerAsync_", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaFree
{"cuMemFree", {"hipFree", "", CONV_MEMORY, API_DRIVER}},
{"cuMemFree_v2", {"hipFree", "", CONV_MEMORY, API_DRIVER}},
// cudaFreeHost
{"cuMemFreeHost", {"hipHostFree", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemGetAddressRange", {"hipMemGetAddressRange", "", CONV_MEMORY, API_DRIVER}},
{"cuMemGetAddressRange_v2", {"hipMemGetAddressRange", "", CONV_MEMORY, API_DRIVER}},
// cudaMemGetInfo
{"cuMemGetInfo", {"hipMemGetInfo", "", CONV_MEMORY, API_DRIVER}},
{"cuMemGetInfo_v2", {"hipMemGetInfo", "", CONV_MEMORY, API_DRIVER}},
// cudaHostAlloc
{"cuMemHostAlloc", {"hipHostMalloc", "", CONV_MEMORY, API_DRIVER}},
// cudaHostGetDevicePointer
{"cuMemHostGetDevicePointer", {"hipHostGetDevicePointer", "", CONV_MEMORY, API_DRIVER}},
{"cuMemHostGetDevicePointer_v2", {"hipHostGetDevicePointer", "", CONV_MEMORY, API_DRIVER}},
// cudaHostGetFlags
{"cuMemHostGetFlags", {"hipMemHostGetFlags", "", CONV_MEMORY, API_DRIVER}},
// cudaHostRegister
{"cuMemHostRegister", {"hipHostRegister", "", CONV_MEMORY, API_DRIVER}},
{"cuMemHostRegister_v2", {"hipHostRegister", "", CONV_MEMORY, API_DRIVER}},
// cudaHostUnregister
{"cuMemHostUnregister", {"hipHostUnregister", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemsetD16", {"hipMemsetD16", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemsetD16_v2", {"hipMemsetD16", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD16Async", {"hipMemsetD16Async", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D16", {"hipMemsetD2D16", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemsetD2D16_v2", {"hipMemsetD2D16", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D16Async", {"hipMemsetD2D16Async", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D32", {"hipMemsetD2D32", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemsetD2D32_v2", {"hipMemsetD2D32", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D32Async", {"hipMemsetD2D32Async", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D8", {"hipMemsetD2D8", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuMemsetD2D8_v2", {"hipMemsetD2D8", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuMemsetD2D8Async", {"hipMemsetD2D8Async", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaMemset
{"cuMemsetD32", {"hipMemsetD32", "", CONV_MEMORY, API_DRIVER}},
{"cuMemsetD32_v2", {"hipMemsetD32", "", CONV_MEMORY, API_DRIVER}},
// cudaMemsetAsync
{"cuMemsetD32Async", {"hipMemsetD32Async", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemsetD8", {"hipMemsetD8", "", CONV_MEMORY, API_DRIVER}},
{"cuMemsetD8_v2", {"hipMemsetD8", "", CONV_MEMORY, API_DRIVER}},
// no analogue
{"cuMemsetD8Async", {"hipMemsetD8Async", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaMallocMipmappedArray due to different signatures
{"cuMipmappedArrayCreate", {"hipMipmappedArrayCreate", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaFreeMipmappedArray due to different signatures
{"cuMipmappedArrayDestroy", {"hipMipmappedArrayDestroy", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGetMipmappedArrayLevel due to different signatures
{"cuMipmappedArrayGetLevel", {"hipMipmappedArrayGetLevel", "", CONV_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// 5.12. Unified Addressing
// cudaMemAdvise
{"cuMemAdvise", {"hipMemAdvise", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// TODO: double check cudaMemPrefetchAsync
{"cuMemPrefetchAsync", {"hipMemPrefetchAsync_", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// cudaMemRangeGetAttribute
{"cuMemRangeGetAttribute", {"hipMemRangeGetAttribute", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// cudaMemRangeGetAttributes
{"cuMemRangeGetAttributes", {"hipMemRangeGetAttributes", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuPointerGetAttribute", {"hipPointerGetAttribute", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaPointerGetAttributes due to different signatures
{"cuPointerGetAttributes", {"hipPointerGetAttributes", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuPointerSetAttribute", {"hipPointerSetAttribute", "", CONV_ADDRESSING, API_DRIVER, HIP_UNSUPPORTED}},
// 5.13. Stream Management
// cudaStreamAddCallback
{"cuStreamAddCallback", {"hipStreamAddCallback", "", CONV_STREAM, API_DRIVER}},
// cudaStreamAttachMemAsync
{"cuStreamAttachMemAsync", {"hipStreamAttachMemAsync", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// cudaStreamBeginCapture
{"cuStreamBeginCapture", {"hipStreamBeginCapture", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamBeginCapture_v2", {"hipStreamBeginCapture", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamBeginCapture_ptsz", {"hipStreamBeginCapture", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// cudaStreamCreateWithFlags
{"cuStreamCreate", {"hipStreamCreateWithFlags", "", CONV_STREAM, API_DRIVER}},
// cudaStreamCreateWithPriority
{"cuStreamCreateWithPriority", {"hipStreamCreateWithPriority", "", CONV_STREAM, API_DRIVER}},
// cudaStreamDestroy
{"cuStreamDestroy", {"hipStreamDestroy", "", CONV_STREAM, API_DRIVER}},
{"cuStreamDestroy_v2", {"hipStreamDestroy", "", CONV_STREAM, API_DRIVER}},
// cudaStreamEndCapture
{"cuStreamEndCapture", {"hipStreamEndCapture", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// cudaStreamGetCaptureInfo
{"cuStreamGetCaptureInfo", {"hipStreamGetCaptureInfo", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuStreamGetCtx", {"hipStreamGetContext", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// cudaStreamGetFlags
{"cuStreamGetFlags", {"hipStreamGetFlags", "", CONV_STREAM, API_DRIVER}},
// cudaStreamGetPriority
{"cuStreamGetPriority", {"hipStreamGetPriority", "", CONV_STREAM, API_DRIVER}},
// cudaStreamIsCapturing
{"cuStreamIsCapturing", {"hipStreamIsCapturing", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// cudaStreamQuery
{"cuStreamQuery", {"hipStreamQuery", "", CONV_STREAM, API_DRIVER}},
// cudaStreamSynchronize
{"cuStreamSynchronize", {"hipStreamSynchronize", "", CONV_STREAM, API_DRIVER}},
// cudaStreamWaitEvent
{"cuStreamWaitEvent", {"hipStreamWaitEvent", "", CONV_STREAM, API_DRIVER}},
// cudaThreadExchangeStreamCaptureMode
{"cuThreadExchangeStreamCaptureMode", {"hipThreadExchangeStreamCaptureMode", "", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED}},
// 5.14. Event Management
// cudaEventCreateWithFlags
{"cuEventCreate", {"hipEventCreateWithFlags", "", CONV_EVENT, API_DRIVER}},
// cudaEventDestroy
{"cuEventDestroy", {"hipEventDestroy", "", CONV_EVENT, API_DRIVER}},
{"cuEventDestroy_v2", {"hipEventDestroy", "", CONV_EVENT, API_DRIVER}},
// cudaEventElapsedTime
{"cuEventElapsedTime", {"hipEventElapsedTime", "", CONV_EVENT, API_DRIVER}},
// cudaEventQuery
{"cuEventQuery", {"hipEventQuery", "", CONV_EVENT, API_DRIVER}},
// cudaEventRecord
{"cuEventRecord", {"hipEventRecord", "", CONV_EVENT, API_DRIVER}},
// cudaEventSynchronize
{"cuEventSynchronize", {"hipEventSynchronize", "", CONV_EVENT, API_DRIVER}},
// 5.15. External Resource Interoperability
// cudaDestroyExternalMemory
{"cuDestroyExternalMemory", {"hipDestroyExternalMemory", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaDestroyExternalSemaphore
{"cuDestroyExternalSemaphore", {"hipDestroyExternalSemaphore", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaExternalMemoryGetMappedBuffer
{"cuExternalMemoryGetMappedBuffer", {"hipExternalMemoryGetMappedBuffer", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaExternalMemoryGetMappedMipmappedArray
{"cuExternalMemoryGetMappedMipmappedArray", {"hipExternalMemoryGetMappedMipmappedArray", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaImportExternalMemory
{"cuImportExternalMemory", {"hipImportExternalMemory", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaImportExternalSemaphore
{"cuImportExternalSemaphore", {"hipImportExternalSemaphore", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaSignalExternalSemaphoresAsync
{"cuSignalExternalSemaphoresAsync", {"hipSignalExternalSemaphoresAsync", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// cudaWaitExternalSemaphoresAsync
{"cuWaitExternalSemaphoresAsync", {"hipWaitExternalSemaphoresAsync", "", CONV_EXT_RES, API_DRIVER, HIP_UNSUPPORTED}},
// 5.16. Stream Memory Operations
// no analogues
{"cuStreamBatchMemOp", {"hipStreamBatchMemOp", "", CONV_STREAM_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamWaitValue32", {"hipStreamWaitValue32", "", CONV_STREAM_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamWaitValue64", {"hipStreamWaitValue64", "", CONV_STREAM_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamWriteValue32", {"hipStreamWriteValue32", "", CONV_STREAM_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
{"cuStreamWriteValue64", {"hipStreamWriteValue64", "", CONV_STREAM_MEMORY, API_DRIVER, HIP_UNSUPPORTED}},
// 5.17. Execution Control
// no analogue
{"cuFuncGetAttribute", {"hipFuncGetAttribute", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaFuncSetAttribute due to different signatures
{"cuFuncSetAttribute", {"hipFuncSetAttribute", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaFuncSetCacheConfig due to different signatures
{"cuFuncSetCacheConfig", {"hipFuncSetCacheConfig", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaFuncSetSharedMemConfig due to different signatures
{"cuFuncSetSharedMemConfig", {"hipFuncSetSharedMemConfig", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaLaunchCooperativeKernel due to different signatures
{"cuLaunchCooperativeKernel", {"hipLaunchCooperativeKernel_", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaLaunchCooperativeKernelMultiDevice due to different signatures
{"cuLaunchCooperativeKernelMultiDevice", {"hipLaunchCooperativeKernelMultiDevice_", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// cudaLaunchHostFunc
{"cuLaunchHostFunc", {"hipLaunchHostFunc", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaLaunchKernel due to different signatures
{"cuLaunchKernel", {"hipModuleLaunchKernel", "", CONV_EXECUTION, API_DRIVER}},
// 5.18. Execution Control [DEPRECATED]
// no analogue
{"cuFuncSetBlockShape", {"hipFuncSetBlockShape", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuFuncSetSharedSize", {"hipFuncSetSharedSize", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaLaunch due to different signatures
{"cuLaunch", {"hipLaunch", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuLaunchGrid", {"hipLaunchGrid", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuLaunchGridAsync", {"hipLaunchGridAsync", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuParamSetf", {"hipParamSetf", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuParamSeti", {"hipParamSeti", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuParamSetSize", {"hipParamSetSize", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuParamSetTexRef", {"hipParamSetTexRef", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuParamSetv", {"hipParamSetv", "", CONV_EXECUTION, API_DRIVER, HIP_UNSUPPORTED}},
// 5.19. Graph Management
// cudaGraphAddChildGraphNode
{"cuGraphAddChildGraphNode", {"hipGraphAddChildGraphNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddDependencies
{"cuGraphAddDependencies", {"hipGraphAddDependencies", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddEmptyNode
{"cuGraphAddEmptyNode", {"hipGraphAddEmptyNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddHostNode
{"cuGraphAddHostNode", {"hipGraphAddHostNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddKernelNode
{"cuGraphAddKernelNode", {"hipGraphAddKernelNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddMemcpyNode
{"cuGraphAddMemcpyNode", {"hipGraphAddMemcpyNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphAddMemsetNode
{"cuGraphAddMemsetNode", {"hipGraphAddMemsetNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphChildGraphNodeGetGraph
{"cuGraphChildGraphNodeGetGraph", {"hipGraphChildGraphNodeGetGraph", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphClone
{"cuGraphClone", {"hipGraphClone", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphCreate
{"cuGraphCreate", {"hipGraphCreate", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphDestroy
{"cuGraphDestroy", {"hipGraphDestroy", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphDestroyNode
{"cuGraphDestroyNode", {"hipGraphDestroyNode", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphExecDestroy
{"cuGraphExecDestroy", {"hipGraphExecDestroy", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphGetEdges
{"cuGraphGetEdges", {"hipGraphGetEdges", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphGetNodes
{"cuGraphGetNodes", {"hipGraphGetNodes", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphGetRootNodes
{"cuGraphGetRootNodes", {"hipGraphGetRootNodes", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphHostNodeGetParams
{"cuGraphHostNodeGetParams", {"hipGraphHostNodeGetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphHostNodeSetParams
{"cuGraphHostNodeSetParams", {"hipGraphHostNodeSetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphInstantiate
{"cuGraphInstantiate", {"hipGraphInstantiate", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphExecKernelNodeSetParams
{"cuGraphExecKernelNodeSetParams", {"hipGraphExecKernelNodeSetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphKernelNodeGetParams
{"cuGraphKernelNodeGetParams", {"hipGraphKernelNodeGetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphKernelNodeSetParams
{"cuGraphKernelNodeSetParams", {"hipGraphKernelNodeSetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphLaunch
{"cuGraphLaunch", {"hipGraphLaunch", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphMemcpyNodeGetParams
{"cuGraphMemcpyNodeGetParams", {"hipGraphMemcpyNodeGetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphMemcpyNodeSetParams
{"cuGraphMemcpyNodeSetParams", {"hipGraphMemcpyNodeSetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphMemsetNodeGetParams
{"cuGraphMemsetNodeGetParams", {"hipGraphMemsetNodeGetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphMemsetNodeSetParams
{"cuGraphMemsetNodeSetParams", {"hipGraphMemsetNodeSetParams", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphNodeFindInClone
{"cuGraphNodeFindInClone", {"hipGraphNodeFindInClone", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphNodeGetDependencies
{"cuGraphNodeGetDependencies", {"hipGraphNodeGetDependencies", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphNodeGetDependentNodes
{"cuGraphNodeGetDependentNodes", {"hipGraphNodeGetDependentNodes", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphNodeGetType
{"cuGraphNodeGetType", {"hipGraphNodeGetType", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphRemoveDependencies
{"cuGraphRemoveDependencies", {"hipGraphRemoveDependencies", "", CONV_GRAPH, API_DRIVER, HIP_UNSUPPORTED}},
// 5.20. Occupancy
// cudaOccupancyMaxActiveBlocksPerMultiprocessor
{"cuOccupancyMaxActiveBlocksPerMultiprocessor", {"hipOccupancyMaxActiveBlocksPerMultiprocessor", "", CONV_OCCUPANCY, API_DRIVER}},
// cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags
{"cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", {"hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", "", CONV_OCCUPANCY, API_DRIVER, HIP_UNSUPPORTED}},
// cudaOccupancyMaxPotentialBlockSize
{"cuOccupancyMaxPotentialBlockSize", {"hipOccupancyMaxPotentialBlockSize", "", CONV_OCCUPANCY, API_DRIVER}},
// cudaOccupancyMaxPotentialBlockSizeWithFlags
{"cuOccupancyMaxPotentialBlockSizeWithFlags", {"hipOccupancyMaxPotentialBlockSizeWithFlags", "", CONV_OCCUPANCY, API_DRIVER, HIP_UNSUPPORTED}},
// 5.21. Texture Reference Management
// no analogues
{"cuTexRefGetAddress", {"hipTexRefGetAddress", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetAddress_v2", {"hipTexRefGetAddress", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetAddressMode", {"hipTexRefGetAddressMode", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetArray", {"hipTexRefGetArray", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetBorderColor", {"hipTexRefGetBorderColor", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetFilterMode", {"hipTexRefGetFilterMode", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetFlags", {"hipTexRefGetFlags", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetFormat", {"hipTexRefGetFormat", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetMaxAnisotropy", {"hipTexRefGetMaxAnisotropy", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetMipmapFilterMode", {"hipTexRefGetMipmapFilterMode", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetMipmapLevelBias", {"hipTexRefGetMipmapLevelBias", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetMipmapLevelClamp", {"hipTexRefGetMipmapLevelClamp", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefGetMipmappedArray", {"hipTexRefGetMipmappedArray", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetAddress", {"hipTexRefSetAddress", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetAddress_v2", {"hipTexRefSetAddress", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetAddress2D", {"hipTexRefSetAddress2D", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetAddress2D_v2", {"hipTexRefSetAddress2D", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetAddress2D_v3", {"hipTexRefSetAddress2D", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetAddressMode", {"hipTexRefSetAddressMode", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetArray", {"hipTexRefSetArray", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetBorderColor", {"hipTexRefSetBorderColor", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetFilterMode", {"hipTexRefSetFilterMode", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetFlags", {"hipTexRefSetFlags", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetFormat", {"hipTexRefSetFormat", "", CONV_TEXTURE, API_DRIVER}},
{"cuTexRefSetMaxAnisotropy", {"hipTexRefSetMaxAnisotropy", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetMipmapFilterMode", {"hipTexRefSetMipmapFilterMode", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetMipmapLevelBias", {"hipTexRefSetMipmapLevelBias", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetMipmapLevelClamp", {"hipTexRefSetMipmapLevelClamp", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefSetMipmappedArray", {"hipTexRefSetMipmappedArray", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.22. Texture Reference Management [DEPRECATED]
// no analogues
{"cuTexRefCreate", {"hipTexRefCreate", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuTexRefDestroy", {"hipTexRefDestroy", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.23. Surface Reference Management
// no analogues
{"cuSurfRefGetArray", {"hipSurfRefGetArray", "", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED}},
{"cuSurfRefSetArray", {"hipSurfRefSetArray", "", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.24. Texture Object Management
// no analogue
// NOTE: Not equal to cudaCreateTextureObject due to different signatures
{"cuTexObjectCreate", {"hipTexObjectCreate", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// cudaDestroyTextureObject
{"cuTexObjectDestroy", {"hipTexObjectDestroy", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGetTextureObjectResourceDesc due to different signatures
{"cuTexObjectGetResourceDesc", {"hipTexObjectGetResourceDesc", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGetTextureObjectResourceViewDesc
{"cuTexObjectGetResourceViewDesc", {"hipTexObjectGetResourceViewDesc", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGetTextureObjectTextureDesc due to different signatures
{"cuTexObjectGetTextureDesc", {"hipTexObjectGetTextureDesc", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.25. Surface Object Management
// no analogue
// NOTE: Not equal to cudaCreateSurfaceObject due to different signatures
{"cuSurfObjectCreate", {"hipSurfObjectCreate", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// cudaDestroySurfaceObject
{"cuSurfObjectDestroy", {"hipSurfObjectDestroy", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGetSurfaceObjectResourceDesc due to different signatures
{"cuSurfObjectGetResourceDesc", {"hipSurfObjectGetResourceDesc", "", CONV_TEXTURE, API_DRIVER, HIP_UNSUPPORTED}},
// 5.26. Peer Context Memory Access
// no analogue
// NOTE: Not equal to cudaDeviceEnablePeerAccess due to different signatures
{"cuCtxEnablePeerAccess", {"hipCtxEnablePeerAccess", "", CONV_PEER, API_DRIVER}},
// no analogue
// NOTE: Not equal to cudaDeviceDisablePeerAccess due to different signatures
{"cuCtxDisablePeerAccess", {"hipCtxDisablePeerAccess", "", CONV_PEER, API_DRIVER}},
// cudaDeviceCanAccessPeer
{"cuDeviceCanAccessPeer", {"hipDeviceCanAccessPeer", "", CONV_PEER, API_DRIVER}},
// cudaDeviceGetP2PAttribute
{"cuDeviceGetP2PAttribute", {"hipDeviceGetP2PAttribute", "", CONV_PEER, API_DRIVER, HIP_UNSUPPORTED}},
// 5.27. Graphics Interoperability
// cudaGraphicsMapResources
{"cuGraphicsMapResources", {"hipGraphicsMapResources", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceGetMappedMipmappedArray
{"cuGraphicsResourceGetMappedMipmappedArray", {"hipGraphicsResourceGetMappedMipmappedArray", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceGetMappedPointer
{"cuGraphicsResourceGetMappedPointer", {"hipGraphicsResourceGetMappedPointer", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceGetMappedPointer
{"cuGraphicsResourceGetMappedPointer_v2", {"hipGraphicsResourceGetMappedPointer", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceSetMapFlags
{"cuGraphicsResourceSetMapFlags", {"hipGraphicsResourceSetMapFlags", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceSetMapFlags
{"cuGraphicsResourceSetMapFlags_v2", {"hipGraphicsResourceSetMapFlags", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsSubResourceGetMappedArray
{"cuGraphicsSubResourceGetMappedArray", {"hipGraphicsSubResourceGetMappedArray", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsUnmapResources
{"cuGraphicsUnmapResources", {"hipGraphicsUnmapResources", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsUnregisterResource
{"cuGraphicsUnregisterResource", {"hipGraphicsUnregisterResource", "", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED}},
// 5.28. Profiler Control
// cudaProfilerInitialize
{"cuProfilerInitialize", {"hipProfilerInitialize", "", CONV_PROFILER, API_DRIVER, HIP_UNSUPPORTED}},
// cudaProfilerStart
{"cuProfilerStart", {"hipProfilerStart", "", CONV_PROFILER, API_DRIVER}},
// cudaProfilerStop
{"cuProfilerStop", {"hipProfilerStop", "", CONV_PROFILER, API_DRIVER}},
// 5.29. OpenGL Interoperability
// cudaGLGetDevices
{"cuGLGetDevices", {"hipGLGetDevices", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsGLRegisterBuffer
{"cuGraphicsGLRegisterBuffer", {"hipGraphicsGLRegisterBuffer", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsGLRegisterImage
{"cuGraphicsGLRegisterImage", {"hipGraphicsGLRegisterImage", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaWGLGetDevice
{"cuWGLGetDevice", {"hipWGLGetDevice", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// 5.29. OpenGL Interoperability [DEPRECATED]
// no analogue
{"cuGLCtxCreate", {"hipGLCtxCreate", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuGLInit", {"hipGLInit", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGLMapBufferObject due to different signatures
{"cuGLMapBufferObject", {"hipGLMapBufferObject_", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
// NOTE: Not equal to cudaGLMapBufferObjectAsync due to different signatures
{"cuGLMapBufferObjectAsync", {"hipGLMapBufferObjectAsync_", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGLRegisterBufferObject
{"cuGLRegisterBufferObject", {"hipGLRegisterBufferObject", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGLSetBufferObjectMapFlags
{"cuGLSetBufferObjectMapFlags", {"hipGLSetBufferObjectMapFlags", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGLUnmapBufferObject
{"cuGLUnmapBufferObject", {"hipGLUnmapBufferObject", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGLUnmapBufferObjectAsync
{"cuGLUnmapBufferObjectAsync", {"hipGLUnmapBufferObjectAsync", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGLUnregisterBufferObject
{"cuGLUnregisterBufferObject", {"hipGLUnregisterBufferObject", "", CONV_OPENGL, API_DRIVER, HIP_UNSUPPORTED}},
// 5.30. Direct3D 9 Interoperability
// no analogue
{"cuD3D9CtxCreate", {"hipD3D9CtxCreate", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuD3D9CtxCreateOnDevice", {"hipD3D9CtxCreateOnDevice", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9GetDevice
{"cuD3D9GetDevice", {"hipD3D9GetDevice", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9GetDevices
{"cuD3D9GetDevices", {"hipD3D9GetDevices", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9GetDirect3DDevice
{"cuD3D9GetDirect3DDevice", {"hipD3D9GetDirect3DDevice", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsD3D9RegisterResource
{"cuGraphicsD3D9RegisterResource", {"hipGraphicsD3D9RegisterResource", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// 5.30. Direct3D 9 Interoperability [DEPRECATED]
// cudaD3D9MapResources
{"cuD3D9MapResources", {"hipD3D9MapResources", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9RegisterResource
{"cuD3D9RegisterResource", {"hipD3D9RegisterResource", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceGetMappedArray
{"cuD3D9ResourceGetMappedArray", {"hipD3D9ResourceGetMappedArray", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceGetMappedPitch
{"cuD3D9ResourceGetMappedPitch", {"hipD3D9ResourceGetMappedPitch", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceGetMappedPointer
{"cuD3D9ResourceGetMappedPointer", {"hipD3D9ResourceGetMappedPointer", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceGetMappedSize
{"cuD3D9ResourceGetMappedSize", {"hipD3D9ResourceGetMappedSize", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceGetSurfaceDimensions
{"cuD3D9ResourceGetSurfaceDimensions", {"hipD3D9ResourceGetSurfaceDimensions", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9ResourceSetMapFlags
{"cuD3D9ResourceSetMapFlags", {"hipD3D9ResourceSetMapFlags", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9UnmapResources
{"cuD3D9UnmapResources", {"hipD3D9UnmapResources", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D9UnregisterResource
{"cuD3D9UnregisterResource", {"hipD3D9UnregisterResource", "", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED}},
// 5.31. Direct3D 10 Interoperability
// cudaD3D10GetDevice
{"cuD3D10GetDevice", {"hipD3D10GetDevice", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10GetDevices
{"cuD3D10GetDevices", {"hipD3D10GetDevices", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsD3D10RegisterResource
{"cuGraphicsD3D10RegisterResource", {"hipGraphicsD3D10RegisterResource", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// 5.31. Direct3D 10 Interoperability [DEPRECATED]
// no analogue
{"cuD3D10CtxCreate", {"hipD3D10CtxCreate", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuD3D10CtxCreateOnDevice", {"hipD3D10CtxCreateOnDevice", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10GetDirect3DDevice
{"cuD3D10GetDirect3DDevice", {"hipD3D10GetDirect3DDevice", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10MapResources
{"cuD3D10MapResources", {"hipD3D10MapResources", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10RegisterResource
{"cuD3D10RegisterResource", {"hipD3D10RegisterResource", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceGetMappedArray
{"cuD3D10ResourceGetMappedArray", {"hipD3D10ResourceGetMappedArray", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceGetMappedPitch
{"cuD3D10ResourceGetMappedPitch", {"hipD3D10ResourceGetMappedPitch", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceGetMappedPointer
{"cuD3D10ResourceGetMappedPointer", {"hipD3D10ResourceGetMappedPointer", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceGetMappedSize
{"cuD3D10ResourceGetMappedSize", {"hipD3D10ResourceGetMappedSize", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceGetSurfaceDimensions
{"cuD3D10ResourceGetSurfaceDimensions", {"hipD3D10ResourceGetSurfaceDimensions", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10ResourceSetMapFlags
{"cuD310ResourceSetMapFlags", {"hipD3D10ResourceSetMapFlags", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10UnmapResources
{"cuD3D10UnmapResources", {"hipD3D10UnmapResources", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D10UnregisterResource
{"cuD3D10UnregisterResource", {"hipD3D10UnregisterResource", "", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED}},
// 5.32. Direct3D 11 Interoperability
// cudaD3D11GetDevice
{"cuD3D11GetDevice", {"hipD3D11GetDevice", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D11GetDevices
{"cuD3D11GetDevices", {"hipD3D11GetDevices", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsD3D11RegisterResource
{"cuGraphicsD3D11RegisterResource", {"hipGraphicsD3D11RegisterResource", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// 5.32. Direct3D 11 Interoperability [DEPRECATED]
// no analogue
{"cuD3D11CtxCreate", {"hipD3D11CtxCreate", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuD3D11CtxCreateOnDevice", {"hipD3D11CtxCreateOnDevice", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// cudaD3D11GetDirect3DDevice
{"cuD3D11GetDirect3DDevice", {"hipD3D11GetDirect3DDevice", "", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED}},
// 5.33. VDPAU Interoperability
// cudaGraphicsVDPAURegisterOutputSurface
{"cuGraphicsVDPAURegisterOutputSurface", {"hipGraphicsVDPAURegisterOutputSurface", "", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsVDPAURegisterVideoSurface
{"cuGraphicsVDPAURegisterVideoSurface", {"hipGraphicsVDPAURegisterVideoSurface", "", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED}},
// cudaVDPAUGetDevice
{"cuVDPAUGetDevice", {"hipVDPAUGetDevice", "", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED}},
// no analogue
{"cuVDPAUCtxCreate", {"hipVDPAUCtxCreate", "", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED}},
// 5.34. EGL Interoperability
// cudaEGLStreamConsumerAcquireFrame
{"cuEGLStreamConsumerAcquireFrame", {"hipEGLStreamConsumerAcquireFrame", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamConsumerConnect
{"cuEGLStreamConsumerConnect", {"hipEGLStreamConsumerConnect", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamConsumerConnectWithFlags
{"cuEGLStreamConsumerConnectWithFlags", {"hipEGLStreamConsumerConnectWithFlags", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamConsumerDisconnect
{"cuEGLStreamConsumerDisconnect", {"hipEGLStreamConsumerDisconnect", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamConsumerReleaseFrame
{"cuEGLStreamConsumerReleaseFrame", {"hipEGLStreamConsumerReleaseFrame", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamProducerConnect
{"cuEGLStreamProducerConnect", {"hipEGLStreamProducerConnect", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamProducerDisconnect
{"cuEGLStreamProducerDisconnect", {"hipEGLStreamProducerDisconnect", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamProducerPresentFrame
{"cuEGLStreamProducerPresentFrame", {"hipEGLStreamProducerPresentFrame", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEGLStreamProducerReturnFrame
{"cuEGLStreamProducerReturnFrame", {"hipEGLStreamProducerReturnFrame", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsEGLRegisterImage
{"cuGraphicsEGLRegisterImage", {"hipGraphicsEGLRegisterImage", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaGraphicsResourceGetMappedEglFrame
{"cuGraphicsResourceGetMappedEglFrame", {"hipGraphicsResourceGetMappedEglFrame", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
// cudaEventCreateFromEGLSync
{"cuEventCreateFromEGLSync", {"hipEventCreateFromEGLSync", "", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED}},
};
| 1 | 7,610 | Is there any official CUDA API called cuMemcpy2D_v2? | ROCm-Developer-Tools-HIP | cpp |